author      Jiri Kosina <jkosina@suse.cz>   2021-11-05 12:10:27 +0100
committer   Jiri Kosina <jkosina@suse.cz>   2021-11-05 12:10:27 +0100
commit      820e9906cf64142169134f35b996108303cf22ca (patch)
tree        89f8831fb39c59aba208395d91e25ab4b26f473f /drivers/net/ethernet
parent      b9865081a56a5cd01cd7c9735911709ff82bd8df (diff)
parent      2ea5999d07d2a0ab6ad92ccf65524707f2c5e456 (diff)
Merge branch 'for-5.16/asus' into for-linus
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/3com/3c515.c | 2
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 2
-rw-r--r--  drivers/net/ethernet/8390/ne.c | 22
-rw-r--r--  drivers/net/ethernet/amd/ni65.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-bcma.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 33
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 91
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 151
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 42
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 51
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c | 14
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 115
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 1
-rw-r--r--  drivers/net/ethernet/cadence/macb_pci.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 85
-rw-r--r--  drivers/net/ethernet/cirrus/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 7
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ierb.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ierb.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 14
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 99
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 10
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 58
-rw-r--r--  drivers/net/ethernet/i825xx/82596.c | 2
-rw-r--r--  drivers/net/ethernet/i825xx/sun3_82586.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 16
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_idc.c | 6
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c | 42
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 28
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 12
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 50
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c | 7
-rw-r--r--  drivers/net/ethernet/microsoft/mana/hw_channel.c | 4
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 21
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_devlink.c | 2
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_mrp.c | 2
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_net.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 3
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 12
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | 1
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/rdc/r6040.c | 9
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 78
-rw-r--r--  drivers/net/ethernet/sfc/efx_channels.c | 106
-rw-r--r--  drivers/net/ethernet/sfc/falcon/efx.c | 79
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 8
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 29
-rw-r--r--  drivers/net/ethernet/smsc/smc911x.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 62
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 44
-rw-r--r--  drivers/net/ethernet/xscale/ptp_ixp46x.c | 1
97 files changed, 847 insertions, 786 deletions
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 8d90fed5d33e..6f0ea2facea9 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -1050,7 +1050,7 @@ static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
#ifdef VORTEX_BUS_MASTER
if (vp->bus_master) {
/* Set the bus-master controller to transfer the packet. */
- outl((int) (skb->data), ioaddr + Wn7_MasterAddr);
+ outl(isa_virt_to_bus(skb->data), ioaddr + Wn7_MasterAddr);
outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
vp->tx_skb = skb;
outw(StartDMADown, ioaddr + EL3_CMD);
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 17c16333a412..7b0ae9efc004 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2786,7 +2786,7 @@ static void
dump_tx_ring(struct net_device *dev)
{
if (vortex_debug > 0) {
- struct vortex_private *vp = netdev_priv(dev);
+ struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
if (vp->full_bus_master_tx) {
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 53660bc8d6ff..9afc712f5948 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -922,13 +922,16 @@ static void __init ne_add_devices(void)
}
}
-#ifdef MODULE
static int __init ne_init(void)
{
int retval;
- ne_add_devices();
+
+ if (IS_MODULE(CONFIG_NE2000))
+ ne_add_devices();
+
retval = platform_driver_probe(&ne_driver, ne_drv_probe);
- if (retval) {
+
+ if (IS_MODULE(CONFIG_NE2000) && retval) {
if (io[0] == 0)
pr_notice("ne.c: You must supply \"io=0xNNN\""
" value(s) for ISA cards.\n");
@@ -941,18 +944,8 @@ static int __init ne_init(void)
return retval;
}
module_init(ne_init);
-#else /* MODULE */
-static int __init ne_init(void)
-{
- int retval = platform_driver_probe(&ne_driver, ne_drv_probe);
-
- /* Unregister unused platform_devices. */
- ne_loop_rm_unreg(0);
- return retval;
-}
-module_init(ne_init);
-#ifdef CONFIG_NETDEV_LEGACY_INIT
+#if !defined(MODULE) && defined(CONFIG_NETDEV_LEGACY_INIT)
struct net_device * __init ne_probe(int unit)
{
int this_dev;
@@ -994,7 +987,6 @@ struct net_device * __init ne_probe(int unit)
return ERR_PTR(-ENODEV);
}
#endif
-#endif /* MODULE */
static void __exit ne_exit(void)
{
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index b5df7ad5a83f..032e8922b482 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -748,7 +748,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p)
#ifdef XMT_VIA_SKB
skb_save[i] = p->tmd_skb[i];
#endif
- buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer);
+ buffer[i] = (unsigned long)isa_bus_to_virt(tmdp->u.buffer);
blen[i] = tmdp->blen;
tmdp->u.s.status = 0x0;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index dee9ff74d6d6..d4b1976ee69b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -413,13 +413,13 @@ static int atl_resume_common(struct device *dev, bool deep)
if (deep) {
/* Reinitialize Nic/Vecs objects */
aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
+ }
+ if (netif_running(nic->ndev)) {
ret = aq_nic_init(nic);
if (ret)
goto err_exit;
- }
- if (netif_running(nic->ndev)) {
ret = aq_nic_start(nic);
if (ret)
goto err_exit;
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 85fa0ab7201c..9513cfb5ba58 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -129,6 +129,8 @@ static int bgmac_probe(struct bcma_device *core)
bcma_set_drvdata(core, bgmac);
err = of_get_mac_address(bgmac->dev->of_node, bgmac->net_dev->dev_addr);
+ if (err == -EPROBE_DEFER)
+ return err;
/* If no MAC address assigned via device tree, check SPROM */
if (err) {
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index a705e2615307..8c83973adca5 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8038,9 +8038,9 @@ bnx2_get_pci_speed(struct bnx2 *bp)
static void
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
+ unsigned int len;
int rc, i, j;
u8 *data;
- unsigned int block_end, rosize, len;
#define BNX2_VPD_NVRAM_OFFSET 0x300
#define BNX2_VPD_LEN 128
@@ -8057,38 +8057,21 @@ bnx2_read_vpd_fw_ver(struct bnx2 *bp)
for (i = 0; i < BNX2_VPD_LEN; i += 4)
swab32s((u32 *)&data[i]);
- i = pci_vpd_find_tag(data, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
- if (i < 0)
- goto vpd_done;
-
- rosize = pci_vpd_lrdt_size(&data[i]);
- i += PCI_VPD_LRDT_TAG_SIZE;
- block_end = i + rosize;
-
- if (block_end > BNX2_VPD_LEN)
- goto vpd_done;
-
- j = pci_vpd_find_info_keyword(data, i, rosize,
- PCI_VPD_RO_KEYWORD_MFR_ID);
+ j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
+ PCI_VPD_RO_KEYWORD_MFR_ID, &len);
if (j < 0)
goto vpd_done;
- len = pci_vpd_info_field_size(&data[j]);
-
- j += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (j + len > block_end || len != 4 ||
- memcmp(&data[j], "1028", 4))
+ if (len != 4 || memcmp(&data[j], "1028", 4))
goto vpd_done;
- j = pci_vpd_find_info_keyword(data, i, rosize,
- PCI_VPD_RO_KEYWORD_VENDOR0);
+ j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
+ PCI_VPD_RO_KEYWORD_VENDOR0,
+ &len);
if (j < 0)
goto vpd_done;
- len = pci_vpd_info_field_size(&data[j]);
-
- j += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
+ if (len > BNX2_MAX_VER_SLEN)
goto vpd_done;
memcpy(bp->fw_version, &data[j], len);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index d04994840b87..e789430f407c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -2407,7 +2407,6 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
#endif
-#define BNX2X_VPD_LEN 128
#define VENDOR_ID_LEN 4
#define VF_ACQUIRE_THRESH 3
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 6d98134913cd..ae87296ae1ff 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12189,86 +12189,35 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
static void bnx2x_read_fwinfo(struct bnx2x *bp)
{
- int cnt, i, block_end, rodi;
- char vpd_start[BNX2X_VPD_LEN+1];
- char str_id_reg[VENDOR_ID_LEN+1];
- char str_id_cap[VENDOR_ID_LEN+1];
- char *vpd_data;
- char *vpd_extended_data = NULL;
- u8 len;
-
- cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
- memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
-
- if (cnt < BNX2X_VPD_LEN)
- goto out_not_found;
-
- /* VPD RO tag should be first tag after identifier string, hence
- * we should be able to find it in first BNX2X_VPD_LEN chars
- */
- i = pci_vpd_find_tag(vpd_start, BNX2X_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
- if (i < 0)
- goto out_not_found;
-
- block_end = i + PCI_VPD_LRDT_TAG_SIZE +
- pci_vpd_lrdt_size(&vpd_start[i]);
-
- i += PCI_VPD_LRDT_TAG_SIZE;
-
- if (block_end > BNX2X_VPD_LEN) {
- vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
- if (vpd_extended_data == NULL)
- goto out_not_found;
-
- /* read rest of vpd image into vpd_extended_data */
- memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
- cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
- block_end - BNX2X_VPD_LEN,
- vpd_extended_data + BNX2X_VPD_LEN);
- if (cnt < (block_end - BNX2X_VPD_LEN))
- goto out_not_found;
- vpd_data = vpd_extended_data;
- } else
- vpd_data = vpd_start;
+ char str_id[VENDOR_ID_LEN + 1];
+ unsigned int vpd_len, kw_len;
+ u8 *vpd_data;
+ int rodi;
- /* now vpd_data holds full vpd content in both cases */
-
- rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
- PCI_VPD_RO_KEYWORD_MFR_ID);
- if (rodi < 0)
- goto out_not_found;
+ memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
- len = pci_vpd_info_field_size(&vpd_data[rodi]);
+ vpd_data = pci_vpd_alloc(bp->pdev, &vpd_len);
+ if (IS_ERR(vpd_data))
+ return;
- if (len != VENDOR_ID_LEN)
+ rodi = pci_vpd_find_ro_info_keyword(vpd_data, vpd_len,
+ PCI_VPD_RO_KEYWORD_MFR_ID, &kw_len);
+ if (rodi < 0 || kw_len != VENDOR_ID_LEN)
goto out_not_found;
- rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
-
/* vendor specific info */
- snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
- snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
- if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
- !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
-
- rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
- PCI_VPD_RO_KEYWORD_VENDOR0);
- if (rodi >= 0) {
- len = pci_vpd_info_field_size(&vpd_data[rodi]);
-
- rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
-
- if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
- memcpy(bp->fw_ver, &vpd_data[rodi], len);
- bp->fw_ver[len] = ' ';
- }
+ snprintf(str_id, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
+ if (!strncasecmp(str_id, &vpd_data[rodi], VENDOR_ID_LEN)) {
+ rodi = pci_vpd_find_ro_info_keyword(vpd_data, vpd_len,
+ PCI_VPD_RO_KEYWORD_VENDOR0,
+ &kw_len);
+ if (rodi >= 0 && kw_len < sizeof(bp->fw_ver)) {
+ memcpy(bp->fw_ver, &vpd_data[rodi], kw_len);
+ bp->fw_ver[kw_len] = ' ';
}
- kfree(vpd_extended_data);
- return;
}
out_not_found:
- kfree(vpd_extended_data);
- return;
+ kfree(vpd_data);
}
static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index f255fd0b16db..6fbf735fca31 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1224,7 +1224,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
/* SR-IOV capability was enabled but there are no VFs*/
if (iov->total == 0) {
- err = -EINVAL;
+ err = 0;
goto failed;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 627f85ee3922..62f84cc91e4d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -305,13 +305,15 @@ static bool bnxt_vf_pciid(enum board_idx idx)
writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
#define BNXT_DB_NQ_P5(db, idx) \
- writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)
+ bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), \
+ (db)->doorbell)
#define BNXT_DB_CQ_ARM(db, idx) \
writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
#define BNXT_DB_NQ_ARM_P5(db, idx) \
- writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
+ bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\
+ (db)->doorbell)
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
@@ -332,8 +334,8 @@ static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
if (bp->flags & BNXT_FLAG_CHIP_P5)
- writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
- db->doorbell);
+ bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
+ RING_CMP(idx), db->doorbell);
else
BNXT_DB_CQ(db, idx);
}
@@ -389,7 +391,7 @@ static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
* netif_tx_queue_stopped().
*/
smp_mb();
- if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
+ if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
netif_tx_wake_queue(txq);
return false;
}
@@ -762,7 +764,7 @@ next_tx_int:
smp_mb();
if (unlikely(netif_tx_queue_stopped(txq)) &&
- bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+ bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
netif_tx_wake_queue(txq);
}
@@ -2200,25 +2202,33 @@ static int bnxt_async_event_process(struct bnxt *bp,
if (!fw_health)
goto async_event_process_exit;
- fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
- fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
- if (!fw_health->enabled) {
+ if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
+ fw_health->enabled = false;
netif_info(bp, drv, bp->dev,
"Error recovery info: error recovery[0]\n");
break;
}
+ fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
fw_health->tmr_multiplier =
DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
bp->current_interval * 10);
fw_health->tmr_counter = fw_health->tmr_multiplier;
- fw_health->last_fw_heartbeat =
- bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
+ if (!fw_health->enabled)
+ fw_health->last_fw_heartbeat =
+ bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
fw_health->last_fw_reset_cnt =
bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
netif_info(bp, drv, bp->dev,
"Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
fw_health->master, fw_health->last_fw_reset_cnt,
bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
+ if (!fw_health->enabled) {
+ /* Make sure tmr_counter is set and visible to
+ * bnxt_health_check() before setting enabled to true.
+ */
+ smp_wmb();
+ fw_health->enabled = true;
+ }
goto async_event_process_exit;
}
case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
@@ -2406,7 +2416,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
tx_pkts++;
/* return full budget so NAPI will complete. */
- if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
+ if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
rx_pkts = budget;
raw_cons = NEXT_RAW_CMP(raw_cons);
if (budget)
@@ -2638,8 +2648,8 @@ static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
if (cpr2 && cpr2->had_work_done) {
db = &cpr2->cp_db;
- writeq(db->db_key64 | dbr_type |
- RING_CMP(cpr2->cp_raw_cons), db->doorbell);
+ bnxt_writeq(bp, db->db_key64 | dbr_type |
+ RING_CMP(cpr2->cp_raw_cons), db->doorbell);
cpr2->had_work_done = 0;
}
}
@@ -2719,6 +2729,9 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
int j;
+ if (!txr->tx_buf_ring)
+ continue;
+
for (j = 0; j < max_idx;) {
struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
struct sk_buff *skb;
@@ -2803,6 +2816,9 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
}
skip_rx_tpa_free:
+ if (!rxr->rx_buf_ring)
+ goto skip_rx_buf_free;
+
for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
dma_addr_t mapping = rx_buf->mapping;
@@ -2825,6 +2841,11 @@ skip_rx_tpa_free:
kfree(data);
}
}
+
+skip_rx_buf_free:
+ if (!rxr->rx_agg_ring)
+ goto skip_rx_agg_free;
+
for (i = 0; i < max_agg_idx; i++) {
struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
struct page *page = rx_agg_buf->page;
@@ -2841,6 +2862,8 @@ skip_rx_tpa_free:
__free_page(page);
}
+
+skip_rx_agg_free:
if (rxr->rx_page) {
__free_page(rxr->rx_page);
rxr->rx_page = NULL;
@@ -2889,6 +2912,9 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
struct pci_dev *pdev = bp->pdev;
int i;
+ if (!rmem->pg_arr)
+ goto skip_pages;
+
for (i = 0; i < rmem->nr_pages; i++) {
if (!rmem->pg_arr[i])
continue;
@@ -2898,6 +2924,7 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
rmem->pg_arr[i] = NULL;
}
+skip_pages:
if (rmem->pg_tbl) {
size_t pg_tbl_size = rmem->nr_pages * 8;
@@ -3217,10 +3244,14 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
{
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
kfree(cpr->cp_desc_ring);
cpr->cp_desc_ring = NULL;
+ ring->ring_mem.pg_arr = NULL;
kfree(cpr->cp_desc_mapping);
cpr->cp_desc_mapping = NULL;
+ ring->ring_mem.dma_arr = NULL;
}
static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
@@ -3609,7 +3640,7 @@ static int bnxt_init_tx_rings(struct bnxt *bp)
u16 i;
bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
- MAX_SKB_FRAGS + 1);
+ BNXT_MIN_TX_DESC_CNT);
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
@@ -4639,6 +4670,13 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
struct hwrm_tunnel_dst_port_free_input *req;
int rc;
+ if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
+ bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
+ return 0;
+ if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
+ bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
+ return 0;
+
rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
if (rc)
return rc;
@@ -4648,10 +4686,12 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
switch (tunnel_type) {
case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
+ bp->vxlan_port = 0;
bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
break;
case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
+ bp->nge_port = 0;
bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
break;
default:
@@ -4689,10 +4729,12 @@ static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
switch (tunnel_type) {
case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
+ bp->vxlan_port = port;
bp->vxlan_fw_dst_port_id =
le16_to_cpu(resp->tunnel_dst_port_id);
break;
case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
+ bp->nge_port = port;
bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
break;
default:
@@ -8221,12 +8263,10 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
- if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
- bnxt_hwrm_tunnel_dst_port_free(
- bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
- if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID)
- bnxt_hwrm_tunnel_dst_port_free(
- bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
+ bnxt_hwrm_tunnel_dst_port_free(bp,
+ TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
+ bnxt_hwrm_tunnel_dst_port_free(bp,
+ TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
}
static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
@@ -11247,6 +11287,8 @@ static void bnxt_fw_health_check(struct bnxt *bp)
if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return;
+ /* Make sure it is enabled before checking the tmr_counter. */
+ smp_rmb();
if (fw_health->tmr_counter) {
fw_health->tmr_counter--;
return;
@@ -12185,6 +12227,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
return;
}
+ if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
+ bp->fw_health->enabled) {
+ bp->fw_health->last_fw_reset_cnt =
+ bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ }
bp->fw_reset_state = 0;
/* Make sure fw_reset_state is 0 before clearing the flag */
smp_mb__before_atomic();
@@ -12625,13 +12672,10 @@ static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
unsigned int cmd;
udp_tunnel_nic_get_port(netdev, table, 0, &ti);
- if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
- bp->vxlan_port = ti.port;
+ if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
- } else {
- bp->nge_port = ti.port;
+ else
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
- }
if (ti.port)
return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
@@ -13081,66 +13125,35 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
return rc;
}
-#define BNXT_VPD_LEN 512
static void bnxt_vpd_read_info(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
- int i, len, pos, ro_size, size;
- ssize_t vpd_size;
+ unsigned int vpd_size, kw_len;
+ int pos, size;
u8 *vpd_data;
- vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL);
- if (!vpd_data)
+ vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(pdev, "Unable to read VPD\n");
return;
-
- vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data);
- if (vpd_size <= 0) {
- netdev_err(bp->dev, "Unable to read VPD\n");
- goto exit;
}
- i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (i < 0) {
- netdev_err(bp->dev, "VPD READ-Only not found\n");
- goto exit;
- }
-
- i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (i < 0) {
- netdev_err(bp->dev, "VPD READ-Only not found\n");
- goto exit;
- }
-
- ro_size = pci_vpd_lrdt_size(&vpd_data[i]);
- i += PCI_VPD_LRDT_TAG_SIZE;
- if (i + ro_size > vpd_size)
- goto exit;
-
- pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
- PCI_VPD_RO_KEYWORD_PARTNO);
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
if (pos < 0)
goto read_sn;
- len = pci_vpd_info_field_size(&vpd_data[pos]);
- pos += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (len + pos > vpd_size)
- goto read_sn;
-
- size = min(len, BNXT_VPD_FLD_LEN - 1);
+ size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
memcpy(bp->board_partno, &vpd_data[pos], size);
read_sn:
- pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
- PCI_VPD_RO_KEYWORD_SERIALNO);
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO,
+ &kw_len);
if (pos < 0)
goto exit;
- len = pci_vpd_info_field_size(&vpd_data[pos]);
- pos += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (len + pos > vpd_size)
- goto exit;
-
- size = min(len, BNXT_VPD_FLD_LEN - 1);
+ size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
memcpy(bp->board_serialno, &vpd_data[pos], size);
exit:
kfree(vpd_data);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index a8212dcdad5f..19fe6478e9b4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -28,6 +28,7 @@
#include <net/dst_metadata.h>
#include <net/xdp.h>
#include <linux/dim.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#ifdef CONFIG_TEE_BNXT_FW
#include <linux/firmware/broadcom/tee_bnxt_fw.h>
#endif
@@ -628,6 +629,11 @@ struct nqe_cn {
#define BNXT_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
#define BNXT_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)
+/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1. We need one extra
+ * BD because the first TX BD is always a long BD.
+ */
+#define BNXT_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2)
+
#define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
@@ -1981,7 +1987,7 @@ struct bnxt {
struct mutex sriov_lock;
#endif
-#ifndef writeq
+#if BITS_PER_LONG == 32
/* ensure atomic 64-bit doorbell writes on 32-bit systems. */
spinlock_t db_lock;
#endif
@@ -2110,24 +2116,36 @@ static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}
-#ifndef writeq
-#define writeq(val64, db) \
-do { \
- spin_lock(&bp->db_lock); \
- writel((val64) & 0xffffffff, db); \
- writel((val64) >> 32, (db) + 4); \
- spin_unlock(&bp->db_lock); \
-} while (0)
+static inline void bnxt_writeq(struct bnxt *bp, u64 val,
+ volatile void __iomem *addr)
+{
+#if BITS_PER_LONG == 32
+ spin_lock(&bp->db_lock);
+ lo_hi_writeq(val, addr);
+ spin_unlock(&bp->db_lock);
+#else
+ writeq(val, addr);
+#endif
+}
-#define writeq_relaxed writeq
+static inline void bnxt_writeq_relaxed(struct bnxt *bp, u64 val,
+ volatile void __iomem *addr)
+{
+#if BITS_PER_LONG == 32
+ spin_lock(&bp->db_lock);
+ lo_hi_writeq_relaxed(val, addr);
+ spin_unlock(&bp->db_lock);
+#else
+ writeq_relaxed(val, addr);
#endif
+}
/* For TX and RX ring doorbells with no ordering guarantee*/
static inline void bnxt_db_write_relaxed(struct bnxt *bp,
struct bnxt_db_info *db, u32 idx)
{
if (bp->flags & BNXT_FLAG_CHIP_P5) {
- writeq_relaxed(db->db_key64 | idx, db->doorbell);
+ bnxt_writeq_relaxed(bp, db->db_key64 | idx, db->doorbell);
} else {
u32 db_val = db->db_key32 | idx;
@@ -2142,7 +2160,7 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
u32 idx)
{
if (bp->flags & BNXT_FLAG_CHIP_P5) {
- writeq(db->db_key64 | idx, db->doorbell);
+ bnxt_writeq(bp, db->db_key64 | idx, db->doorbell);
} else {
u32 db_val = db->db_key32 | idx;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 1423cc617d93..9576547df4ab 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -352,13 +352,16 @@ static void bnxt_copy_from_nvm_data(union devlink_param_value *dst,
dst->vu8 = (u8)val32;
}
-static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp,
- union devlink_param_value *nvm_cfg_ver)
+static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp, u32 *nvm_cfg_ver)
{
struct hwrm_nvm_get_variable_input *req;
+ u16 bytes = BNXT_NVM_CFG_VER_BYTES;
+ u16 bits = BNXT_NVM_CFG_VER_BITS;
+ union devlink_param_value ver;
union bnxt_nvm_data *data;
dma_addr_t data_dma_addr;
- int rc;
+ int rc, i = 2;
+ u16 dim = 1;
rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
if (rc)
@@ -370,16 +373,34 @@ static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp,
goto exit;
}
+ /* earlier devices present as an array of raw bytes */
+ if (!BNXT_CHIP_P5(bp)) {
+ dim = 0;
+ i = 0;
+ bits *= 3; /* array of 3 version components */
+ bytes *= 4; /* copy whole word */
+ }
+
hwrm_req_hold(bp, req);
req->dest_data_addr = cpu_to_le64(data_dma_addr);
- req->data_len = cpu_to_le16(BNXT_NVM_CFG_VER_BITS);
+ req->data_len = cpu_to_le16(bits);
req->option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER);
+ req->dimensions = cpu_to_le16(dim);
- rc = hwrm_req_send_silent(bp, req);
- if (!rc)
- bnxt_copy_from_nvm_data(nvm_cfg_ver, data,
- BNXT_NVM_CFG_VER_BITS,
- BNXT_NVM_CFG_VER_BYTES);
+ while (i >= 0) {
+ req->index_0 = cpu_to_le16(i--);
+ rc = hwrm_req_send_silent(bp, req);
+ if (rc)
+ goto exit;
+ bnxt_copy_from_nvm_data(&ver, data, bits, bytes);
+
+ if (BNXT_CHIP_P5(bp)) {
+ *nvm_cfg_ver <<= 8;
+ *nvm_cfg_ver |= ver.vu8;
+ } else {
+ *nvm_cfg_ver = ver.vu32;
+ }
+ }
exit:
hwrm_req_drop(bp, req);
@@ -416,12 +437,12 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
{
struct hwrm_nvm_get_dev_info_output nvm_dev_info;
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
- union devlink_param_value nvm_cfg_ver;
struct hwrm_ver_get_output *ver_resp;
char mgmt_ver[FW_VER_STR_LEN];
char roce_ver[FW_VER_STR_LEN];
char ncsi_ver[FW_VER_STR_LEN];
char buf[32];
+ u32 ver = 0;
int rc;
rc = devlink_info_driver_name_put(req, DRV_MODULE_NAME);
@@ -456,7 +477,7 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
return rc;
ver_resp = &bp->ver_resp;
- sprintf(buf, "%X", ver_resp->chip_rev);
+ sprintf(buf, "%c%d", 'A' + ver_resp->chip_rev, ver_resp->chip_metal);
rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED,
DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
if (rc)
@@ -475,11 +496,9 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (rc)
return rc;
- if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &nvm_cfg_ver)) {
- u32 ver = nvm_cfg_ver.vu32;
-
- sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xf, (ver >> 8) & 0xf,
- ver & 0xf);
+ if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &ver)) {
+ sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xff, (ver >> 8) & 0xff,
+ ver & 0xff);
rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
buf);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index d22cab5d6856..d889f240da2b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -40,8 +40,8 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
#define NVM_OFF_ENABLE_SRIOV 401
#define NVM_OFF_NVM_CFG_VER 602
-#define BNXT_NVM_CFG_VER_BITS 24
-#define BNXT_NVM_CFG_VER_BYTES 4
+#define BNXT_NVM_CFG_VER_BITS 8
+#define BNXT_NVM_CFG_VER_BYTES 1
#define BNXT_MSIX_VEC_MAX 512
#define BNXT_MSIX_VEC_MIN_MAX 128
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index b056e3c29bbd..7260910e75fb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -798,7 +798,7 @@ static int bnxt_set_ringparam(struct net_device *dev,
if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
(ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
- (ering->tx_pending <= MAX_SKB_FRAGS))
+ (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
return -EINVAL;
if (netif_running(dev))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index acef61abe35d..bb7327b82d0b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -145,11 +145,11 @@ void hwrm_req_timeout(struct bnxt *bp, void *req, unsigned int timeout)
* @bp: The driver context.
* @req: The request for which calls to hwrm_req_dma_slice() will have altered
* allocation flags.
- * @flags: A bitmask of GFP flags. These flags are passed to
- * dma_alloc_coherent() whenever it is used to allocate backing memory
- * for slices. Note that calls to hwrm_req_dma_slice() will not always
- * result in new allocations, however, memory suballocated from the
- * request buffer is already __GFP_ZERO.
+ * @gfp: A bitmask of GFP flags. These flags are passed to dma_alloc_coherent()
+ * whenever it is used to allocate backing memory for slices. Note that
+ * calls to hwrm_req_dma_slice() will not always result in new allocations,
+ * however, memory suballocated from the request buffer is already
+ * __GFP_ZERO.
*
* Sets the GFP allocation flags associated with the request for subsequent
* calls to hwrm_req_dma_slice(). This can be useful for specifying __GFP_ZERO
@@ -698,8 +698,8 @@ int hwrm_req_send_silent(struct bnxt *bp, void *req)
* @bp: The driver context.
* @req: The request for which indirect data will be associated.
* @size: The size of the allocation.
- * @dma: The bus address associated with the allocation. The HWRM API has no
- * knowledge about the type of the request and so cannot infer how the
+ * @dma_handle: The bus address associated with the allocation. The HWRM API has
+ * no knowledge about the type of the request and so cannot infer how the
* caller intends to use the indirect data. Thus, the caller is
* responsible for configuring the request object appropriately to
* point to the associated indirect memory. Note, DMA handle has the
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 46fae1acbeed..e6a4a768b10b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1884,9 +1884,6 @@ bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev)
{
struct bnxt_flower_indr_block_cb_priv *cb_priv;
- /* All callback list access should be protected by RTNL. */
- ASSERT_RTNL();
-
list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list)
if (cb_priv->tunnel_netdev == netdev)
return cb_priv;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 8a238e349e02..5e0e0e70d801 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12788,7 +12788,7 @@ static void tg3_get_ethtool_stats(struct net_device *dev,
memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}
-static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
+static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
{
int i;
__be32 *buf;
@@ -12822,15 +12822,11 @@ static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
offset = TG3_NVM_VPD_OFF;
len = TG3_NVM_VPD_LEN;
}
- } else {
- len = TG3_NVM_PCI_VPD_MAX_LEN;
- }
- buf = kmalloc(len, GFP_KERNEL);
- if (buf == NULL)
- return NULL;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return NULL;
- if (magic == TG3_EEPROM_MAGIC) {
for (i = 0; i < len; i += 4) {
/* The data is in little-endian format in NVRAM.
* Use the big-endian read routines to preserve
@@ -12841,12 +12837,9 @@ static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
}
*vpdlen = len;
} else {
- ssize_t cnt;
-
- cnt = pci_read_vpd(tp->pdev, 0, len, (u8 *)buf);
- if (cnt < 0)
- goto error;
- *vpdlen = cnt;
+ buf = pci_vpd_alloc(tp->pdev, vpdlen);
+ if (IS_ERR(buf))
+ return NULL;
}
return buf;
@@ -12868,9 +12861,10 @@ error:
static int tg3_test_nvram(struct tg3 *tp)
{
- u32 csum, magic, len;
+ u32 csum, magic;
__be32 *buf;
int i, j, k, err = 0, size;
+ unsigned int len;
if (tg3_flag(tp, NO_NVRAM))
return 0;
@@ -13013,33 +13007,10 @@ static int tg3_test_nvram(struct tg3 *tp)
if (!buf)
return -ENOMEM;
- i = pci_vpd_find_tag((u8 *)buf, len, PCI_VPD_LRDT_RO_DATA);
- if (i > 0) {
- j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
- if (j < 0)
- goto out;
-
- if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
- goto out;
-
- i += PCI_VPD_LRDT_TAG_SIZE;
- j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
- PCI_VPD_RO_KEYWORD_CHKSUM);
- if (j > 0) {
- u8 csum8 = 0;
-
- j += PCI_VPD_INFO_FLD_HDR_SIZE;
-
- for (i = 0; i <= j; i++)
- csum8 += ((u8 *)buf)[i];
-
- if (csum8)
- goto out;
- }
- }
-
- err = 0;
-
+ err = pci_vpd_check_csum(buf, len);
+ /* go on if no checksum found */
+ if (err == 1)
+ err = 0;
out:
kfree(buf);
return err;
@@ -15624,64 +15595,36 @@ skip_phy_reset:
static void tg3_read_vpd(struct tg3 *tp)
{
u8 *vpd_data;
- unsigned int block_end, rosize, len;
- u32 vpdlen;
- int j, i = 0;
+ unsigned int len, vpdlen;
+ int i;
vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
if (!vpd_data)
goto out_no_vpd;
- i = pci_vpd_find_tag(vpd_data, vpdlen, PCI_VPD_LRDT_RO_DATA);
+ i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
+ PCI_VPD_RO_KEYWORD_MFR_ID, &len);
if (i < 0)
- goto out_not_found;
-
- rosize = pci_vpd_lrdt_size(&vpd_data[i]);
- block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
- i += PCI_VPD_LRDT_TAG_SIZE;
+ goto partno;
- if (block_end > vpdlen)
- goto out_not_found;
-
- j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
- PCI_VPD_RO_KEYWORD_MFR_ID);
- if (j > 0) {
- len = pci_vpd_info_field_size(&vpd_data[j]);
-
- j += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (j + len > block_end || len != 4 ||
- memcmp(&vpd_data[j], "1028", 4))
- goto partno;
-
- j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
- PCI_VPD_RO_KEYWORD_VENDOR0);
- if (j < 0)
- goto partno;
+ if (len != 4 || memcmp(vpd_data + i, "1028", 4))
+ goto partno;
- len = pci_vpd_info_field_size(&vpd_data[j]);
-
- j += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (j + len > block_end)
- goto partno;
+ i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
+ PCI_VPD_RO_KEYWORD_VENDOR0, &len);
+ if (i < 0)
+ goto partno;
- if (len >= sizeof(tp->fw_ver))
- len = sizeof(tp->fw_ver) - 1;
- memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
- snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
- &vpd_data[j]);
- }
+ memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
+ snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
partno:
- i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
- PCI_VPD_RO_KEYWORD_PARTNO);
+ i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
+ PCI_VPD_RO_KEYWORD_PARTNO, &len);
if (i < 0)
goto out_not_found;
- len = pci_vpd_info_field_size(&vpd_data[i]);
-
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (len > TG3_BPN_SIZE ||
- (len + i) > vpdlen)
+ if (len > TG3_BPN_SIZE)
goto out_not_found;
memcpy(tp->board_part_number, &vpd_data[i], len);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 46ec4fdfd16a..1000c894064f 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2101,7 +2101,6 @@
/* Hardware Legacy NVRAM layout */
#define TG3_NVM_VPD_OFF 0x100
#define TG3_NVM_VPD_LEN 256
-#define TG3_NVM_PCI_VPD_MAX_LEN 512
/* Hardware Selfboot NVRAM layout */
#define TG3_NVM_HWSB_CFG1 0x00000004
diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c
index 8b7b59908a1a..f66d22de5168 100644
--- a/drivers/net/ethernet/cadence/macb_pci.c
+++ b/drivers/net/ethernet/cadence/macb_pci.c
@@ -111,9 +111,9 @@ static void macb_remove(struct pci_dev *pdev)
struct platform_device *plat_dev = pci_get_drvdata(pdev);
struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev);
- platform_device_unregister(plat_dev);
clk_unregister(plat_data->pclk);
clk_unregister(plat_data->hclk);
+ platform_device_unregister(plat_dev);
}
static const struct pci_device_id dev_id_table[] = {
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index c6fe0f2a4d0e..f6396ac64006 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -526,7 +526,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
oct->irq_name_storage = NULL;
}
/* Soft reset the octeon device before exiting */
- if (oct->pci_dev->reset_fn)
+ if (!pcie_reset_flr(oct->pci_dev, PCI_RESET_PROBE))
octeon_pci_flr(oct);
else
cn23xx_vf_ask_pf_to_do_flr(oct);
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 73c016166f06..d246eee4b6d5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -1111,6 +1111,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!adapter->registered_device_map) {
pr_err("%s: could not register any net devices\n",
pci_name(pdev));
+ err = -EINVAL;
goto out_release_adapter_res;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index e21a2e691382..c3afec1041f8 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -3301,6 +3301,9 @@ void t3_sge_stop(struct adapter *adap)
t3_sge_stop_dma(adap);
+ /* workqueues aren't initialized otherwise */
+ if (!(adap->flags & FULL_INIT_DONE))
+ return;
for (i = 0; i < SGE_QSETS; ++i) {
struct sge_qset *qs = &adap->sge.qs[i];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 9058f09f921e..ecea3cdd30b3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -84,7 +84,6 @@ extern struct mutex uld_mutex;
enum {
MAX_NPORTS = 4, /* max # of ports */
SERNUM_LEN = 24, /* Serial # length */
- EC_LEN = 16, /* E/C length */
ID_LEN = 16, /* ID length */
PN_LEN = 16, /* Part Number length */
MACADDR_LEN = 12, /* MAC Address length */
@@ -391,7 +390,6 @@ struct tp_params {
struct vpd_params {
unsigned int cclk;
- u8 ec[EC_LEN + 1];
u8 sn[SERNUM_LEN + 1];
u8 id[ID_LEN + 1];
u8 pn[PN_LEN + 1];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 6606fb8b3e42..64144b6171d7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2743,10 +2743,9 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
*/
int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
- int i, ret = 0, addr;
- int ec, sn, pn, na;
- u8 *vpd, csum, base_val = 0;
- unsigned int vpdr_len, kw_offset, id_len;
+ unsigned int id_len, pn_len, sn_len, na_len;
+ int id, sn, pn, na, addr, ret = 0;
+ u8 *vpd, base_val = 0;
vpd = vmalloc(VPD_LEN);
if (!vpd)
@@ -2765,74 +2764,52 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
if (ret < 0)
goto out;
- if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
- dev_err(adapter->pdev_dev, "missing VPD ID string\n");
- ret = -EINVAL;
+ ret = pci_vpd_find_id_string(vpd, VPD_LEN, &id_len);
+ if (ret < 0)
goto out;
- }
+ id = ret;
- id_len = pci_vpd_lrdt_size(vpd);
- if (id_len > ID_LEN)
- id_len = ID_LEN;
-
- i = pci_vpd_find_tag(vpd, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
- if (i < 0) {
- dev_err(adapter->pdev_dev, "missing VPD-R section\n");
+ ret = pci_vpd_check_csum(vpd, VPD_LEN);
+ if (ret) {
+ dev_err(adapter->pdev_dev, "VPD checksum incorrect or missing\n");
ret = -EINVAL;
goto out;
}
- vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
- kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
- if (vpdr_len + kw_offset > VPD_LEN) {
- dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
- ret = -EINVAL;
+ ret = pci_vpd_find_ro_info_keyword(vpd, VPD_LEN,
+ PCI_VPD_RO_KEYWORD_SERIALNO, &sn_len);
+ if (ret < 0)
goto out;
- }
-
-#define FIND_VPD_KW(var, name) do { \
- var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
- if (var < 0) { \
- dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
- ret = -EINVAL; \
- goto out; \
- } \
- var += PCI_VPD_INFO_FLD_HDR_SIZE; \
-} while (0)
-
- FIND_VPD_KW(i, "RV");
- for (csum = 0; i >= 0; i--)
- csum += vpd[i];
+ sn = ret;
- if (csum) {
- dev_err(adapter->pdev_dev,
- "corrupted VPD EEPROM, actual csum %u\n", csum);
- ret = -EINVAL;
+ ret = pci_vpd_find_ro_info_keyword(vpd, VPD_LEN,
+ PCI_VPD_RO_KEYWORD_PARTNO, &pn_len);
+ if (ret < 0)
goto out;
- }
+ pn = ret;
- FIND_VPD_KW(ec, "EC");
- FIND_VPD_KW(sn, "SN");
- FIND_VPD_KW(pn, "PN");
- FIND_VPD_KW(na, "NA");
-#undef FIND_VPD_KW
+ ret = pci_vpd_find_ro_info_keyword(vpd, VPD_LEN, "NA", &na_len);
+ if (ret < 0)
+ goto out;
+ na = ret;
- memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
+ memcpy(p->id, vpd + id, min_t(int, id_len, ID_LEN));
strim(p->id);
- memcpy(p->ec, vpd + ec, EC_LEN);
- strim(p->ec);
- i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
- memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
+ memcpy(p->sn, vpd + sn, min_t(int, sn_len, SERNUM_LEN));
strim(p->sn);
- i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
- memcpy(p->pn, vpd + pn, min(i, PN_LEN));
+ memcpy(p->pn, vpd + pn, min_t(int, pn_len, PN_LEN));
strim(p->pn);
- memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
+ memcpy(p->na, vpd + na, min_t(int, na_len, MACADDR_LEN));
strim((char *)p->na);
out:
vfree(vpd);
- return ret < 0 ? ret : 0;
+ if (ret < 0) {
+ dev_err(adapter->pdev_dev, "error reading VPD\n");
+ return ret;
+ }
+
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index dac1764ba740..5bdf731d9503 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -38,7 +38,7 @@ config CS89x0_ISA
config CS89x0_PLATFORM
tristate "CS89x0 platform driver support"
- depends on ARM || COMPILE_TEST
+ depends on ARM || (COMPILE_TEST && !PPC)
select CS89x0
help
Say Y to compile the cs89x0 platform driver. This makes this driver
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 3ca93adb9662..042327b9981f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -419,7 +419,7 @@ static void enetc_rx_dim_work(struct work_struct *w)
static void enetc_rx_net_dim(struct enetc_int_vector *v)
{
- struct dim_sample dim_sample;
+ struct dim_sample dim_sample = {};
v->comp_cnt++;
@@ -1879,7 +1879,6 @@ static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
- cpumask_t cpu_mask;
int i, j, err;
for (i = 0; i < priv->bdr_int_num; i++) {
@@ -1908,9 +1907,7 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
}
- cpumask_clear(&cpu_mask);
- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
- irq_set_affinity_hint(irq, &cpu_mask);
+ irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus()));
}
return 0;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
index ee1468e3eaa3..91f02c505028 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-/* Copyright 2021 NXP Semiconductors
+/* Copyright 2021 NXP
*
* The Integrated Endpoint Register Block (IERB) is configured by pre-boot
* software and is supposed to be to ENETC what a NVRAM is to a 'real' PCIe
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.h b/drivers/net/ethernet/freescale/enetc/enetc_ierb.h
index b3b774e0998a..c2ce47c4be9f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ierb.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
-/* Copyright 2021 NXP Semiconductors */
+/* Copyright 2021 NXP */
#include <linux/pci.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 80bd5c629fa0..ec87b370bba1 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -4176,5 +4176,4 @@ static struct platform_driver fec_driver = {
module_platform_driver(fec_driver);
-MODULE_ALIAS("platform:"DRIVER_NAME);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 22af3d6ce178..adc54a726661 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -61,6 +61,9 @@ static unsigned int tx_sgl = 1;
module_param(tx_sgl, uint, 0600);
MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping");
+static bool page_pool_enabled = true;
+module_param(page_pool_enabled, bool, 0400);
+
#define HNS3_SGL_SIZE(nfrag) (sizeof(struct scatterlist) * (nfrag) + \
sizeof(struct sg_table))
#define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \
@@ -73,6 +76,7 @@ MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to opt
#define HNS3_OUTER_VLAN_TAG 2
#define HNS3_MIN_TX_LEN 33U
+#define HNS3_MIN_TUN_PKT_LEN 65U
/* hns3_pci_tbl - PCI Device ID Table
*
@@ -1424,8 +1428,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
l4.tcp->doff);
break;
case IPPROTO_UDP:
- if (hns3_tunnel_csum_bug(skb))
- return skb_checksum_help(skb);
+ if (hns3_tunnel_csum_bug(skb)) {
+ int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN);
+
+ return ret ? ret : skb_checksum_help(skb);
+ }
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
@@ -4753,7 +4760,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
goto out_with_desc_cb;
if (!HNAE3_IS_TX_RING(ring)) {
- hns3_alloc_page_pool(ring);
+ if (page_pool_enabled)
+ hns3_alloc_page_pool(ring);
ret = hns3_alloc_ring_buffers(ring);
if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 474c6d1664e7..ac9b69513332 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -362,7 +362,7 @@ static void hclge_set_default_capability(struct hclge_dev *hdev)
}
}
-const struct hclge_caps_bit_map hclge_cmd_caps_bit_map0[] = {
+static const struct hclge_caps_bit_map hclge_cmd_caps_bit_map0[] = {
{HCLGE_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
{HCLGE_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B},
{HCLGE_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 68ed1715ac52..87d96f82c318 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -1724,6 +1724,10 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
}
bd_num = le32_to_cpu(req->bd_num);
+ if (!bd_num) {
+ dev_err(&hdev->pdev->dev, "imp statistics bd number is 0!\n");
+ return -EINVAL;
+ }
desc_src = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
if (!desc_src)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 718c16d686fa..bb9b026ae88e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -2445,12 +2445,12 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
return;
}
- dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%u), queue_id(%u)\n",
+ dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vport(%u), queue_id(%u)\n",
vf_id, q_id);
if (vf_id) {
if (vf_id >= hdev->num_alloc_vport) {
- dev_err(dev, "invalid vf id(%u)\n", vf_id);
+ dev_err(dev, "invalid vport(%u)\n", vf_id);
return;
}
@@ -2463,8 +2463,8 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]);
if (ret)
- dev_err(dev, "inform reset to vf(%u) failed %d!\n",
- hdev->vport->vport_id, ret);
+ dev_err(dev, "inform reset to vport(%u) failed %d!\n",
+ vf_id, ret);
} else {
set_bit(HNAE3_FUNC_RESET, reset_requests);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index e55ba2e511b1..47fea8985861 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -1528,9 +1528,10 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
static int hclge_configure(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ const struct cpumask *cpumask = cpu_online_mask;
struct hclge_cfg cfg;
unsigned int i;
- int ret;
+ int node, ret;
ret = hclge_get_cfg(hdev, &cfg);
if (ret)
@@ -1595,11 +1596,12 @@ static int hclge_configure(struct hclge_dev *hdev)
hclge_init_kdump_kernel_config(hdev);
- /* Set the init affinity based on pci func number */
- i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
- i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
- cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
- &hdev->affinity_mask);
+ /* Set the affinity based on numa node */
+ node = dev_to_node(&hdev->pdev->dev);
+ if (node != NUMA_NO_NODE)
+ cpumask = cpumask_of_node(node);
+
+ cpumask_copy(&hdev->affinity_mask, cpumask);
return ret;
}
@@ -3659,7 +3661,8 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
if (ret) {
dev_err(&hdev->pdev->dev,
"set vf(%u) rst failed %d!\n",
- vport->vport_id, ret);
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM,
+ ret);
return ret;
}
@@ -3674,7 +3677,8 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
if (ret)
dev_warn(&hdev->pdev->dev,
"inform reset to vf(%u) failed %d!\n",
- vport->vport_id, ret);
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM,
+ ret);
}
return 0;
@@ -4739,6 +4743,24 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
return 0;
}
+static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
+ u8 *hash_algo)
+{
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
+ *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
+ return 0;
+ case ETH_RSS_HASH_XOR:
+ *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
+ return 0;
+ case ETH_RSS_HASH_NO_CHANGE:
+ *hash_algo = vport->rss_algo;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
@@ -4748,30 +4770,27 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
u8 hash_algo;
int ret, i;
+ ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
+ return ret;
+ }
+
/* Set the RSS Hash Key if specified by the user */
if (key) {
- switch (hfunc) {
- case ETH_RSS_HASH_TOP:
- hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
- break;
- case ETH_RSS_HASH_XOR:
- hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
- break;
- case ETH_RSS_HASH_NO_CHANGE:
- hash_algo = vport->rss_algo;
- break;
- default:
- return -EINVAL;
- }
-
ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
if (ret)
return ret;
/* Update the shadow RSS key with user specified qids */
memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
- vport->rss_algo = hash_algo;
+ } else {
+ ret = hclge_set_rss_algo_key(hdev, hash_algo,
+ vport->rss_hash_key);
+ if (ret)
+ return ret;
}
+ vport->rss_algo = hash_algo;
/* Update the shadow RSS table with user specified qids */
for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
@@ -6625,10 +6644,13 @@ static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
u16 tqps;
+ /* To keep consistent with user's configuration, minus 1 when
+ * printing 'vf', because vf id from ethtool is added 1 for vf.
+ */
if (vf > hdev->num_req_vfs) {
dev_err(&hdev->pdev->dev,
- "Error: vf id (%u) > max vf num (%u)\n",
- vf, hdev->num_req_vfs);
+ "Error: vf id (%u) should be less than %u\n",
+ vf - 1, hdev->num_req_vfs);
return -EINVAL;
}
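The comment in the hunk above relies on the ethtool flow-spec convention that the VF field of ring_cookie carries vf_index + 1, with 0 meaning the rule targets the PF, which is why the error message subtracts 1 again. A minimal standalone sketch of that decoding; the RING_VF_* names are illustrative stand-ins, not driver or uapi identifiers:

#include <stdint.h>

#define RING_VF_SHIFT 32
#define RING_VF_MASK  0xffULL

/* Returns the zero-based VF index encoded in ring_cookie, or -1 when the
 * rule is directed at the PF itself (encoded VF field of 0).
 */
static int ring_cookie_to_vf_index(uint64_t ring_cookie)
{
	uint8_t vf = (ring_cookie >> RING_VF_SHIFT) & RING_VF_MASK;

	return vf ? (int)vf - 1 : -1;
}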
@@ -8125,11 +8147,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
hclge_clear_arfs_rules(hdev);
spin_unlock_bh(&hdev->fd_rule_lock);
- /* If it is not PF reset, the firmware will disable the MAC,
+ /* If it is not PF reset or FLR, the firmware will disable the MAC,
* so it only need to stop phy here.
*/
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
- hdev->reset_type != HNAE3_FUNC_RESET) {
+ hdev->reset_type != HNAE3_FUNC_RESET &&
+ hdev->reset_type != HNAE3_FLR_RESET) {
hclge_mac_stop_phy(hdev);
hclge_update_link_status(hdev);
return;
@@ -9794,6 +9817,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
if (is_kill && !vlan_id)
return 0;
+ if (vlan_id >= VLAN_N_VID)
+ return -EINVAL;
+
ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -10700,7 +10726,8 @@ static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
return 0;
}
-static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
+static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
+ u8 *reset_status)
{
struct hclge_reset_tqp_queue_cmd *req;
struct hclge_desc desc;
@@ -10718,7 +10745,9 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
return ret;
}
- return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+ *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+
+ return 0;
}
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
@@ -10737,7 +10766,7 @@ static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
u16 reset_try_times = 0;
- int reset_status;
+ u8 reset_status;
u16 queue_gid;
int ret;
u16 i;
@@ -10753,7 +10782,11 @@ static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
}
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
- reset_status = hclge_get_reset_status(hdev, queue_gid);
+ ret = hclge_get_reset_status(hdev, queue_gid,
+ &reset_status);
+ if (ret)
+ return ret;
+
if (reset_status)
break;
@@ -11446,11 +11479,11 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
struct hclge_vport *vport = &hdev->vport[i];
int ret;
- /* Send cmd to clear VF's FUNC_RST_ING */
+ /* Send cmd to clear vport's FUNC_RST_ING */
ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
if (ret)
dev_warn(&hdev->pdev->dev,
- "clear vf(%u) rst failed %d!\n",
+ "clear vport(%u) rst failed %d!\n",
vport->vport_id, ret);
}
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 2ce5302c5956..65d78ee4d65a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -566,7 +566,7 @@ static int hclge_reset_vf(struct hclge_vport *vport)
struct hclge_dev *hdev = vport->back;
dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
- vport->vport_id);
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM);
return hclge_func_reset_cmd(hdev, vport->vport_id);
}
@@ -590,9 +590,17 @@ static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
struct hclge_respond_to_vf_msg *resp_msg)
{
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
u16 queue_id, qid_in_pf;
memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
+ if (queue_id >= handle->kinfo.num_tqps) {
+ dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
+ queue_id, mbx_req->mbx_src_vfid);
+ return;
+ }
+
qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
resp_msg->len = sizeof(qid_in_pf);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 78d5bf1ea561..44618cc4cca1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -581,7 +581,7 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "vf%u, qs%u failed to set tx_rate:%d, ret=%d\n",
+ "vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
vport->vport_id, shap_cfg_cmd->qs_id,
max_tx_rate, ret);
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 59772b0e9531..f89bfb352adf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -342,7 +342,7 @@ static void hclgevf_set_default_capability(struct hclgevf_dev *hdev)
set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
}
-const struct hclgevf_caps_bit_map hclgevf_cmd_caps_bit_map0[] = {
+static const struct hclgevf_caps_bit_map hclgevf_cmd_caps_bit_map0[] = {
{HCLGEVF_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
{HCLGEVF_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
{HCLGEVF_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 82e727020120..5fdac8685f95 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -816,40 +816,56 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
return 0;
}
+static int hclgevf_parse_rss_hfunc(struct hclgevf_dev *hdev, const u8 hfunc,
+ u8 *hash_algo)
+{
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
+ *hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
+ return 0;
+ case ETH_RSS_HASH_XOR:
+ *hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
+ return 0;
+ case ETH_RSS_HASH_NO_CHANGE:
+ *hash_algo = hdev->rss_cfg.hash_algo;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ u8 hash_algo;
int ret, i;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ ret = hclgevf_parse_rss_hfunc(hdev, hfunc, &hash_algo);
+ if (ret)
+ return ret;
+
/* Set the RSS Hash Key if specified by the user */
if (key) {
- switch (hfunc) {
- case ETH_RSS_HASH_TOP:
- rss_cfg->hash_algo =
- HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
- break;
- case ETH_RSS_HASH_XOR:
- rss_cfg->hash_algo =
- HCLGEVF_RSS_HASH_ALGO_SIMPLE;
- break;
- case ETH_RSS_HASH_NO_CHANGE:
- break;
- default:
- return -EINVAL;
- }
-
- ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
- key);
- if (ret)
+ ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "invalid hfunc type %u\n", hfunc);
return ret;
+ }
/* Update the shadow RSS key with user specified qids */
memcpy(rss_cfg->rss_hash_key, key,
HCLGEVF_RSS_KEY_SIZE);
+ } else {
+ ret = hclgevf_set_rss_algo_key(hdev, hash_algo,
+ rss_cfg->rss_hash_key);
+ if (ret)
+ return ret;
}
+ rss_cfg->hash_algo = hash_algo;
}
/* update the shadow RSS table with user specified qids */
@@ -2465,6 +2481,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
hclgevf_enable_vector(&hdev->misc_vector, false);
event_cause = hclgevf_check_evt_cause(hdev, &clearval);
+ if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
+ hclgevf_clear_event_cause(hdev, clearval);
switch (event_cause) {
case HCLGEVF_VECTOR0_EVENT_RST:
@@ -2477,10 +2495,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
break;
}
- if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
- hclgevf_clear_event_cause(hdev, clearval);
+ if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
hclgevf_enable_vector(&hdev->misc_vector, true);
- }
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index b8a40146b895..b482f6f633bd 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -1144,7 +1144,7 @@ static struct net_device * __init i82596_probe(void)
err = -ENODEV;
goto out;
}
- memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN); /* YUCK! Get addr from NOVRAM */
+ memcpy(eth_addr, absolute_pointer(0xfffc1f2c), ETH_ALEN); /* YUCK! Get addr from NOVRAM */
dev->base_addr = MVME_I596_BASE;
dev->irq = (unsigned) MVME16x_IRQ_I596;
goto found;
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 893e0ddcb611..0696f723228a 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -314,7 +314,7 @@ static int __init sun3_82586_probe(void)
err = register_netdev(dev);
if (err)
goto out2;
- return dev;
+ return 0;
out2:
release_region(ioaddr, SUN3_82586_TOTAL_SIZE);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index a775c69e4fd7..a4579b340120 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -4700,6 +4700,22 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
return 0;
}
+ if (adapter->failover_pending) {
+ adapter->init_done_rc = -EAGAIN;
+ netdev_dbg(netdev, "Failover pending, ignoring login response\n");
+ complete(&adapter->init_done);
+ /* login response buffer will be released on reset */
+ return 0;
+ }
+
netdev->mtu = adapter->req_mtu - ETH_HLEN;
netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index b0b6f90deb7d..ed8ea63bb172 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -335,6 +335,7 @@ config IGC
tristate "Intel(R) Ethernet Controller I225-LM/I225-V support"
default n
depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
help
This driver supports Intel(R) Ethernet Controller I225-LM/I225-V
family of adapters.
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index eadcb9958346..3c4f08d20414 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -695,6 +695,7 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
{
if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ set_bit(ICE_FLAG_AUX_ENA, pf->flags);
ice_plug_aux_dev(pf);
}
}
@@ -707,5 +708,6 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
{
ice_unplug_aux_dev(pf);
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
}
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index 1f2afdf6cd48..adcc9a251595 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -271,6 +271,12 @@ int ice_plug_aux_dev(struct ice_pf *pf)
struct auxiliary_device *adev;
int ret;
+ /* if this PF doesn't support a technology that requires auxiliary
+ * devices, then gracefully exit
+ */
+ if (!ice_is_aux_ena(pf))
+ return 0;
+
iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
if (!iadev)
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index b877efae61df..0e19b4d02e62 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -6350,7 +6350,9 @@ static int igc_probe(struct pci_dev *pdev,
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= netdev->features;
+ netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+ netdev->mpls_features |= NETIF_F_HW_CSUM;
+ netdev->hw_enc_features |= netdev->vlan_features;
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 7f3d01059e19..34a089b71e55 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1487,7 +1487,7 @@ static int cgx_lmac_init(struct cgx *cgx)
MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count;
err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
if (err)
- return err;
+ goto err_name_free;
/* Reserve first entry for default MAC address */
set_bit(0, lmac->mac_to_index_bmap.bmap);
@@ -1497,7 +1497,7 @@ static int cgx_lmac_init(struct cgx *cgx)
spin_lock_init(&lmac->event_cb_lock);
err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false);
if (err)
- goto err_irq;
+ goto err_bitmap_free;
/* Add reference */
cgx->lmac_idmap[lmac->lmac_id] = lmac;
@@ -1507,7 +1507,9 @@ static int cgx_lmac_init(struct cgx *cgx)
return cgx_lmac_verify_fwi_version(cgx);
-err_irq:
+err_bitmap_free:
+ rvu_free_bitmap(&lmac->mac_to_index_bmap);
+err_name_free:
kfree(lmac->name);
err_lmac_free:
kfree(lmac);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index ce647e037f4d..35836903b7fb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -92,7 +92,8 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
*/
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero)
{
- unsigned long timeout = jiffies + usecs_to_jiffies(10000);
+ unsigned long timeout = jiffies + usecs_to_jiffies(20000);
+ bool twice = false;
void __iomem *reg;
u64 reg_val;
@@ -107,6 +108,15 @@ again:
usleep_range(1, 5);
goto again;
}
+ /* In scenarios where CPU is scheduled out before checking
+ * 'time_before' (above) and gets scheduled in such that
+ * jiffies are beyond timeout value, then check again if HW is
+ * done with the operation in the meantime.
+ */
+ if (!twice) {
+ twice = true;
+ goto again;
+ }
return -EBUSY;
}
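A condensed sketch of the retry-once polling pattern this hunk introduces, with hypothetical read_reg()/deadline_passed() callbacks standing in for readq()/time_before(); the inter-sample delay and the zero-vs-set flag of rvu_poll_reg() are omitted for brevity:

#include <stdbool.h>

static bool poll_until_clear(unsigned long long (*read_reg)(void),
			     unsigned long long mask,
			     bool (*deadline_passed)(void))
{
	bool retried = false;

again:
	if (!(read_reg() & mask))
		return true;		/* HW finished */
	if (!deadline_passed())
		goto again;		/* deadline not hit yet, keep polling */
	if (!retried) {
		retried = true;		/* one extra sample after the deadline,
					 * in case we were scheduled out */
		goto again;
	}
	return false;			/* genuine timeout */
}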
@@ -201,6 +211,11 @@ int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
return 0;
}
+void rvu_free_bitmap(struct rsrc_bmap *rsrc)
+{
+ kfree(rsrc->bmap);
+}
+
/* Get block LF's HW index from a PF_FUNC's block slot number */
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index d38e5c980c30..1d9411232f1d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -638,6 +638,7 @@ static inline bool is_rvu_fwdata_valid(struct rvu *rvu)
}
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
+void rvu_free_bitmap(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
bool is_rsrc_free(struct rsrc_bmap *rsrc, int id);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index 3cc76f14d2fd..95f21dfdba48 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -27,7 +27,8 @@ int cn10k_lmtst_init(struct otx2_nic *pfvf)
{
struct lmtst_tbl_setup_req *req;
- int qcount, err;
+ struct otx2_lmt_info *lmt_info;
+ int err, cpu;
if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
pfvf->hw_ops = &otx2_hw_ops;
@@ -35,15 +36,9 @@ int cn10k_lmtst_init(struct otx2_nic *pfvf)
}
pfvf->hw_ops = &cn10k_hw_ops;
- qcount = pfvf->hw.max_queues;
- /* LMTST lines allocation
- * qcount = num_online_cpus();
- * NPA = TX + RX + XDP.
- * NIX = TX * 32 (For Burst SQE flush).
- */
- pfvf->tot_lmt_lines = (qcount * 3) + (qcount * 32);
- pfvf->npa_lmt_lines = qcount * 3;
- pfvf->nix_lmt_size = LMT_BURST_SIZE * LMT_LINE_SIZE;
+ /* Total LMTLINES = num_online_cpus() * 32 (For Burst flush).*/
+ pfvf->tot_lmt_lines = (num_online_cpus() * LMT_BURST_SIZE);
+ pfvf->hw.lmt_info = alloc_percpu(struct otx2_lmt_info);
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox);
@@ -66,6 +61,13 @@ int cn10k_lmtst_init(struct otx2_nic *pfvf)
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
+ for_each_possible_cpu(cpu) {
+ lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, cpu);
+ lmt_info->lmt_addr = ((u64)pfvf->hw.lmt_base +
+ (cpu * LMT_BURST_SIZE * LMT_LINE_SIZE));
+ lmt_info->lmt_id = cpu * LMT_BURST_SIZE;
+ }
+
return 0;
}
EXPORT_SYMBOL(cn10k_lmtst_init);
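The per-CPU carve-up done in the loop above reduces to simple arithmetic: each CPU owns LMT_BURST_SIZE consecutive 128-byte LMT lines starting at lmt_base. A small standalone restatement (illustrative helpers, not driver code):

#include <stdint.h>

#define LMT_LINE_SIZE  128
#define LMT_BURST_SIZE 32

/* Base address of the LMT lines owned by a given CPU. */
static uint64_t cpu_lmt_addr(uint64_t lmt_base, int cpu)
{
	return lmt_base + (uint64_t)cpu * LMT_BURST_SIZE * LMT_LINE_SIZE;
}

/* First LMT id in that CPU's burst window. */
static uint16_t cpu_lmt_id(int cpu)
{
	return (uint16_t)(cpu * LMT_BURST_SIZE);
}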
@@ -74,13 +76,6 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
{
struct nix_cn10k_aq_enq_req *aq;
struct otx2_nic *pfvf = dev;
- struct otx2_snd_queue *sq;
-
- sq = &pfvf->qset.sq[qidx];
- sq->lmt_addr = (u64 *)((u64)pfvf->hw.nix_lmt_base +
- (qidx * pfvf->nix_lmt_size));
-
- sq->lmt_id = pfvf->npa_lmt_lines + (qidx * LMT_BURST_SIZE);
/* Get memory to put this msg */
aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
@@ -125,8 +120,7 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
if (otx2_alloc_buffer(pfvf, cq, &bufptr)) {
if (num_ptrs--)
__cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
- num_ptrs,
- cq->rbpool->lmt_addr);
+ num_ptrs);
break;
}
cq->pool_ptrs--;
@@ -134,8 +128,7 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
num_ptrs++;
if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) {
__cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
- num_ptrs,
- cq->rbpool->lmt_addr);
+ num_ptrs);
num_ptrs = 1;
}
}
@@ -143,20 +136,23 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
{
+ struct otx2_lmt_info *lmt_info;
+ struct otx2_nic *pfvf = dev;
u64 val = 0, tar_addr = 0;
+ lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id());
/* FIXME: val[0:10] LMT_ID.
* [12:15] no of LMTST - 1 in the burst.
* [19:63] data size of each LMTST in the burst except first.
*/
- val = (sq->lmt_id & 0x7FF);
+ val = (lmt_info->lmt_id & 0x7FF);
/* Target address for LMTST flush tells HW how many 128bit
* words are present.
* tar_addr[6:4] size of first LMTST - 1 in units of 128b.
*/
tar_addr |= sq->io_addr | (((size / 16) - 1) & 0x7) << 4;
dma_wmb();
- memcpy(sq->lmt_addr, sq->sqe_base, size);
+ memcpy((u64 *)lmt_info->lmt_addr, sq->sqe_base, size);
cn10k_lmt_flush(val, tar_addr);
sq->head++;
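A standalone sketch of the field packing described by the FIXME comment above, using the same bit positions as the code (val[10:0] carries the LMT id, tar_addr[6:4] carries the size of the first LMTST in 128-bit words minus one); the helper names are illustrative, not from the driver:

#include <stdint.h>

static inline uint64_t lmtst_val(uint16_t lmt_id)
{
	return (uint64_t)(lmt_id & 0x7FF);	/* val[10:0] = LMT id */
}

static inline uint64_t lmtst_tar_addr(uint64_t io_addr, int size_bytes)
{
	/* tar_addr[6:4] = number of 128-bit words in the first LMTST - 1 */
	return io_addr | ((((uint64_t)size_bytes / 16 - 1) & 0x7) << 4);
}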
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index ce25c2744435..78df173e6df2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1230,11 +1230,6 @@ static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
pool->rbsize = buf_size;
- /* Set LMTST addr for NPA batch free */
- if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag))
- pool->lmt_addr = (__force u64 *)((u64)pfvf->hw.npa_lmt_base +
- (pool_id * LMT_LINE_SIZE));
-
/* Initialize this pool's context via AF */
aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
if (!aq) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 48227cec06ee..a51ecd771d07 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -53,6 +53,10 @@ enum arua_mapped_qtypes {
/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
#define SEND_CQ_SKID 2000
+struct otx2_lmt_info {
+ u64 lmt_addr;
+ u16 lmt_id;
+};
/* RSS configuration */
struct otx2_rss_ctx {
u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
@@ -224,8 +228,7 @@ struct otx2_hw {
#define LMT_LINE_SIZE 128
#define LMT_BURST_SIZE 32 /* 32 LMTST lines for burst SQE flush */
u64 *lmt_base;
- u64 *npa_lmt_base;
- u64 *nix_lmt_base;
+ struct otx2_lmt_info __percpu *lmt_info;
};
enum vfperm {
@@ -407,17 +410,18 @@ static inline bool is_96xx_B0(struct pci_dev *pdev)
*/
#define PCI_REVISION_ID_96XX 0x00
#define PCI_REVISION_ID_95XX 0x10
-#define PCI_REVISION_ID_LOKI 0x20
+#define PCI_REVISION_ID_95XXN 0x20
#define PCI_REVISION_ID_98XX 0x30
#define PCI_REVISION_ID_95XXMM 0x40
+#define PCI_REVISION_ID_95XXO 0xE0
static inline bool is_dev_otx2(struct pci_dev *pdev)
{
u8 midr = pdev->revision & 0xF0;
return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
- midr == PCI_REVISION_ID_LOKI || midr == PCI_REVISION_ID_98XX ||
- midr == PCI_REVISION_ID_95XXMM);
+ midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX ||
+ midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
@@ -562,15 +566,16 @@ static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
#endif
static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
- u64 *ptrs, u64 num_ptrs,
- u64 *lmt_addr)
+ u64 *ptrs, u64 num_ptrs)
{
+ struct otx2_lmt_info *lmt_info;
u64 size = 0, count_eot = 0;
u64 tar_addr, val = 0;
+ lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id());
tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0);
/* LMTID is same as AURA Id */
- val = (aura & 0x7FF) | BIT_ULL(63);
+ val = (lmt_info->lmt_id & 0x7FF) | BIT_ULL(63);
/* Set if [127:64] of last 128bit word has a valid pointer */
count_eot = (num_ptrs % 2) ? 0ULL : 1ULL;
/* Set AURA ID to free pointer */
@@ -586,7 +591,7 @@ static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
size++;
tar_addr |= ((size - 1) & 0x7) << 4;
}
- memcpy(lmt_addr, ptrs, sizeof(u64) * num_ptrs);
+ memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
/* Perform LMTST flush */
cn10k_lmt_flush(val, tar_addr);
}
@@ -594,12 +599,11 @@ static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
{
struct otx2_nic *pfvf = dev;
- struct otx2_pool *pool;
u64 ptrs[2];
- pool = &pfvf->qset.pool[aura];
ptrs[1] = buf;
- __cn10k_aura_freeptr(pfvf, aura, ptrs, 2, pool->lmt_addr);
+ /* Free only one buffer at time during init and teardown */
+ __cn10k_aura_freeptr(pfvf, aura, ptrs, 2);
}
/* Alloc pointer from pool/aura */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 799486c72177..dbfa3bc39e34 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -16,8 +16,8 @@
#include "otx2_common.h"
#include "otx2_ptp.h"
-#define DRV_NAME "octeontx2-nicpf"
-#define DRV_VF_NAME "octeontx2-nicvf"
+#define DRV_NAME "rvu-nicpf"
+#define DRV_VF_NAME "rvu-nicvf"
struct otx2_stat {
char name[ETH_GSTRING_LEN];
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 2f2e8a3d7924..53df7fff92c4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1533,14 +1533,6 @@ int otx2_open(struct net_device *netdev)
if (!qset->rq)
goto err_free_mem;
- if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
- /* Reserve LMT lines for NPA AURA batch free */
- pf->hw.npa_lmt_base = pf->hw.lmt_base;
- /* Reserve LMT lines for NIX TX */
- pf->hw.nix_lmt_base = (u64 *)((u64)pf->hw.npa_lmt_base +
- (pf->npa_lmt_lines * LMT_LINE_SIZE));
- }
-
err = otx2_init_hw_resources(pf);
if (err)
goto err_free_mem;
@@ -2668,6 +2660,8 @@ err_del_mcam_entries:
err_ptp_destroy:
otx2_ptp_destroy(pf);
err_detach_rsrc:
+ if (pf->hw.lmt_info)
+ free_percpu(pf->hw.lmt_info);
if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
qmem_free(pf->dev, pf->dync_lmt);
otx2_detach_resources(&pf->mbox);
@@ -2811,6 +2805,8 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_mcam_flow_del(pf);
otx2_shutdown_tc(pf);
otx2_detach_resources(&pf->mbox);
+ if (pf->hw.lmt_info)
+ free_percpu(pf->hw.lmt_info);
if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
qmem_free(pf->dev, pf->dync_lmt);
otx2_disable_mbox_intr(pf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 869de5f59e73..3ff1ad79c001 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -80,7 +80,6 @@ struct otx2_snd_queue {
u16 num_sqbs;
u16 sqe_thresh;
u8 sqe_per_sqb;
- u32 lmt_id;
u64 io_addr;
u64 *aura_fc_addr;
u64 *lmt_addr;
@@ -111,7 +110,6 @@ struct otx2_cq_poll {
struct otx2_pool {
struct qmem *stack;
struct qmem *fc_addr;
- u64 *lmt_addr;
u16 rbsize;
};
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index b5f68f66d42a..7bb1f20002b5 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -186,6 +186,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
int hash;
int i;
+ if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
+ return -EEXIST;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
struct flow_match_meta match;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index a2f61a87cef8..8af7f2827322 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -372,6 +372,9 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
int nhoff = skb_network_offset(skb);
int ret = 0;
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
if (skb->protocol != htons(ETH_P_IP))
return -EPROTONOSUPPORT;
@@ -1269,7 +1272,6 @@ static void mlx4_en_do_set_rx_mode(struct work_struct *work)
if (!netif_carrier_ok(dev)) {
if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
if (priv->port_state.link_state) {
- priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
netif_carrier_on(dev);
en_dbg(LINK, priv, "Link Up\n");
}
@@ -1557,26 +1559,36 @@ static void mlx4_en_service_task(struct work_struct *work)
mutex_unlock(&mdev->state_lock);
}
-static void mlx4_en_linkstate(struct work_struct *work)
+static void mlx4_en_linkstate(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_port_state *port_state = &priv->port_state;
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->dev;
+ bool up;
+
+ if (mlx4_en_QUERY_PORT(mdev, priv->port))
+ port_state->link_state = MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN;
+
+ up = port_state->link_state == MLX4_PORT_STATE_DEV_EVENT_PORT_UP;
+ if (up == netif_carrier_ok(dev))
+ netif_carrier_event(dev);
+ if (!up) {
+ en_info(priv, "Link Down\n");
+ netif_carrier_off(dev);
+ } else {
+ en_info(priv, "Link Up\n");
+ netif_carrier_on(dev);
+ }
+}
+
+static void mlx4_en_linkstate_work(struct work_struct *work)
{
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
linkstate_task);
struct mlx4_en_dev *mdev = priv->mdev;
- int linkstate = priv->link_state;
mutex_lock(&mdev->state_lock);
- /* If observable port state changed set carrier state and
- * report to system log */
- if (priv->last_link_state != linkstate) {
- if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
- en_info(priv, "Link Down\n");
- netif_carrier_off(priv->dev);
- } else {
- en_info(priv, "Link Up\n");
- netif_carrier_on(priv->dev);
- }
- }
- priv->last_link_state = linkstate;
+ mlx4_en_linkstate(priv);
mutex_unlock(&mdev->state_lock);
}
@@ -2079,9 +2091,11 @@ static int mlx4_en_open(struct net_device *dev)
mlx4_en_clear_stats(dev);
err = mlx4_en_start_port(dev);
- if (err)
+ if (err) {
en_err(priv, "Failed starting port:%d\n", priv->port);
-
+ goto out;
+ }
+ mlx4_en_linkstate(priv);
out:
mutex_unlock(&mdev->state_lock);
return err;
@@ -3168,7 +3182,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
INIT_WORK(&priv->restart_task, mlx4_en_restart);
- INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
+ INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate_work);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
#ifdef CONFIG_RFS_ACCEL
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index f3d1a20201ef..6bf558c5ec10 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -552,7 +552,6 @@ struct mlx4_en_priv {
struct mlx4_hwq_resources res;
int link_state;
- int last_link_state;
bool port_up;
int port;
int registered;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index e84287ffc7ce..dcf9f27ba2ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -658,11 +658,10 @@ static const struct devlink_param enable_rdma_param =
static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
union devlink_param_value value;
int err;
- if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev))
+ if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
return 0;
err = devlink_param_register(devlink, &enable_rdma_param);
@@ -679,9 +678,7 @@ static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink)
{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
-
- if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev))
+ if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
return;
devlink_param_unpublish(devlink, &enable_rdma_param);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 3f8a98093f8c..f9cf9fb31547 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -1007,7 +1007,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
if (err) {
mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
- return err;
+ goto err_cancel_work;
}
err = mlx5_fw_tracer_create_mkey(tracer);
@@ -1031,6 +1031,7 @@ err_notifier_unregister:
mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
err_dealloc_pd:
mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
+err_cancel_work:
cancel_work_sync(&tracer->read_fw_strings_work);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 669a75f3537a..7b8c8187543a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -922,7 +922,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work);
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
-int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
+int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val, bool rx_filter);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 0c38c2e319be..b5ddaa82755f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -137,7 +137,7 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
u16 vport_num, esw_owner_vhca_id;
struct netlink_ext_ack *extack;
int ifindex = upper->ifindex;
- int err;
+ int err = 0;
if (!netif_is_bridge_master(upper))
return 0;
@@ -244,7 +244,7 @@ mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info);
const struct switchdev_attr *attr = port_attr_info->attr;
u16 vport_num, esw_owner_vhca_id;
- int err;
+ int err = 0;
if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
&esw_owner_vhca_id))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 51a4d80f7fa3..de03684528bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -300,9 +300,6 @@ mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
{
struct mlx5e_rep_indr_block_priv *cb_priv;
- /* All callback list access should be protected by RTNL. */
- ASSERT_RTNL();
-
list_for_each_entry(cb_priv,
&rpriv->uplink_priv.tc_indr_block_priv_list,
list)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index bf0313e2682b..13056cb9757d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -572,7 +572,7 @@ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_chann
if (res->features & MLX5E_RX_RES_FEATURE_PTP) {
u32 rqn;
- if (mlx5e_channels_get_ptp_rqn(chs, &rqn))
+ if (!mlx5e_channels_get_ptp_rqn(chs, &rqn))
rqn = res->drop_rqn;
err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 2cfd12953909..306fb5d6a36d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1884,7 +1884,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
return set_pflag_cqe_based_moder(netdev, enable, true);
}
-int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val)
+int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val, bool rx_filter)
{
bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS);
struct mlx5e_params new_params;
@@ -1896,8 +1896,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
if (curr_val == new_val)
return 0;
- if (new_val && !priv->profile->rx_ptp_support &&
- priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
+ if (new_val && !priv->profile->rx_ptp_support && rx_filter) {
netdev_err(priv->netdev,
"Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n");
return -EINVAL;
@@ -1905,7 +1904,7 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
new_params = priv->channels.params;
MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
- if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE)
+ if (rx_filter)
new_params.ptp_rx = new_val;
if (new_params.ptp_rx == priv->channels.params.ptp_rx)
@@ -1928,12 +1927,14 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ bool rx_filter;
int err;
if (!MLX5_CAP_GEN(mdev, cqe_compression))
return -EOPNOTSUPP;
- err = mlx5e_modify_rx_cqe_compression_locked(priv, enable);
+ rx_filter = priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE;
+ err = mlx5e_modify_rx_cqe_compression_locked(priv, enable, rx_filter);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 47efd858964d..3fd515e7bf30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3554,14 +3554,14 @@ static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filte
if (!rx_filter)
/* Reset CQE compression to Admin default */
- return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def);
+ return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def, false);
if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
return 0;
/* Disable CQE compression */
netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
- err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
+ err = mlx5e_modify_rx_cqe_compression_locked(priv, false, true);
if (err)
netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 9fe8e3c204d6..fe501ba88bea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1682,14 +1682,13 @@ static int build_match_list(struct match_list *match_head,
curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
if (!curr_match) {
+ rcu_read_unlock();
free_match_list(match_head, ft_locked);
- err = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
curr_match->g = g;
list_add_tail(&curr_match->list, &match_head->list);
}
-out:
rcu_read_unlock();
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 49ca57c6d31d..ca5690b0a7ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -927,9 +927,12 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
struct mlx5_core_dev *dev1;
struct mlx5_lag *ldev;
+ ldev = mlx5_lag_dev(dev);
+ if (!ldev)
+ return;
+
mlx5_dev_list_lock();
- ldev = mlx5_lag_dev(dev);
dev0 = ldev->pf[MLX5_LAG_P1].dev;
dev1 = ldev->pf[MLX5_LAG_P2].dev;
@@ -946,8 +949,11 @@ void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
- mlx5_dev_list_lock();
ldev = mlx5_lag_dev(dev);
+ if (!ldev)
+ return;
+
+ mlx5_dev_list_lock();
ldev->mode_changes_in_progress--;
mlx5_dev_list_unlock();
mlx5_queue_bond_work(ldev, 0);
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 3e85b17f5857..6704f5c1aa32 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -142,6 +142,13 @@ static int mlxbf_gige_open(struct net_device *netdev)
err = mlxbf_gige_clean_port(priv);
if (err)
goto free_irqs;
+
+ /* Clear driver's valid_polarity to match hardware,
+ * since the above call to clean_port() resets the
+ * receive polarity used by hardware.
+ */
+ priv->valid_polarity = 0;
+
err = mlxbf_gige_rx_init(priv);
if (err)
goto free_irqs;
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index c1310ea1c216..d5c485a6d284 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -398,9 +398,7 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
int err;
u16 i;
- dma_buf = kzalloc(sizeof(*dma_buf) +
- q_depth * sizeof(struct hwc_work_request),
- GFP_KERNEL);
+ dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
if (!dma_buf)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index c581b955efb3..559177e6ded4 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -563,16 +563,6 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
- /* Take MAC, Port, Phy (intern) and PCS (SGMII/Serdes) clock out of
- * reset
- */
- ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(speed),
- DEV_CLOCK_CFG);
-
- /* No PFC */
- ocelot_write_gix(ocelot, ANA_PFC_PFC_CFG_FC_LINK_SPEED(speed),
- ANA_PFC_PFC_CFG, port);
-
/* Core: Enable port for frame transfer */
ocelot_fields_write(ocelot, port,
QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
@@ -1303,14 +1293,19 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond,
return mask;
}
-static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot,
+static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port,
struct net_device *bridge)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[src_port];
u32 mask = 0;
int port;
+ if (!ocelot_port || ocelot_port->bridge != bridge ||
+ ocelot_port->stp_state != BR_STATE_FORWARDING)
+ return 0;
+
for (port = 0; port < ocelot->num_phys_ports; port++) {
- struct ocelot_port *ocelot_port = ocelot->ports[port];
+ ocelot_port = ocelot->ports[port];
if (!ocelot_port)
continue;
@@ -1376,7 +1371,7 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
struct net_device *bridge = ocelot_port->bridge;
struct net_device *bond = ocelot_port->bond;
- mask = ocelot_get_bridge_fwd_mask(ocelot, bridge);
+ mask = ocelot_get_bridge_fwd_mask(ocelot, port, bridge);
mask |= cpu_fwd_mask;
mask &= ~BIT(port);
if (bond) {
diff --git a/drivers/net/ethernet/mscc/ocelot_devlink.c b/drivers/net/ethernet/mscc/ocelot_devlink.c
index edafbd37d12c..b8737efd2a85 100644
--- a/drivers/net/ethernet/mscc/ocelot_devlink.c
+++ b/drivers/net/ethernet/mscc/ocelot_devlink.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
-/* Copyright 2020-2021 NXP Semiconductors
+/* Copyright 2020-2021 NXP
*/
#include <net/devlink.h>
#include "ocelot.h"
diff --git a/drivers/net/ethernet/mscc/ocelot_mrp.c b/drivers/net/ethernet/mscc/ocelot_mrp.c
index 08b481a93460..4b0941f09f71 100644
--- a/drivers/net/ethernet/mscc/ocelot_mrp.c
+++ b/drivers/net/ethernet/mscc/ocelot_mrp.c
@@ -2,7 +2,7 @@
/* Microsemi Ocelot Switch driver
*
* Copyright (c) 2017, 2019 Microsemi Corporation
- * Copyright 2020-2021 NXP Semiconductors
+ * Copyright 2020-2021 NXP
*/
#include <linux/if_bridge.h>
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index c0c465a4a981..e54b9fb2a97a 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -5,7 +5,7 @@
* mscc_ocelot_switch_lib.
*
* Copyright (c) 2017, 2019 Microsemi Corporation
- * Copyright 2020-2021 NXP Semiconductors
+ * Copyright 2020-2021 NXP
*/
#include <linux/if_bridge.h>
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 556c3495211d..64c0ef57ad42 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1767,9 +1767,6 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
struct nfp_flower_indr_block_cb_priv *cb_priv;
struct nfp_flower_priv *priv = app->priv;
- /* All callback list access should be protected by RTNL. */
- ASSERT_RTNL();
-
list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
if (cb_priv->netdev == netdev)
return cb_priv;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index e91b4874a57f..3de1a03839e2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -582,7 +582,10 @@ static int ionic_set_ringparam(struct net_device *netdev,
qparam.ntxq_descs = ring->tx_pending;
qparam.nrxq_descs = ring->rx_pending;
+
+ mutex_lock(&lif->queue_lock);
err = ionic_reconfigure_queues(lif, &qparam);
+ mutex_unlock(&lif->queue_lock);
if (err)
netdev_info(netdev, "Ring reconfiguration failed, changes canceled: %d\n", err);
@@ -679,7 +682,9 @@ static int ionic_set_channels(struct net_device *netdev,
return 0;
}
+ mutex_lock(&lif->queue_lock);
err = ionic_reconfigure_queues(lif, &qparam);
+ mutex_unlock(&lif->queue_lock);
if (err)
netdev_info(netdev, "Queue reconfiguration failed, changes canceled: %d\n", err);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 23c9e196a784..381966e8f557 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1715,7 +1715,6 @@ static int ionic_set_mac_address(struct net_device *netdev, void *sa)
static void ionic_stop_queues_reconfig(struct ionic_lif *lif)
{
/* Stop and clean the queues before reconfiguration */
- mutex_lock(&lif->queue_lock);
netif_device_detach(lif->netdev);
ionic_stop_queues(lif);
ionic_txrx_deinit(lif);
@@ -1734,8 +1733,7 @@ static int ionic_start_queues_reconfig(struct ionic_lif *lif)
* DOWN and UP to try to reset and clear the issue.
*/
err = ionic_txrx_init(lif);
- mutex_unlock(&lif->queue_lock);
- ionic_link_status_check_request(lif, CAN_SLEEP);
+ ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
netif_device_attach(lif->netdev);
return err;
@@ -1765,9 +1763,13 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
+ mutex_lock(&lif->queue_lock);
ionic_stop_queues_reconfig(lif);
netdev->mtu = new_mtu;
- return ionic_start_queues_reconfig(lif);
+ err = ionic_start_queues_reconfig(lif);
+ mutex_unlock(&lif->queue_lock);
+
+ return err;
}
static void ionic_tx_timeout_work(struct work_struct *ws)
@@ -1783,8 +1785,10 @@ static void ionic_tx_timeout_work(struct work_struct *ws)
if (!netif_running(lif->netdev))
return;
+ mutex_lock(&lif->queue_lock);
ionic_stop_queues_reconfig(lif);
ionic_start_queues_reconfig(lif);
+ mutex_unlock(&lif->queue_lock);
}
static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
index 7e3a5634c161..25ecfcfa1281 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
@@ -318,7 +318,7 @@ void ionic_rx_filter_sync(struct ionic_lif *lif)
if (f->state == IONIC_FILTER_STATE_NEW ||
f->state == IONIC_FILTER_STATE_OLD) {
sync_item = devm_kzalloc(dev, sizeof(*sync_item),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!sync_item)
goto loop_out;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 5a0a3cbcc1c1..cb0f2a3a1ac9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -2226,8 +2226,8 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
break;
default:
- DP_NOTICE(p_hwfn, "-EINVALID elem type = %d", elem_type);
- return -EINVAL;
+ DP_NOTICE(p_hwfn, "-EOPNOTSUPP elem type = %d", elem_type);
+ return -EOPNOTSUPP;
}
/* Calculate line in ilt */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index fc8b3e64f153..186d0048a9d1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -1297,6 +1297,14 @@ qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
prev_weight = weight;
while (weight) {
+ /* If the HW device is during recovery, all resources are
+ * immediately reset without receiving a per-cid indication
+ * from HW. In this case we don't expect the cid_map to be
+ * cleared.
+ */
+ if (p_hwfn->cdev->recov_in_prog)
+ return 0;
+
msleep(QED_IWARP_MAX_CID_CLEAN_TIME);
weight = bitmap_weight(bmap->bitmap, bmap->max_count);
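A minimal sketch of the wait loop with the recovery bail-out added here, using hypothetical callbacks and leaving out the driver's bounded retry count:

#include <stdbool.h>

static int wait_for_cids(unsigned int (*cids_in_use)(void),
			 bool (*recovery_in_progress)(void),
			 void (*wait_a_bit)(void))
{
	while (cids_in_use()) {
		/* During recovery resources are hard-reset and no per-cid
		 * completions will arrive, so there is nothing to wait for.
		 */
		if (recovery_in_progress())
			return 0;
		wait_a_bit();
	}
	return 0;
}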
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 6e5a6cc97d0e..24cd41567775 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -3367,6 +3367,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
struct qed_nvm_image_att *p_image_att)
{
enum nvm_image_type type;
+ int rc;
u32 i;
/* Translate image_id into MFW definitions */
@@ -3395,7 +3396,10 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- qed_mcp_nvm_info_populate(p_hwfn);
+ rc = qed_mcp_nvm_info_populate(p_hwfn);
+ if (rc)
+ return rc;
+
for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
if (type == p_hwfn->nvm_info.image_att[i].image_type)
break;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index f16a157bb95a..cf5baa5e59bc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -77,6 +77,14 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
* Beyond the added delay we clear the bitmap anyway.
*/
while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+ /* If the HW device is during recovery, all resources are
+ * immediately reset without receiving a per-cid indication
+ * from HW. In this case we don't expect the cid bitmap to be
+ * cleared.
+ */
+ if (p_hwfn->cdev->recov_in_prog)
+ return;
+
msleep(100);
if (wait_count++ > 20) {
DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 0a2f34fc8b24..27dffa299ca6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1354,10 +1354,10 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
const struct firmware *fw = fw_info->fw;
u32 dest, *p_cache, *temp;
- int i, ret = -EIO;
__le32 *temp_le;
u8 data[16];
size_t size;
+ int i, ret;
u64 addr;
temp = vzalloc(fw->size);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index 3d61a767a8a3..09f20c794754 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -437,7 +437,6 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, 1);
msleep(20);
- qlcnic_rom_unlock(adapter);
/* big hammer don't reset CAM block on reset */
QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
index 79e50079ed03..f72e13b83869 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
@@ -100,7 +100,7 @@ static void emac_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
case ETH_SS_STATS:
for (i = 0; i < EMAC_STATS_LEN; i++) {
- strlcpy(data, emac_ethtool_stat_strings[i],
+ strscpy(data, emac_ethtool_stat_strings[i],
ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 4b2eca5e08e2..01ef5efd7bc2 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -119,6 +119,8 @@
#define PHY_ST 0x8A /* PHY status register */
#define MAC_SM 0xAC /* MAC status machine */
#define MAC_SM_RST 0x0002 /* MAC status machine reset */
+#define MD_CSC 0xb6 /* MDC speed control register */
+#define MD_CSC_DEFAULT 0x0030
#define MAC_ID 0xBE /* Identifier register */
#define TX_DCNT 0x80 /* TX descriptor count */
@@ -355,8 +357,9 @@ static void r6040_reset_mac(struct r6040_private *lp)
{
void __iomem *ioaddr = lp->base;
int limit = MAC_DEF_TIMEOUT;
- u16 cmd;
+ u16 cmd, md_csc;
+ md_csc = ioread16(ioaddr + MD_CSC);
iowrite16(MAC_RST, ioaddr + MCR1);
while (limit--) {
cmd = ioread16(ioaddr + MCR1);
@@ -368,6 +371,10 @@ static void r6040_reset_mac(struct r6040_private *lp)
iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
iowrite16(0, ioaddr + MAC_SM);
mdelay(5);
+
+ /* Restore MDIO clock frequency */
+ if (md_csc != MD_CSC_DEFAULT)
+ iowrite16(md_csc, ioaddr + MD_CSC);
}
static void r6040_init_mac_regs(struct net_device *dev)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 6c8ba916d1a6..1374faa229a2 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2533,6 +2533,7 @@ static netdev_tx_t sh_eth_start_xmit(struct sk_buff *skb,
else
txdesc->status |= cpu_to_le32(TD_TACT);
+ wmb(); /* cur_tx must be incremented after TACT bit was set */
mdp->cur_tx++;
if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns))
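A loose userspace analogy of the ordering the new wmb() enforces, written with C11 atomics instead of the kernel barrier; in the driver the concern is that the descriptor's TACT bit reaches memory before the producer index advances and the DMA engine is kicked. Types and names are illustrative:

#include <stdatomic.h>
#include <stdint.h>

struct tx_desc {
	_Atomic uint32_t status;	/* stands in for txdesc->status */
};

static void publish_tx_desc(struct tx_desc *d, uint32_t tact_bit,
			    unsigned int *cur_tx)
{
	atomic_fetch_or_explicit(&d->status, tact_bit, memory_order_relaxed);
	/* Release fence plays the role of wmb(): the TACT bit is made
	 * visible before the producer index below is advanced.
	 */
	atomic_thread_fence(memory_order_release);
	(*cur_tx)++;
}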
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index a295e2621cf3..43ef4f529028 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -900,74 +900,36 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
/* NIC VPD information
* Called during probe to display the part number of the
- * installed NIC. VPD is potentially very large but this should
- * always appear within the first 512 bytes.
+ * installed NIC.
*/
-#define SFC_VPD_LEN 512
static void efx_probe_vpd_strings(struct efx_nic *efx)
{
struct pci_dev *dev = efx->pci_dev;
- char vpd_data[SFC_VPD_LEN];
- ssize_t vpd_size;
- int ro_start, ro_size, i, j;
-
- /* Get the vpd data from the device */
- vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
- if (vpd_size <= 0) {
- netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
- return;
- }
-
- /* Get the Read only section */
- ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (ro_start < 0) {
- netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
- return;
- }
-
- ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
- j = ro_size;
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
- if (i + j > vpd_size)
- j = vpd_size - i;
-
- /* Get the Part number */
- i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
- if (i < 0) {
- netif_err(efx, drv, efx->net_dev, "Part number not found\n");
- return;
- }
+ unsigned int vpd_size, kw_len;
+ u8 *vpd_data;
+ int start;
- j = pci_vpd_info_field_size(&vpd_data[i]);
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (i + j > vpd_size) {
- netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
+ vpd_data = pci_vpd_alloc(dev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(dev, "Unable to read VPD\n");
return;
}
- netif_info(efx, drv, efx->net_dev,
- "Part Number : %.*s\n", j, &vpd_data[i]);
-
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
- j = ro_size;
- i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
- if (i < 0) {
- netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
- return;
- }
-
- j = pci_vpd_info_field_size(&vpd_data[i]);
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (i + j > vpd_size) {
- netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
- return;
- }
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+ if (start < 0)
+ pci_err(dev, "Part number not found or incomplete\n");
+ else
+ pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start);
- efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
- if (!efx->vpd_sn)
- return;
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
+ if (start < 0)
+ pci_err(dev, "Serial number not found or incomplete\n");
+ else
+ efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
- snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
+ kfree(vpd_data);
}
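Editor's note: both sfc and falcon replace the open-coded VPD walk with the PCI core helpers visible in the hunk: pci_vpd_alloc() returns the whole VPD image (or an ERR_PTR), and pci_vpd_find_ro_info_keyword() locates a keyword in the read-only section and reports its length. A condensed sketch of that usage, relying only on the calls shown above:

#include <linux/pci.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Sketch of the helper-based VPD lookup used above (error reporting trimmed). */
static char *example_read_vpd_serial(struct pci_dev *pdev)
{
	unsigned int vpd_size, kw_len;
	char *sn = NULL;
	u8 *vpd;
	int start;

	vpd = pci_vpd_alloc(pdev, &vpd_size);		/* whole VPD image */
	if (IS_ERR(vpd))
		return NULL;

	start = pci_vpd_find_ro_info_keyword(vpd, vpd_size,
					     PCI_VPD_RO_KEYWORD_SERIALNO,
					     &kw_len);
	if (start >= 0)
		sn = kmemdup_nul(vpd + start, kw_len, GFP_KERNEL);

	kfree(vpd);
	return sn;	/* caller frees; NULL if not found */
}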
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index e5b0d795c301..3dbea028b325 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -166,32 +166,46 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
* We need a channel per event queue, plus a VI per tx queue.
* This may be more pessimistic than it needs to be.
*/
- if (n_channels + n_xdp_ev > max_channels) {
- netif_err(efx, drv, efx->net_dev,
- "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
- n_xdp_ev, n_channels, max_channels);
- netif_err(efx, drv, efx->net_dev,
- "XDP_TX and XDP_REDIRECT will not work on this interface");
- efx->n_xdp_channels = 0;
- efx->xdp_tx_per_channel = 0;
- efx->xdp_tx_queue_count = 0;
+ if (n_channels >= max_channels) {
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+ netif_warn(efx, drv, efx->net_dev,
+ "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+ n_xdp_ev, n_channels, max_channels);
+ netif_warn(efx, drv, efx->net_dev,
+ "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
} else if (n_channels + n_xdp_tx > efx->max_vis) {
- netif_err(efx, drv, efx->net_dev,
- "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
- n_xdp_tx, n_channels, efx->max_vis);
- netif_err(efx, drv, efx->net_dev,
- "XDP_TX and XDP_REDIRECT will not work on this interface");
- efx->n_xdp_channels = 0;
- efx->xdp_tx_per_channel = 0;
- efx->xdp_tx_queue_count = 0;
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+ netif_warn(efx, drv, efx->net_dev,
+ "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
+ n_xdp_tx, n_channels, efx->max_vis);
+ netif_warn(efx, drv, efx->net_dev,
+ "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
+ } else if (n_channels + n_xdp_ev > max_channels) {
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED;
+ netif_warn(efx, drv, efx->net_dev,
+ "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+ n_xdp_ev, n_channels, max_channels);
+
+ n_xdp_ev = max_channels - n_channels;
+ netif_warn(efx, drv, efx->net_dev,
+ "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n",
+ DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev));
} else {
+ efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED;
+ }
+
+ if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) {
efx->n_xdp_channels = n_xdp_ev;
efx->xdp_tx_per_channel = tx_per_ev;
efx->xdp_tx_queue_count = n_xdp_tx;
n_channels += n_xdp_ev;
netif_dbg(efx, drv, efx->net_dev,
"Allocating %d TX and %d event queues for XDP\n",
- n_xdp_tx, n_xdp_ev);
+ n_xdp_ev * tx_per_ev, n_xdp_ev);
+ } else {
+ efx->n_xdp_channels = 0;
+ efx->xdp_tx_per_channel = 0;
+ efx->xdp_tx_queue_count = n_xdp_tx;
}
if (vec_count < n_channels) {
@@ -858,6 +872,20 @@ rollback:
goto out;
}
+static inline int
+efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
+ struct efx_tx_queue *tx_queue)
+{
+ if (xdp_queue_number >= efx->xdp_tx_queue_count)
+ return -EINVAL;
+
+ netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
+ tx_queue->channel->channel, tx_queue->label,
+ xdp_queue_number, tx_queue->queue);
+ efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+ return 0;
+}
+
int efx_set_channels(struct efx_nic *efx)
{
struct efx_tx_queue *tx_queue;
@@ -896,20 +924,9 @@ int efx_set_channels(struct efx_nic *efx)
if (efx_channel_is_xdp_tx(channel)) {
efx_for_each_channel_tx_queue(tx_queue, channel) {
tx_queue->queue = next_queue++;
-
- /* We may have a few left-over XDP TX
- * queues owing to xdp_tx_queue_count
- * not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL.
- * We still allocate and probe those
- * TXQs, but never use them.
- */
- if (xdp_queue_number < efx->xdp_tx_queue_count) {
- netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
- channel->channel, tx_queue->label,
- xdp_queue_number, tx_queue->queue);
- efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+ if (rc == 0)
xdp_queue_number++;
- }
}
} else {
efx_for_each_channel_tx_queue(tx_queue, channel) {
@@ -918,10 +935,35 @@ int efx_set_channels(struct efx_nic *efx)
channel->channel, tx_queue->label,
tx_queue->queue);
}
+
+ /* If XDP is borrowing queues from net stack, it must use the queue
+ * with no csum offload, which is the first one of the channel
+ * (note: channel->tx_queue_by_type is not initialized yet)
+ */
+ if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
+ tx_queue = &channel->tx_queue[0];
+ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+ if (rc == 0)
+ xdp_queue_number++;
+ }
}
}
}
- WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count);
+ WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
+ xdp_queue_number != efx->xdp_tx_queue_count);
+ WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
+ xdp_queue_number > efx->xdp_tx_queue_count);
+
+ /* If we have more CPUs than assigned XDP TX queues, assign the already
+ * existing queues to the exceeding CPUs
+ */
+ next_queue = 0;
+ while (xdp_queue_number < efx->xdp_tx_queue_count) {
+ tx_queue = efx->xdp_tx_queues[next_queue++];
+ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+ if (rc == 0)
+ xdp_queue_number++;
+ }
rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
if (rc)
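Editor's note: the efx_channels rework classifies XDP TX resources as dedicated, shared, or borrowed, and when fewer queues than CPUs exist it wraps the already assigned queues around so every CPU index still resolves to a queue. A hedged sketch of that wrap-around mapping in isolation (hypothetical array and counts, not the driver's bookkeeping):

struct efx_tx_queue;

/* Sketch: reuse queues round-robin for CPUs beyond the allocated count,
 * mirroring the wrap-around loop at the end of efx_set_channels() above.
 */
static void example_fill_xdp_queue_map(struct efx_tx_queue **map,
				       unsigned int n_cpus,
				       unsigned int n_alloc)
{
	unsigned int cpu, src = 0;

	if (!n_alloc)
		return;		/* nothing to reuse */

	for (cpu = n_alloc; cpu < n_cpus; cpu++) {
		map[cpu] = map[src++];	/* reuse an existing queue */
		if (src == n_alloc)
			src = 0;
	}
}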
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index c177ea0f301e..423bdf81200f 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2780,75 +2780,36 @@ static void ef4_pci_remove(struct pci_dev *pci_dev)
};
/* NIC VPD information
- * Called during probe to display the part number of the
- * installed NIC. VPD is potentially very large but this should
- * always appear within the first 512 bytes.
+ * Called during probe to display the part number of the installed NIC.
*/
-#define SFC_VPD_LEN 512
static void ef4_probe_vpd_strings(struct ef4_nic *efx)
{
struct pci_dev *dev = efx->pci_dev;
- char vpd_data[SFC_VPD_LEN];
- ssize_t vpd_size;
- int ro_start, ro_size, i, j;
-
- /* Get the vpd data from the device */
- vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
- if (vpd_size <= 0) {
- netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
- return;
- }
-
- /* Get the Read only section */
- ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
- if (ro_start < 0) {
- netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
- return;
- }
-
- ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
- j = ro_size;
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
- if (i + j > vpd_size)
- j = vpd_size - i;
-
- /* Get the Part number */
- i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
- if (i < 0) {
- netif_err(efx, drv, efx->net_dev, "Part number not found\n");
- return;
- }
+ unsigned int vpd_size, kw_len;
+ u8 *vpd_data;
+ int start;
- j = pci_vpd_info_field_size(&vpd_data[i]);
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (i + j > vpd_size) {
- netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
+ vpd_data = pci_vpd_alloc(dev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(dev, "Unable to read VPD\n");
return;
}
- netif_info(efx, drv, efx->net_dev,
- "Part Number : %.*s\n", j, &vpd_data[i]);
-
- i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
- j = ro_size;
- i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
- if (i < 0) {
- netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
- return;
- }
-
- j = pci_vpd_info_field_size(&vpd_data[i]);
- i += PCI_VPD_INFO_FLD_HDR_SIZE;
- if (i + j > vpd_size) {
- netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
- return;
- }
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+ if (start < 0)
+ pci_warn(dev, "Part number not found or incomplete\n");
+ else
+ pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start);
- efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
- if (!efx->vpd_sn)
- return;
+ start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
+ if (start < 0)
+ pci_warn(dev, "Serial number not found or incomplete\n");
+ else
+ efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
- snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
+ kfree(vpd_data);
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 9b4b25704271..f6981810039d 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -782,6 +782,12 @@ struct efx_async_filter_insertion {
#define EFX_RPS_MAX_IN_FLIGHT 8
#endif /* CONFIG_RFS_ACCEL */
+enum efx_xdp_tx_queues_mode {
+ EFX_XDP_TX_QUEUES_DEDICATED, /* one queue per core, locking not needed */
+ EFX_XDP_TX_QUEUES_SHARED, /* each queue used by more than 1 core */
+ EFX_XDP_TX_QUEUES_BORROWED /* queues borrowed from net stack */
+};
+
/**
* struct efx_nic - an Efx NIC
* @name: Device name (net device name or bus id before net device registered)
@@ -820,6 +826,7 @@ struct efx_async_filter_insertion {
* should be allocated for this NIC
* @xdp_tx_queue_count: Number of entries in %xdp_tx_queues.
* @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit.
+ * @xdp_txq_queues_mode: XDP TX queues sharing strategy.
* @rxq_entries: Size of receive queues requested by user.
* @txq_entries: Size of transmit queues requested by user.
* @txq_stop_thresh: TX queue fill level at or above which we stop it.
@@ -979,6 +986,7 @@ struct efx_nic {
unsigned int xdp_tx_queue_count;
struct efx_tx_queue **xdp_tx_queues;
+ enum efx_xdp_tx_queues_mode xdp_txq_queues_mode;
unsigned rxq_entries;
unsigned txq_entries;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 0c6650d2e239..d16e031e95f4 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -428,23 +428,32 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
unsigned int len;
int space;
int cpu;
- int i;
+ int i = 0;
- cpu = raw_smp_processor_id();
+ if (unlikely(n && !xdpfs))
+ return -EINVAL;
+ if (unlikely(!n))
+ return 0;
- if (!efx->xdp_tx_queue_count ||
- unlikely(cpu >= efx->xdp_tx_queue_count))
+ cpu = raw_smp_processor_id();
+ if (unlikely(cpu >= efx->xdp_tx_queue_count))
return -EINVAL;
tx_queue = efx->xdp_tx_queues[cpu];
if (unlikely(!tx_queue))
return -EINVAL;
- if (unlikely(n && !xdpfs))
- return -EINVAL;
+ if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
+ HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);
- if (!n)
- return 0;
+ /* If we're borrowing net stack queues we have to handle stop-restart
+ * or we might block the queue and it will be considered as frozen
+ */
+ if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
+ if (netif_tx_queue_stopped(tx_queue->core_txq))
+ goto unlock;
+ efx_tx_maybe_stop_queue(tx_queue);
+ }
/* Check for available space. We should never need multiple
* descriptors per frame.
@@ -484,6 +493,10 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
if (flush && i > 0)
efx_nic_push_buffers(tx_queue);
+unlock:
+ if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
+ HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq);
+
return i == 0 ? -EIO : i;
}
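Editor's note: efx_xdp_tx_buffers() now takes the netdev TX lock whenever the queue is not dedicated to a single CPU, and additionally honours stop/restart when it borrows a queue from the regular stack. A hedged sketch of that conditional-locking shape, using the in-tree HARD_TX_LOCK/HARD_TX_UNLOCK macros with a simplified queue lookup:

#include <linux/netdevice.h>

/* Sketch of the locking policy added above: lock only when the XDP TX
 * queue can be touched by more than one context.  "dedicated" stands in
 * for xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED.
 */
static int example_xdp_tx(struct net_device *dev, struct netdev_queue *txq,
			  bool dedicated, int cpu)
{
	if (!dedicated)
		HARD_TX_LOCK(dev, txq, cpu);	/* shared or borrowed queue */

	/* ... push descriptors; on borrowed queues also check
	 *     netif_tx_queue_stopped() and stop/restart as needed ... */

	if (!dedicated)
		HARD_TX_UNLOCK(dev, txq);
	return 0;
}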
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 22cdbf12c823..b008b4e8a2a5 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1550,7 +1550,7 @@ static int smc911x_ethtool_getregslen(struct net_device *dev)
}
static void smc911x_ethtool_getregs(struct net_device *dev,
- struct ethtool_regs* regs, void *buf)
+ struct ethtool_regs *regs, void *buf)
{
struct smc911x_local *lp = netdev_priv(dev);
unsigned long flags;
@@ -1600,7 +1600,7 @@ static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
}
static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev,
- int cmd, int addr)
+ int cmd, int addr)
{
struct smc911x_local *lp = netdev_priv(dev);
int ret;
@@ -1614,7 +1614,7 @@ static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev,
}
static inline int smc911x_ethtool_read_eeprom_byte(struct net_device *dev,
- u8 *data)
+ u8 *data)
{
struct smc911x_local *lp = netdev_priv(dev);
int ret;
@@ -1626,7 +1626,7 @@ static inline int smc911x_ethtool_read_eeprom_byte(struct net_device *dev,
}
static inline int smc911x_ethtool_write_eeprom_byte(struct net_device *dev,
- u8 data)
+ u8 data)
{
struct smc911x_local *lp = netdev_priv(dev);
int ret;
@@ -1638,7 +1638,7 @@ static inline int smc911x_ethtool_write_eeprom_byte(struct net_device *dev,
}
static int smc911x_ethtool_geteeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom, u8 *data)
+ struct ethtool_eeprom *eeprom, u8 *data)
{
u8 eebuf[SMC911X_EEPROM_LEN];
int i, ret;
@@ -1654,7 +1654,7 @@ static int smc911x_ethtool_geteeprom(struct net_device *dev,
}
static int smc911x_ethtool_seteeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom, u8 *data)
+ struct ethtool_eeprom *eeprom, u8 *data)
{
int i, ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index 4c9a37dd0d3f..ecf759ee1c9f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -109,8 +109,10 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
plat->bus_id = pci_dev_id(pdev);
phy_mode = device_get_phy_mode(&pdev->dev);
- if (phy_mode < 0)
+ if (phy_mode < 0) {
dev_err(&pdev->dev, "phy_mode not found\n");
+ return phy_mode;
+ }
plat->phy_interface = phy_mode;
plat->interface = PHY_INTERFACE_MODE_GMII;
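Editor's note: the loongson fix makes the probe actually bail out when device_get_phy_mode() fails instead of only logging, with phy_mode doubling as the negative errno. A minimal sketch of that pattern (headers trimmed to the likely ones; treat them as an assumption):

#include <linux/device.h>
#include <linux/property.h>

/* Sketch of the corrected error path: propagate the negative errno from
 * device_get_phy_mode() rather than continuing with a bad value.
 */
static int example_get_phy_mode(struct device *dev, int *iface)
{
	int phy_mode = device_get_phy_mode(dev);

	if (phy_mode < 0) {
		dev_err(dev, "phy_mode not found\n");
		return phy_mode;	/* negative errno */
	}

	*iface = phy_mode;
	return 0;
}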
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ed0cd3920171..553c4403258a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -309,7 +309,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
priv->clk_csr = STMMAC_CSR_100_150M;
else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
priv->clk_csr = STMMAC_CSR_150_250M;
- else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
+ else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
priv->clk_csr = STMMAC_CSR_250_300M;
}
@@ -5347,7 +5347,7 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, rxtx_napi);
struct stmmac_priv *priv = ch->priv_data;
- int rx_done, tx_done;
+ int rx_done, tx_done, rxtx_done;
u32 chan = ch->index;
priv->xstats.napi_poll++;
@@ -5357,14 +5357,16 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
rx_done = stmmac_rx_zc(priv, budget, chan);
+ rxtx_done = max(tx_done, rx_done);
+
/* If either TX or RX work is not complete, return budget
* and keep polling
*/
- if (tx_done >= budget || rx_done >= budget)
+ if (rxtx_done >= budget)
return budget;
/* all work done, exit the polling mode */
- if (napi_complete_done(napi, rx_done)) {
+ if (napi_complete_done(napi, rxtx_done)) {
unsigned long flags;
spin_lock_irqsave(&ch->lock, flags);
@@ -5375,7 +5377,7 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
spin_unlock_irqrestore(&ch->lock, flags);
}
- return min(rx_done, budget - 1);
+ return min(rxtx_done, budget - 1);
}
/**
@@ -7116,13 +7118,10 @@ int stmmac_suspend(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
u32 chan;
- int ret;
if (!ndev || !netif_running(ndev))
return 0;
- phylink_mac_change(priv->phylink, false);
-
mutex_lock(&priv->lock);
netif_device_detach(ndev);
@@ -7148,27 +7147,22 @@ int stmmac_suspend(struct device *dev)
stmmac_pmt(priv, priv->hw, priv->wolopts);
priv->irq_wake = 1;
} else {
- mutex_unlock(&priv->lock);
- rtnl_lock();
- if (device_may_wakeup(priv->device))
- phylink_speed_down(priv->phylink, false);
- phylink_stop(priv->phylink);
- rtnl_unlock();
- mutex_lock(&priv->lock);
-
stmmac_mac_set(priv, priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
- /* Disable clock in case of PWM is off */
- clk_disable_unprepare(priv->plat->clk_ptp_ref);
- ret = pm_runtime_force_suspend(dev);
- if (ret) {
- mutex_unlock(&priv->lock);
- return ret;
- }
}
mutex_unlock(&priv->lock);
+ rtnl_lock();
+ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ phylink_suspend(priv->phylink, true);
+ } else {
+ if (device_may_wakeup(priv->device))
+ phylink_speed_down(priv->phylink, false);
+ phylink_suspend(priv->phylink, false);
+ }
+ rtnl_unlock();
+
if (priv->dma_cap.fpesel) {
/* Disable FPE */
stmmac_fpe_configure(priv, priv->ioaddr,
@@ -7240,12 +7234,6 @@ int stmmac_resume(struct device *dev)
priv->irq_wake = 0;
} else {
pinctrl_pm_select_default_state(priv->device);
- /* enable the clk previously disabled */
- ret = pm_runtime_force_resume(dev);
- if (ret)
- return ret;
- if (priv->plat->clk_ptp_ref)
- clk_prepare_enable(priv->plat->clk_ptp_ref);
/* reset the phy so that it's ready */
if (priv->mii)
stmmac_mdio_reset(priv->mii);
@@ -7259,13 +7247,15 @@ int stmmac_resume(struct device *dev)
return ret;
}
- if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
- rtnl_lock();
- phylink_start(priv->phylink);
- /* We may have called phylink_speed_down before */
- phylink_speed_up(priv->phylink);
- rtnl_unlock();
+ rtnl_lock();
+ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ phylink_resume(priv->phylink);
+ } else {
+ phylink_resume(priv->phylink);
+ if (device_may_wakeup(priv->device))
+ phylink_speed_up(priv->phylink);
}
+ rtnl_unlock();
rtnl_lock();
mutex_lock(&priv->lock);
@@ -7286,8 +7276,6 @@ int stmmac_resume(struct device *dev)
mutex_unlock(&priv->lock);
rtnl_unlock();
- phylink_mac_change(priv->phylink, true);
-
netif_device_attach(ndev);
return 0;
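Editor's note: the stmmac suspend/resume rework drops the phylink_stop()/phylink_start() plus phylink_mac_change() sequence in favour of phylink_suspend()/phylink_resume(), with the bool argument saying whether the MAC stays powered for wake-on-LAN. A hedged sketch of the suspend-side split, using only the phylink calls visible in the hunks ("wol_active" stands in for device_may_wakeup() && plat->pmt):

#include <linux/phylink.h>
#include <linux/rtnetlink.h>

/* Sketch: keep the link alive when the MAC handles WoL, otherwise drop
 * the link speed first and let phylink fully quiesce the PHY.
 */
static void example_phylink_suspend(struct phylink *phylink, bool wol_active)
{
	rtnl_lock();
	if (wol_active) {
		phylink_suspend(phylink, true);		/* MAC stays up for WoL */
	} else {
		phylink_speed_down(phylink, false);	/* optional power saving */
		phylink_suspend(phylink, false);
	}
	rtnl_unlock();
}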
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 5ca710844cc1..62cec9bfcd33 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -9,6 +9,7 @@
*******************************************************************************/
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -771,9 +772,52 @@ static int __maybe_unused stmmac_runtime_resume(struct device *dev)
return stmmac_bus_clks_config(priv, true);
}
+static int __maybe_unused stmmac_pltfr_noirq_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+ /* Disable clock in case of PWM is off */
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+ /* enable the clk previously disabled */
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ clk_prepare_enable(priv->plat->clk_ptp_ref);
+ }
+
+ return 0;
+}
+
const struct dev_pm_ops stmmac_pltfr_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume)
SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_noirq_suspend, stmmac_pltfr_noirq_resume)
};
EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
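Editor's note: the platform glue above moves the PTP reference clock and runtime-PM forcing into noirq sleep callbacks, wired up through SET_NOIRQ_SYSTEM_SLEEP_PM_OPS in the existing dev_pm_ops. A stripped-down sketch of that dev_pm_ops layout with hypothetical callback names:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Sketch: where the noirq hooks sit relative to the normal sleep and
 * runtime ops in a dev_pm_ops table (bodies reduced to the PM forcing).
 */
static int __maybe_unused example_suspend(struct device *dev)    { return 0; }
static int __maybe_unused example_resume(struct device *dev)     { return 0; }
static int __maybe_unused example_rt_suspend(struct device *dev) { return 0; }
static int __maybe_unused example_rt_resume(struct device *dev)  { return 0; }

static int __maybe_unused example_noirq_suspend(struct device *dev)
{
	return pm_runtime_force_suspend(dev);	/* gate clocks late in suspend */
}

static int __maybe_unused example_noirq_resume(struct device *dev)
{
	return pm_runtime_force_resume(dev);	/* restore clocks early in resume */
}

static const struct dev_pm_ops example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
	SET_RUNTIME_PM_OPS(example_rt_suspend, example_rt_resume, NULL)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(example_noirq_suspend, example_noirq_resume)
};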
diff --git a/drivers/net/ethernet/xscale/ptp_ixp46x.c b/drivers/net/ethernet/xscale/ptp_ixp46x.c
index ecece21315c3..39234852e01b 100644
--- a/drivers/net/ethernet/xscale/ptp_ixp46x.c
+++ b/drivers/net/ethernet/xscale/ptp_ixp46x.c
@@ -16,7 +16,6 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/platform_device.h>
#include <linux/soc/ixp4xx/cpu.h>
-#include <linux/module.h>
#include <mach/ixp4xx-regs.h>
#include "ixp46x_ts.h"