Diffstat (limited to 'drivers/net/ethernet/broadcom')
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 3
-rw-r--r-- drivers/net/ethernet/broadcom/asp2/bcmasp.c | 98
-rw-r--r-- drivers/net/ethernet/broadcom/asp2/bcmasp.h | 26
-rw-r--r-- drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c | 52
-rw-r--r-- drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c | 289
-rw-r--r-- drivers/net/ethernet/broadcom/b44.c | 18
-rw-r--r-- drivers/net/ethernet/broadcom/bcm4908_enet.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 18
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 70
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.h | 23
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-platform.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.h | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 25
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 124
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 14
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3516
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 327
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c | 256
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h | 51
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 15
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 1017
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 791
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 289
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h | 83
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 94
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 237
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 34
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 59
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h | 5
-rw-r--r-- drivers/net/ethernet/broadcom/cnic.c | 47
-rw-r--r-- drivers/net/ethernet/broadcom/cnic.h | 3
-rw-r--r-- drivers/net/ethernet/broadcom/cnic_if.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 52
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.h | 6
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 32
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 12
-rw-r--r-- drivers/net/ethernet/broadcom/sb1250-mac.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 264
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.h | 4
51 files changed, 5710 insertions(+), 2300 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 75ca3ddda1f5..eeec8bf17cf4 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -72,7 +72,6 @@ config BCMGENET
tristate "Broadcom GENET internal MAC support"
depends on HAS_IOMEM
depends on PTP_1588_CLOCK_OPTIONAL || !ARCH_BCM2835
- select MII
select PHYLIB
select FIXED_PHY
select BCM7XXX_PHY
@@ -195,7 +194,6 @@ config SYSTEMPORT
tristate "Broadcom SYSTEMPORT internal MAC support"
depends on HAS_IOMEM
depends on NET_DSA || !NET_DSA
- select MII
select PHYLIB
select FIXED_PHY
select DIMLIB
@@ -260,7 +258,6 @@ config BCMASP
depends on ARCH_BRCMSTB || COMPILE_TEST
default ARCH_BRCMSTB
depends on OF
- select MII
select PHYLIB
select MDIO_BCM_UNIMAC
help
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index 80245c65cc90..a68fab1b05f0 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -31,6 +31,20 @@ static void _intr2_mask_set(struct bcmasp_priv *priv, u32 mask)
priv->irq_mask |= mask;
}
+void bcmasp_enable_phy_irq(struct bcmasp_intf *intf, int en)
+{
+ struct bcmasp_priv *priv = intf->parent;
+
+ /* Only supported with internal phys */
+ if (!intf->internal_phy)
+ return;
+
+ if (en)
+ _intr2_mask_clear(priv, ASP_INTR2_PHY_EVENT(intf->channel));
+ else
+ _intr2_mask_set(priv, ASP_INTR2_PHY_EVENT(intf->channel));
+}
+
void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en)
{
struct bcmasp_priv *priv = intf->parent;
@@ -79,6 +93,9 @@ static void bcmasp_intr2_handling(struct bcmasp_intf *intf, u32 status)
__napi_schedule_irqoff(&intf->tx_napi);
}
}
+
+ if (status & ASP_INTR2_PHY_EVENT(intf->channel))
+ phy_mac_interrupt(intf->ndev->phydev);
}
static irqreturn_t bcmasp_isr(int irq, void *data)
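The new ASP_INTR2_PHY_EVENT path above routes internal-PHY link events through the MAC's own interrupt line. A minimal sketch of that pattern, assuming a driver that marked its PHY with PHY_MAC_INTERRUPT at connect time (bcmasp does this later in bcmasp_netif_init()):

#include <linux/netdevice.h>
#include <linux/phy.h>

/* Sketch only: forward a PHY event seen in the MAC ISR to phylib, which
 * defers the real link-state work to its state machine. Safe to call
 * from hard-IRQ context.
 */
static void example_handle_phy_event(struct net_device *ndev)
{
	if (ndev->phydev)
		phy_mac_interrupt(ndev->phydev);
}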
@@ -972,7 +989,26 @@ static void bcmasp_core_init(struct bcmasp_priv *priv)
ASP_INTR2_CLEAR);
}
-static void bcmasp_core_clock_select(struct bcmasp_priv *priv, bool slow)
+static void bcmasp_core_clock_select_many(struct bcmasp_priv *priv, bool slow)
+{
+ u32 reg;
+
+ reg = ctrl2_core_rl(priv, ASP_CTRL2_CORE_CLOCK_SELECT);
+ if (slow)
+ reg &= ~ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ else
+ reg |= ASP_CTRL2_CORE_CLOCK_SELECT_MAIN;
+ ctrl2_core_wl(priv, reg, ASP_CTRL2_CORE_CLOCK_SELECT);
+
+ reg = ctrl2_core_rl(priv, ASP_CTRL2_CPU_CLOCK_SELECT);
+ if (slow)
+ reg &= ~ASP_CTRL2_CPU_CLOCK_SELECT_MAIN;
+ else
+ reg |= ASP_CTRL2_CPU_CLOCK_SELECT_MAIN;
+ ctrl2_core_wl(priv, reg, ASP_CTRL2_CPU_CLOCK_SELECT);
+}
+
+static void bcmasp_core_clock_select_one(struct bcmasp_priv *priv, bool slow)
{
u32 reg;
@@ -1166,6 +1202,24 @@ static void bcmasp_wol_irq_destroy_per_intf(struct bcmasp_priv *priv)
}
}
+static void bcmasp_eee_fixup(struct bcmasp_intf *intf, bool en)
+{
+ u32 reg, phy_lpi_overwrite;
+
+ reg = rx_edpkt_core_rl(intf->parent, ASP_EDPKT_SPARE_REG);
+ phy_lpi_overwrite = intf->internal_phy ? ASP_EDPKT_SPARE_REG_EPHY_LPI :
+ ASP_EDPKT_SPARE_REG_GPHY_LPI;
+
+ if (en)
+ reg |= phy_lpi_overwrite;
+ else
+ reg &= ~phy_lpi_overwrite;
+
+ rx_edpkt_core_wl(intf->parent, reg, ASP_EDPKT_SPARE_REG);
+
+ usleep_range(50, 100);
+}
+
static struct bcmasp_hw_info v20_hw_info = {
.rx_ctrl_flush = ASP_RX_CTRL_FLUSH,
.umac2fb = UMAC2FB_OFFSET,
@@ -1178,6 +1232,7 @@ static const struct bcmasp_plat_data v20_plat_data = {
.init_wol = bcmasp_init_wol_per_intf,
.enable_wol = bcmasp_enable_wol_per_intf,
.destroy_wol = bcmasp_wol_irq_destroy_per_intf,
+ .core_clock_select = bcmasp_core_clock_select_one,
.hw_info = &v20_hw_info,
};
@@ -1194,17 +1249,39 @@ static const struct bcmasp_plat_data v21_plat_data = {
.init_wol = bcmasp_init_wol_shared,
.enable_wol = bcmasp_enable_wol_shared,
.destroy_wol = bcmasp_wol_irq_destroy_shared,
+ .core_clock_select = bcmasp_core_clock_select_one,
+ .hw_info = &v21_hw_info,
+};
+
+static const struct bcmasp_plat_data v22_plat_data = {
+ .init_wol = bcmasp_init_wol_shared,
+ .enable_wol = bcmasp_enable_wol_shared,
+ .destroy_wol = bcmasp_wol_irq_destroy_shared,
+ .core_clock_select = bcmasp_core_clock_select_many,
.hw_info = &v21_hw_info,
+ .eee_fixup = bcmasp_eee_fixup,
};
+static void bcmasp_set_pdata(struct bcmasp_priv *priv, const struct bcmasp_plat_data *pdata)
+{
+ priv->init_wol = pdata->init_wol;
+ priv->enable_wol = pdata->enable_wol;
+ priv->destroy_wol = pdata->destroy_wol;
+ priv->core_clock_select = pdata->core_clock_select;
+ priv->eee_fixup = pdata->eee_fixup;
+ priv->hw_info = pdata->hw_info;
+}
+
static const struct of_device_id bcmasp_of_match[] = {
{ .compatible = "brcm,asp-v2.0", .data = &v20_plat_data },
{ .compatible = "brcm,asp-v2.1", .data = &v21_plat_data },
+ { .compatible = "brcm,asp-v2.2", .data = &v22_plat_data },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, bcmasp_of_match);
static const struct of_device_id bcmasp_mdio_of_match[] = {
+ { .compatible = "brcm,asp-v2.2-mdio", },
{ .compatible = "brcm,asp-v2.1-mdio", },
{ .compatible = "brcm,asp-v2.0-mdio", },
{ /* sentinel */ },
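The v2.2 entries above illustrate the per-SoC dispatch this series builds: each compatible string carries a bcmasp_plat_data whose callbacks (clock select, EEE fixup) bcmasp_set_pdata() copies into priv. A minimal sketch of the lookup, with example_* names as stand-ins for the real probe code:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* resolves to the .data member of the matching of_device_id */
	const void *pdata = of_device_get_match_data(&pdev->dev);

	if (!pdata)
		return -EINVAL;
	/* copy the per-variant callbacks into the private struct here */
	return 0;
}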
@@ -1223,9 +1300,9 @@ static void bcmasp_remove_intfs(struct bcmasp_priv *priv)
static int bcmasp_probe(struct platform_device *pdev)
{
- struct device_node *ports_node, *intf_node;
const struct bcmasp_plat_data *pdata;
struct device *dev = &pdev->dev;
+ struct device_node *ports_node;
struct bcmasp_priv *priv;
struct bcmasp_intf *intf;
int ret = 0, count = 0;
@@ -1265,16 +1342,13 @@ static int bcmasp_probe(struct platform_device *pdev)
if (!pdata)
return dev_err_probe(dev, -EINVAL, "unable to find platform data\n");
- priv->init_wol = pdata->init_wol;
- priv->enable_wol = pdata->enable_wol;
- priv->destroy_wol = pdata->destroy_wol;
- priv->hw_info = pdata->hw_info;
+ bcmasp_set_pdata(priv, pdata);
/* Enable all clocks to ensure successful probing */
bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);
/* Switch to the main clock */
- bcmasp_core_clock_select(priv, false);
+ priv->core_clock_select(priv, false);
bcmasp_intr2_mask_set_all(priv);
bcmasp_intr2_clear_all(priv);
@@ -1300,12 +1374,12 @@ static int bcmasp_probe(struct platform_device *pdev)
}
i = 0;
- for_each_available_child_of_node(ports_node, intf_node) {
+ for_each_available_child_of_node_scoped(ports_node, intf_node) {
intf = bcmasp_interface_create(priv, intf_node, i);
if (!intf) {
dev_err(dev, "Cannot create eth interface %d\n", i);
bcmasp_remove_intfs(priv);
- of_node_put(intf_node);
+ ret = -ENOMEM;
goto of_put_exit;
}
list_add_tail(&intf->list, &priv->intfs);
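The _scoped iterator used above declares the child node itself and attaches an automatic of_node_put() cleanup, which is why the manual put on the error path could be dropped. A sketch of the idiom, assuming a kernel recent enough to provide the scoped macro:

#include <linux/of.h>

static int example_count_ports(struct device_node *ports_node)
{
	int count = 0;

	/* 'port' is declared by the macro; its reference is dropped
	 * automatically on every exit path, including break/goto/return.
	 */
	for_each_available_child_of_node_scoped(ports_node, port)
		count++;

	return count;
}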
@@ -1381,7 +1455,7 @@ static int __maybe_unused bcmasp_suspend(struct device *d)
*/
bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE);
- bcmasp_core_clock_select(priv, true);
+ priv->core_clock_select(priv, true);
clk_disable_unprepare(priv->clk);
@@ -1399,7 +1473,7 @@ static int __maybe_unused bcmasp_resume(struct device *d)
return ret;
/* Switch to the main clock domain */
- bcmasp_core_clock_select(priv, false);
+ priv->core_clock_select(priv, false);
/* Re-enable all clocks for re-initialization */
bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);
@@ -1426,7 +1500,7 @@ static SIMPLE_DEV_PM_OPS(bcmasp_pm_ops,
static struct platform_driver bcmasp_driver = {
.probe = bcmasp_probe,
- .remove_new = bcmasp_remove,
+ .remove = bcmasp_remove,
.shutdown = bcmasp_shutdown,
.driver = {
.name = "brcm,asp-v2",
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
index ec90add6b03e..8fc75bcedb70 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -19,6 +19,8 @@
#define ASP_INTR2_TX_DESC(intr) BIT((intr) + 14)
#define ASP_INTR2_UMC0_WAKE BIT(22)
#define ASP_INTR2_UMC1_WAKE BIT(28)
+#define ASP_INTR2_PHY_EVENT(intr) ((intr) ? BIT(30) | BIT(31) : \
+ BIT(24) | BIT(25))
#define ASP_WAKEUP_INTR2_OFFSET 0x1200
#define ASP_WAKEUP_INTR2_STATUS 0x0
@@ -33,6 +35,12 @@
#define ASP_WAKEUP_INTR2_FILT_1 BIT(3)
#define ASP_WAKEUP_INTR2_FW BIT(4)
+#define ASP_CTRL2_OFFSET 0x2000
+#define ASP_CTRL2_CORE_CLOCK_SELECT 0x0
+#define ASP_CTRL2_CORE_CLOCK_SELECT_MAIN BIT(0)
+#define ASP_CTRL2_CPU_CLOCK_SELECT 0x4
+#define ASP_CTRL2_CPU_CLOCK_SELECT_MAIN BIT(0)
+
#define ASP_TX_ANALYTICS_OFFSET 0x4c000
#define ASP_TX_ANALYTICS_CTRL 0x0
@@ -134,8 +142,11 @@ enum asp_rx_net_filter_block {
#define ASP_EDPKT_RX_PKT_CNT 0x138
#define ASP_EDPKT_HDR_EXTR_CNT 0x13c
#define ASP_EDPKT_HDR_OUT_CNT 0x140
+#define ASP_EDPKT_SPARE_REG 0x174
+#define ASP_EDPKT_SPARE_REG_EPHY_LPI BIT(4)
+#define ASP_EDPKT_SPARE_REG_GPHY_LPI BIT(3)
-#define ASP_CTRL 0x101000
+#define ASP_CTRL_OFFSET 0x101000
#define ASP_CTRL_ASP_SW_INIT 0x04
#define ASP_CTRL_ASP_SW_INIT_ACPUSS_CORE BIT(0)
#define ASP_CTRL_ASP_SW_INIT_ASP_TX BIT(1)
@@ -306,6 +317,7 @@ struct bcmasp_intf {
struct bcmasp_desc *rx_edpkt_cpu;
dma_addr_t rx_edpkt_dma_addr;
dma_addr_t rx_edpkt_dma_read;
+ dma_addr_t rx_edpkt_dma_valid;
/* RX buffer prefetcher ring*/
void *rx_ring_cpu;
@@ -336,8 +348,6 @@ struct bcmasp_intf {
/* Used if per intf wol irq */
int wol_irq;
unsigned int wol_irq_enabled:1;
-
- struct ethtool_eee eee;
};
#define NUM_NET_FILTERS 32
@@ -372,6 +382,8 @@ struct bcmasp_plat_data {
void (*init_wol)(struct bcmasp_priv *priv);
void (*enable_wol)(struct bcmasp_intf *intf, bool en);
void (*destroy_wol)(struct bcmasp_priv *priv);
+ void (*core_clock_select)(struct bcmasp_priv *priv, bool slow);
+ void (*eee_fixup)(struct bcmasp_intf *priv, bool en);
struct bcmasp_hw_info *hw_info;
};
@@ -390,6 +402,8 @@ struct bcmasp_priv {
void (*init_wol)(struct bcmasp_priv *priv);
void (*enable_wol)(struct bcmasp_intf *intf, bool en);
void (*destroy_wol)(struct bcmasp_priv *priv);
+ void (*core_clock_select)(struct bcmasp_priv *priv, bool slow);
+ void (*eee_fixup)(struct bcmasp_intf *intf, bool en);
void __iomem *base;
struct bcmasp_hw_info *hw_info;
@@ -530,7 +544,8 @@ BCMASP_CORE_IO_MACRO(rx_analytics, ASP_RX_ANALYTICS_OFFSET);
BCMASP_CORE_IO_MACRO(rx_ctrl, ASP_RX_CTRL_OFFSET);
BCMASP_CORE_IO_MACRO(rx_filter, ASP_RX_FILTER_OFFSET);
BCMASP_CORE_IO_MACRO(rx_edpkt, ASP_EDPKT_OFFSET);
-BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL);
+BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL_OFFSET);
+BCMASP_CORE_IO_MACRO(ctrl2, ASP_CTRL2_OFFSET);
struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
struct device_node *ndev_dn, int i);
@@ -541,6 +556,8 @@ void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en);
void bcmasp_enable_rx_irq(struct bcmasp_intf *intf, int en);
+void bcmasp_enable_phy_irq(struct bcmasp_intf *intf, int en);
+
void bcmasp_flush_rx_port(struct bcmasp_intf *intf);
extern const struct ethtool_ops bcmasp_ethtool_ops;
@@ -582,5 +599,4 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
void bcmasp_netfilt_suspend(struct bcmasp_intf *intf);
-void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable);
#endif
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
index 3188ece72092..a537c121d3e2 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "bcmasp_ethtool: " fmt
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
@@ -101,14 +101,14 @@ static int bcmasp_get_sset_count(struct net_device *dev, int string_set)
static void bcmasp_get_strings(struct net_device *dev, u32 stringset,
u8 *data)
{
+ const char *str;
unsigned int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < BCMASP_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- bcmasp_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ str = bcmasp_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
break;
default:
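ethtool_puts() copies one NUL-terminated name into the strings buffer and advances the cursor by ETH_GSTRING_LEN, replacing the open-coded memcpy-plus-index arithmetic removed above. A minimal sketch of a get_strings body using it:

#include <linux/ethtool.h>
#include <linux/kernel.h>

static void example_get_strings(u8 *data)
{
	static const char * const names[] = { "rx_packets", "tx_packets" };
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(names); i++)
		ethtool_puts(&data, names[i]);	/* advances 'data' itself */
}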
@@ -348,58 +348,19 @@ static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return err;
}
-void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable)
+static int bcmasp_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
- u32 reg;
-
- reg = umac_rl(intf, UMC_EEE_CTRL);
- if (enable)
- reg |= EEE_EN;
- else
- reg &= ~EEE_EN;
- umac_wl(intf, reg, UMC_EEE_CTRL);
-
- intf->eee.eee_enabled = enable;
-}
-
-static int bcmasp_get_eee(struct net_device *dev, struct ethtool_eee *e)
-{
- struct bcmasp_intf *intf = netdev_priv(dev);
- struct ethtool_eee *p = &intf->eee;
-
if (!dev->phydev)
return -ENODEV;
- e->tx_lpi_enabled = p->tx_lpi_enabled;
- e->tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
-
return phy_ethtool_get_eee(dev->phydev, e);
}
-static int bcmasp_set_eee(struct net_device *dev, struct ethtool_eee *e)
+static int bcmasp_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
- struct bcmasp_intf *intf = netdev_priv(dev);
- struct ethtool_eee *p = &intf->eee;
- int ret;
-
if (!dev->phydev)
return -ENODEV;
- if (!p->eee_enabled) {
- bcmasp_eee_enable_set(intf, false);
- } else {
- ret = phy_init_eee(dev->phydev, 0);
- if (ret) {
- netif_err(intf, hw, dev,
- "EEE initialization failed: %d\n", ret);
- return ret;
- }
-
- umac_wl(intf, e->tx_lpi_timer, UMC_EEE_LPI_TIMER);
- intf->eee.tx_lpi_enabled = e->tx_lpi_enabled;
- bcmasp_eee_enable_set(intf, true);
- }
-
return phy_ethtool_set_eee(dev->phydev, e);
}
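struct ethtool_keee replaces struct ethtool_eee and, with phylib now managing EEE state (including the LPI timer), a MAC like bcmasp can delegate both ops wholesale, which is why the driver-side EEE bookkeeping above could be deleted. A sketch of the delegation:

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/phy.h>

static int example_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
	if (!dev->phydev)
		return -ENODEV;
	/* phylib owns EEE state, including the tx_lpi configuration */
	return phy_ethtool_get_eee(dev->phydev, e);
}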
@@ -496,4 +457,5 @@ const struct ethtool_ops bcmasp_ethtool_ops = {
.get_strings = bcmasp_get_strings,
.get_ethtool_stats = bcmasp_get_ethtool_stats,
.get_sset_count = bcmasp_get_sset_count,
+ .get_ts_info = ethtool_op_get_ts_info,
};
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index e429876c7291..45ec1a9214a2 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -322,6 +322,7 @@ static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Rewind so we do not have a hole */
spb_index = intf->tx_spb_index;
+ dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
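The dev_kfree_skb() added above fixes a leak: once ndo_start_xmit() returns NETDEV_TX_OK, the stack considers the skb consumed, so any error that is absorbed (rather than returned as NETDEV_TX_BUSY) must free it. A sketch of the rule, using the context-agnostic dev_kfree_skb_any():

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_xmit_error(struct sk_buff *skb)
{
	/* drop the packet but tell the stack it was handled */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}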
@@ -364,6 +365,9 @@ static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
intf->tx_spb_index = spb_index;
intf->tx_spb_dma_valid = valid;
+
+ skb_tx_timestamp(skb);
+
bcmasp_intf_tx_write(intf, intf->tx_spb_dma_valid);
if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1))
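skb_tx_timestamp() is the software TX timestamping hook (paired with the ethtool_op_get_ts_info addition in the ethtool ops); it must run after the descriptor is committed but before the hardware can complete and free the skb, hence its placement just before the doorbell write. A sketch of the ordering:

#include <linux/skbuff.h>

static void example_tx_commit(struct sk_buff *skb)
{
	skb_tx_timestamp(skb);	/* record the software TX stamp */
	/* ...then ring the TX doorbell / bump the producer index... */
}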
@@ -382,6 +386,7 @@ static void bcmasp_netif_start(struct net_device *dev)
bcmasp_enable_rx_irq(intf, 1);
bcmasp_enable_tx_irq(intf, 1);
+ bcmasp_enable_phy_irq(intf, 1);
phy_start(dev->phydev);
}
@@ -391,7 +396,9 @@ static void umac_reset(struct bcmasp_intf *intf)
umac_wl(intf, 0x0, UMC_CMD);
umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD);
usleep_range(10, 100);
- umac_wl(intf, 0x0, UMC_CMD);
+ /* We hold the umac in reset and bring it out of
+ * reset when phy link is up.
+ */
}
static void umac_set_hw_addr(struct bcmasp_intf *intf,
@@ -411,6 +418,8 @@ static void umac_enable_set(struct bcmasp_intf *intf, u32 mask,
u32 reg;
reg = umac_rl(intf, UMC_CMD);
+ if (reg & UMC_CMD_SW_RESET)
+ return;
if (enable)
reg |= mask;
else
@@ -429,13 +438,10 @@ static void umac_init(struct bcmasp_intf *intf)
umac_wl(intf, 0x800, UMC_FRM_LEN);
umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL);
umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
- umac_enable_set(intf, UMC_CMD_PROMISC, 1);
}
-static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
+static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
{
- struct bcmasp_intf *intf =
- container_of(napi, struct bcmasp_intf, tx_napi);
struct bcmasp_intf_stats64 *stats = &intf->stats64;
struct device *kdev = &intf->parent->pdev->dev;
unsigned long read, released = 0;
@@ -478,10 +484,16 @@ static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
DESC_RING_COUNT);
}
- /* Ensure all descriptors have been written to DRAM for the hardware
- * to see updated contents.
- */
- wmb();
+ return released;
+}
+
+static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmasp_intf *intf =
+ container_of(napi, struct bcmasp_intf, tx_napi);
+ int released = 0;
+
+ released = bcmasp_tx_reclaim(intf);
napi_complete(&intf->tx_napi);
@@ -607,7 +619,6 @@ static void bcmasp_adj_link(struct net_device *dev)
struct phy_device *phydev = dev->phydev;
u32 cmd_bits = 0, reg;
int changed = 0;
- bool active;
if (intf->old_link != phydev->link) {
changed = 1;
@@ -657,10 +668,21 @@ static void bcmasp_adj_link(struct net_device *dev)
UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE |
UMC_CMD_TX_PAUSE_IGNORE);
reg |= cmd_bits;
+ if (reg & UMC_CMD_SW_RESET) {
+ reg &= ~UMC_CMD_SW_RESET;
+ umac_wl(intf, reg, UMC_CMD);
+ udelay(2);
+ reg |= UMC_CMD_TX_EN | UMC_CMD_RX_EN | UMC_CMD_PROMISC;
+ }
umac_wl(intf, reg, UMC_CMD);
- active = phy_init_eee(phydev, 0) >= 0;
- bcmasp_eee_enable_set(intf, active);
+ umac_wl(intf, phydev->eee_cfg.tx_lpi_timer, UMC_EEE_LPI_TIMER);
+ reg = umac_rl(intf, UMC_EEE_CTRL);
+ if (phydev->enable_tx_lpi)
+ reg |= EEE_EN;
+ else
+ reg &= ~EEE_EN;
+ umac_wl(intf, reg, UMC_EEE_CTRL);
}
reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
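With phylib managing EEE, the adjust_link callback above no longer calls phy_init_eee(); it simply mirrors phydev->enable_tx_lpi into the MAC's EEE enable bit and programs the LPI timer from phydev->eee_cfg. A sketch of that mirroring:

#include <linux/phy.h>
#include <linux/types.h>

static u32 example_sync_eee(const struct phy_device *phydev, u32 eee_ctrl,
			    u32 eee_en_bit)
{
	if (phydev->enable_tx_lpi)
		eee_ctrl |= eee_en_bit;
	else
		eee_ctrl &= ~eee_en_bit;
	return eee_ctrl;	/* caller writes this back to UMC_EEE_CTRL */
}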
@@ -674,40 +696,78 @@ static void bcmasp_adj_link(struct net_device *dev)
phy_print_status(phydev);
}
-static int bcmasp_init_rx(struct bcmasp_intf *intf)
+static int bcmasp_alloc_buffers(struct bcmasp_intf *intf)
{
struct device *kdev = &intf->parent->pdev->dev;
struct page *buffer_pg;
- dma_addr_t dma;
- void *p;
- u32 reg;
- int ret;
+ /* Alloc RX */
intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
if (!buffer_pg)
return -ENOMEM;
- dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(kdev, dma)) {
- __free_pages(buffer_pg, intf->rx_buf_order);
- return -ENOMEM;
- }
intf->rx_ring_cpu = page_to_virt(buffer_pg);
- intf->rx_ring_dma = dma;
- intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
+ intf->rx_ring_dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(kdev, intf->rx_ring_dma))
+ goto free_rx_buffer;
+
+ intf->rx_edpkt_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
+ &intf->rx_edpkt_dma_addr, GFP_KERNEL);
+ if (!intf->rx_edpkt_cpu)
+ goto free_rx_buffer_dma;
+
+ /* Alloc TX */
+ intf->tx_spb_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
+ &intf->tx_spb_dma_addr, GFP_KERNEL);
+ if (!intf->tx_spb_cpu)
+ goto free_rx_edpkt_dma;
- p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->rx_edpkt_dma_addr,
+ intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
GFP_KERNEL);
- if (!p) {
- ret = -ENOMEM;
- goto free_rx_ring;
- }
- intf->rx_edpkt_cpu = p;
+ if (!intf->tx_cbs)
+ goto free_tx_spb_dma;
- netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
+ return 0;
+
+free_tx_spb_dma:
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
+ intf->tx_spb_dma_addr);
+free_rx_edpkt_dma:
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
+ intf->rx_edpkt_dma_addr);
+free_rx_buffer_dma:
+ dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+free_rx_buffer:
+ __free_pages(buffer_pg, intf->rx_buf_order);
+ return -ENOMEM;
+}
+
+static void bcmasp_reclaim_free_buffers(struct bcmasp_intf *intf)
+{
+ struct device *kdev = &intf->parent->pdev->dev;
+
+ /* RX buffers */
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
+ intf->rx_edpkt_dma_addr);
+ dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
+
+ /* TX buffers */
+ dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
+ intf->tx_spb_dma_addr);
+ kfree(intf->tx_cbs);
+}
+
+static void bcmasp_init_rx(struct bcmasp_intf *intf)
+{
+ /* Restart from index 0 */
+ intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
+ intf->rx_edpkt_dma_valid = intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1);
intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr;
intf->rx_edpkt_index = 0;
@@ -733,63 +793,23 @@ static int bcmasp_init_rx(struct bcmasp_intf *intf)
rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE);
rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ);
rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE);
- rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1),
- RX_EDPKT_DMA_END);
- rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1),
- RX_EDPKT_DMA_VALID);
-
- reg = UMAC2FB_CFG_DEFAULT_EN |
- ((intf->channel + 11) << UMAC2FB_CFG_CHID_SHIFT);
- reg |= (0xd << UMAC2FB_CFG_OK_SEND_SHIFT);
- umac2fb_wl(intf, reg, UMAC2FB_CFG);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_END);
+ rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_VALID);
- return 0;
-
-free_rx_ring:
- dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
- DMA_FROM_DEVICE);
- __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
-
- return ret;
+ umac2fb_wl(intf, UMAC2FB_CFG_DEFAULT_EN | ((intf->channel + 11) <<
+ UMAC2FB_CFG_CHID_SHIFT) | (0xd << UMAC2FB_CFG_OK_SEND_SHIFT),
+ UMAC2FB_CFG);
}
-static void bcmasp_reclaim_free_all_rx(struct bcmasp_intf *intf)
-{
- struct device *kdev = &intf->parent->pdev->dev;
-
- dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
- intf->rx_edpkt_dma_addr);
- dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
- DMA_FROM_DEVICE);
- __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
-}
-static int bcmasp_init_tx(struct bcmasp_intf *intf)
+static void bcmasp_init_tx(struct bcmasp_intf *intf)
{
- struct device *kdev = &intf->parent->pdev->dev;
- void *p;
- int ret;
-
- p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->tx_spb_dma_addr,
- GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- intf->tx_spb_cpu = p;
+ /* Restart from index 0 */
intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1;
intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
-
- intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
- GFP_KERNEL);
- if (!intf->tx_cbs) {
- ret = -ENOMEM;
- goto free_tx_spb;
- }
-
intf->tx_spb_index = 0;
intf->tx_spb_clean_index = 0;
-
- netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
+ memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT);
/* Make sure channels are disabled */
tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
@@ -806,26 +826,6 @@ static int bcmasp_init_tx(struct bcmasp_intf *intf)
tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END);
tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);
-
- return 0;
-
-free_tx_spb:
- dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
- intf->tx_spb_dma_addr);
-
- return ret;
-}
-
-static void bcmasp_reclaim_free_all_tx(struct bcmasp_intf *intf)
-{
- struct device *kdev = &intf->parent->pdev->dev;
-
- /* Free descriptors */
- dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
- intf->tx_spb_dma_addr);
-
- /* Free cbs */
- kfree(intf->tx_cbs);
}
static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable)
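The refactor above separates buffer allocation from ring initialization: bcmasp_alloc_buffers()/bcmasp_reclaim_free_buffers() run once per open/stop, while bcmasp_init_tx()/bcmasp_init_rx() only rewind indices and reprogram the DMA registers, so suspend/resume and netif reinit no longer reallocate memory. A lifecycle sketch with example_* stand-ins for those helpers:

#include <linux/netdevice.h>

static int example_alloc_buffers(struct net_device *dev) { return 0; }
static void example_init_rings(struct net_device *dev) { }

static int example_open(struct net_device *dev)
{
	int ret = example_alloc_buffers(dev);	/* once per open */

	if (ret)
		return ret;
	example_init_rings(dev);	/* reused verbatim on resume */
	return 0;
}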
@@ -898,6 +898,8 @@ static void bcmasp_netif_deinit(struct net_device *dev)
} while (timeout-- > 0);
tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);
+ bcmasp_tx_reclaim(intf);
+
umac_enable_set(intf, UMC_CMD_TX_EN, 0);
phy_stop(dev->phydev);
@@ -913,12 +915,10 @@ static void bcmasp_netif_deinit(struct net_device *dev)
/* Disable interrupts */
bcmasp_enable_tx_irq(intf, 0);
bcmasp_enable_rx_irq(intf, 0);
+ bcmasp_enable_phy_irq(intf, 0);
netif_napi_del(&intf->tx_napi);
- bcmasp_reclaim_free_all_tx(intf);
-
netif_napi_del(&intf->rx_napi);
- bcmasp_reclaim_free_all_rx(intf);
}
static int bcmasp_stop(struct net_device *dev)
@@ -932,6 +932,8 @@ static int bcmasp_stop(struct net_device *dev)
bcmasp_netif_deinit(dev);
+ bcmasp_reclaim_free_buffers(intf);
+
phy_disconnect(dev->phydev);
/* Disable internal EPHY or external PHY */
@@ -1052,43 +1054,34 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
goto err_phy_disable;
}
+ if (intf->internal_phy)
+ dev->phydev->irq = PHY_MAC_INTERRUPT;
+
/* Indicate that the MAC is responsible for PHY PM */
phydev->mac_managed_pm = true;
- } else if (!intf->wolopts) {
- ret = phy_resume(dev->phydev);
- if (ret)
- goto err_phy_disable;
+
+ /* Set phylib's copy of the LPI timer */
+ phydev->eee_cfg.tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER);
}
umac_reset(intf);
umac_init(intf);
- /* Disable the UniMAC RX/TX */
- umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 0);
-
umac_set_hw_addr(intf, dev->dev_addr);
intf->old_duplex = -1;
intf->old_link = -1;
intf->old_pause = -1;
- ret = bcmasp_init_tx(intf);
- if (ret)
- goto err_phy_disconnect;
-
- /* Turn on asp */
+ bcmasp_init_tx(intf);
+ netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
bcmasp_enable_tx(intf, 1);
- ret = bcmasp_init_rx(intf);
- if (ret)
- goto err_reclaim_tx;
-
+ bcmasp_init_rx(intf);
+ netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
bcmasp_enable_rx(intf, 1);
- /* Turn on UniMAC TX/RX */
- umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 1);
-
intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD);
bcmasp_netif_start(dev);
@@ -1097,12 +1090,6 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
return 0;
-err_reclaim_tx:
- netif_napi_del(&intf->tx_napi);
- bcmasp_reclaim_free_all_tx(intf);
-err_phy_disconnect:
- if (phydev)
- phy_disconnect(phydev);
err_phy_disable:
if (intf->internal_phy)
bcmasp_ephy_enable_set(intf, false);
@@ -1118,13 +1105,24 @@ static int bcmasp_open(struct net_device *dev)
netif_dbg(intf, ifup, dev, "bcmasp open\n");
- ret = clk_prepare_enable(intf->parent->clk);
+ ret = bcmasp_alloc_buffers(intf);
if (ret)
return ret;
- ret = bcmasp_netif_init(dev, true);
+ ret = clk_prepare_enable(intf->parent->clk);
if (ret)
+ goto err_free_mem;
+
+ ret = bcmasp_netif_init(dev, true);
+ if (ret) {
clk_disable_unprepare(intf->parent->clk);
+ goto err_free_mem;
+ }
+
+ return ret;
+
+err_free_mem:
+ bcmasp_reclaim_free_buffers(intf);
return ret;
}
@@ -1325,7 +1323,14 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
if (intf->wolopts & WAKE_FILTER)
bcmasp_netfilt_suspend(intf);
- /* UniMAC receive needs to be turned on */
+ /* Bring UniMAC out of reset if needed and enable RX */
+ reg = umac_rl(intf, UMC_CMD);
+ if (reg & UMC_CMD_SW_RESET)
+ reg &= ~UMC_CMD_SW_RESET;
+
+ reg |= UMC_CMD_RX_EN | UMC_CMD_PROMISC;
+ umac_wl(intf, reg, UMC_CMD);
+
umac_enable_set(intf, UMC_CMD_RX_EN, 1);
if (intf->parent->wol_irq > 0) {
@@ -1333,6 +1338,10 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
ASP_WAKEUP_INTR2_MASK_CLEAR);
}
+ if (ndev->phydev && ndev->phydev->eee_cfg.eee_enabled &&
+ intf->parent->eee_fixup)
+ intf->parent->eee_fixup(intf, true);
+
netif_dbg(intf, wol, ndev, "entered WOL mode\n");
}
@@ -1340,7 +1349,6 @@ int bcmasp_interface_suspend(struct bcmasp_intf *intf)
{
struct device *kdev = &intf->parent->pdev->dev;
struct net_device *dev = intf->ndev;
- int ret = 0;
if (!netif_running(dev))
return 0;
@@ -1350,10 +1358,6 @@ int bcmasp_interface_suspend(struct bcmasp_intf *intf)
bcmasp_netif_deinit(dev);
if (!intf->wolopts) {
- ret = phy_suspend(dev->phydev);
- if (ret)
- goto out;
-
if (intf->internal_phy)
bcmasp_ephy_enable_set(intf, false);
else
@@ -1370,17 +1374,17 @@ int bcmasp_interface_suspend(struct bcmasp_intf *intf)
clk_disable_unprepare(intf->parent->clk);
- return ret;
-
-out:
- bcmasp_netif_init(dev, false);
- return ret;
+ return 0;
}
static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
{
u32 reg;
+ if (intf->ndev->phydev && intf->ndev->phydev->eee_cfg.eee_enabled &&
+ intf->parent->eee_fixup)
+ intf->parent->eee_fixup(intf, false);
+
reg = umac_rl(intf, UMC_MPD_CTRL);
reg &= ~UMC_MPD_CTRL_MPD_EN;
umac_wl(intf, reg, UMC_MPD_CTRL);
@@ -1409,9 +1413,6 @@ int bcmasp_interface_resume(struct bcmasp_intf *intf)
bcmasp_resume_from_wol(intf);
- if (intf->eee.eee_enabled)
- bcmasp_eee_enable_set(intf, true);
-
netif_device_attach(dev);
return 0;
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 3e4fb3c3e834..e5809ad5eb82 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1042,13 +1042,13 @@ static int b44_change_mtu(struct net_device *dev, int new_mtu)
/* We'll just catch it later when the
* device is up'd.
*/
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
spin_lock_irq(&bp->lock);
b44_halt(bp);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
b44_init_rings(bp);
b44_init_hw(bp, B44_FULL_RESET);
spin_unlock_irq(&bp->lock);
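The WRITE_ONCE() conversions here (and in bcm63xx_enet, bnx2 and bnx2x below) pair with lockless READ_ONCE(dev->mtu) readers in the core: annotating the store prevents load/store tearing and documents the concurrent access. A sketch of the pattern:

#include <linux/netdevice.h>

static int example_change_mtu(struct net_device *dev, int new_mtu)
{
	WRITE_ONCE(dev->mtu, new_mtu);	/* paired with READ_ONCE() readers */
	return 0;
}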
@@ -2009,12 +2009,14 @@ static int b44_set_pauseparam(struct net_device *dev,
bp->flags |= B44_FLAG_TX_PAUSE;
else
bp->flags &= ~B44_FLAG_TX_PAUSE;
- if (bp->flags & B44_FLAG_PAUSE_AUTO) {
- b44_halt(bp);
- b44_init_rings(bp);
- b44_init_hw(bp, B44_FULL_RESET);
- } else {
- __b44_set_flow_ctrl(bp, bp->flags);
+ if (netif_running(dev)) {
+ if (bp->flags & B44_FLAG_PAUSE_AUTO) {
+ b44_halt(bp);
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET);
+ } else {
+ __b44_set_flow_ctrl(bp, bp->flags);
+ }
}
spin_unlock_irq(&bp->lock);
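The netif_running() guard above avoids resetting hardware that is not up; when the interface is down, the saved pause flags are applied by the next open instead. A sketch of the shape:

#include <linux/netdevice.h>

static void example_apply_pause(struct net_device *dev)
{
	if (!netif_running(dev))
		return;	/* flags take effect on the next open */
	/* halt, re-init rings, re-init hardware with the new flags */
}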
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 72df1bb10172..203e8d0dd04b 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -789,7 +789,7 @@ static struct platform_driver bcm4908_enet_driver = {
.of_match_table = bcm4908_enet_of_match,
},
.probe = bcm4908_enet_probe,
- .remove_new = bcm4908_enet_remove,
+ .remove = bcm4908_enet_remove,
};
module_platform_driver(bcm4908_enet_driver);
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 3196c4dea076..65e3a0656a4c 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1339,14 +1339,14 @@ static int bcm_enet_get_sset_count(struct net_device *netdev,
static void bcm_enet_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
+ const char *str;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- bcm_enet_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ str = bcm_enet_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
break;
}
@@ -1652,7 +1652,7 @@ static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -1936,7 +1936,7 @@ static void bcm_enet_remove(struct platform_device *pdev)
static struct platform_driver bcm63xx_enet_driver = {
.probe = bcm_enet_probe,
- .remove_new = bcm_enet_remove,
+ .remove = bcm_enet_remove,
.driver = {
.name = "bcm63xx_enet",
},
@@ -2503,14 +2503,14 @@ static const struct bcm_enet_stats bcm_enetsw_gstrings_stats[] = {
static void bcm_enetsw_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
+ const char *str;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < BCM_ENETSW_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- bcm_enetsw_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ str = bcm_enetsw_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
break;
}
@@ -2755,7 +2755,7 @@ static void bcm_enetsw_remove(struct platform_device *pdev)
static struct platform_driver bcm63xx_enetsw_driver = {
.probe = bcm_enetsw_probe,
- .remove_new = bcm_enetsw_remove,
+ .remove = bcm_enetsw_remove,
.driver = {
.name = "bcm63xx_enetsw",
},
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index c9faa8540859..bc4e1f3b3752 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -27,30 +27,6 @@
#include "bcmsysport.h"
-/* I/O accessors register helpers */
-#define BCM_SYSPORT_IO_MACRO(name, offset) \
-static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
-{ \
- u32 reg = readl_relaxed(priv->base + offset + off); \
- return reg; \
-} \
-static inline void name##_writel(struct bcm_sysport_priv *priv, \
- u32 val, u32 off) \
-{ \
- writel_relaxed(val, priv->base + offset + off); \
-} \
-
-BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
-BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
-BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
-BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
-BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
-BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
-BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
-BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
-BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
-BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
-
/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
* same layout, except it has been moved by 4 bytes up, *sigh*
*/
@@ -370,32 +346,22 @@ static void bcm_sysport_get_strings(struct net_device *dev,
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
const struct bcm_sysport_stats *s;
- char buf[128];
- int i, j;
+ int i;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
s = &bcm_sysport_gstrings_stats[i];
if (priv->is_lite &&
!bcm_sysport_lite_stat_valid(s->type))
continue;
- memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
- ETH_GSTRING_LEN);
- j++;
+ ethtool_puts(&data, s->stat_string);
}
for (i = 0; i < dev->num_tx_queues; i++) {
- snprintf(buf, sizeof(buf), "txq%d_packets", i);
- memcpy(data + j * ETH_GSTRING_LEN, buf,
- ETH_GSTRING_LEN);
- j++;
-
- snprintf(buf, sizeof(buf), "txq%d_bytes", i);
- memcpy(data + j * ETH_GSTRING_LEN, buf,
- ETH_GSTRING_LEN);
- j++;
+ ethtool_sprintf(&data, "txq%d_packets", i);
+ ethtool_sprintf(&data, "txq%d_bytes", i);
}
break;
default:
@@ -1053,7 +1019,7 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
if (priv->dim.use_dim) {
dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
priv->dim.bytes, &dim_sample);
- net_dim(&priv->dim.dim, dim_sample);
+ net_dim(&priv->dim.dim, &dim_sample);
}
return work_done;
@@ -1359,6 +1325,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
skb->data, skb_len);
ret = NETDEV_TX_OK;
+ dev_kfree_skb_any(skb);
goto out;
}
@@ -1966,7 +1933,11 @@ static int bcm_sysport_open(struct net_device *dev)
unsigned int i;
int ret;
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ netdev_err(dev, "could not enable priv clock\n");
+ return ret;
+ }
/* Reset UniMAC */
umac_reset(priv);
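clk_prepare_enable() can fail (for example on an unresolvable clock parent), and these hunks stop ignoring that. A sketch of the propagation:

#include <linux/clk.h>

static int example_power_up(struct clk *clk)
{
	int ret = clk_prepare_enable(clk);

	if (ret)
		return ret;	/* caller must not touch the hardware */
	return 0;
}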
@@ -2624,7 +2595,11 @@ static int bcm_sysport_probe(struct platform_device *pdev)
goto err_deregister_notifier;
}
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "could not enable priv clock\n");
+ goto err_deregister_netdev;
+ }
priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
dev_info(&pdev->dev,
@@ -2638,6 +2613,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
return 0;
+err_deregister_netdev:
+ unregister_netdev(dev);
err_deregister_notifier:
unregister_netdevice_notifier(&priv->netdev_notifier);
err_deregister_fixed_link:
@@ -2807,7 +2784,12 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
if (!netif_running(dev))
return 0;
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ netdev_err(dev, "could not enable priv clock\n");
+ return ret;
+ }
+
if (priv->wolopts)
clk_disable_unprepare(priv->wol_clk);
@@ -2899,7 +2881,7 @@ static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
static struct platform_driver bcm_sysport_driver = {
.probe = bcm_sysport_probe,
- .remove_new = bcm_sysport_remove,
+ .remove = bcm_sysport_remove,
.driver = {
.name = "brcm-systemport",
.of_match_table = bcm_sysport_of_match,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 335cf6631db5..a34296f989f1 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -773,4 +773,27 @@ struct bcm_sysport_priv {
struct bcm_sysport_tx_ring *ring_map[DSA_MAX_PORTS * 8];
};
+
+/* I/O accessors register helpers */
+#define BCM_SYSPORT_IO_MACRO(name, offset) \
+static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
+{ \
+ u32 reg = readl_relaxed(priv->base + (offset) + off); \
+ return reg; \
+} \
+static inline void name##_writel(struct bcm_sysport_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ writel_relaxed(val, priv->base + (offset) + off); \
+} \
+
+BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
+BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
+BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
+BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
+BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
+BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
+BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
+BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
+
#endif /* __BCM_SYSPORT_H */
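Moving BCM_SYSPORT_IO_MACRO() into the header (with the offset argument now parenthesized) makes the generated accessors available to anything that includes it; note the header copy drops the unused txchk and tbuf instances. A standalone sketch of the macro pattern, with a dummy struct and an illustrative offset:

#include <linux/io.h>
#include <linux/types.h>

struct demo_priv {
	void __iomem *base;
};

#define DEMO_IO_MACRO(name, offset)					\
static inline u32 name##_readl(struct demo_priv *p, u32 off)		\
{									\
	return readl_relaxed(p->base + (offset) + off);			\
}									\
static inline void name##_writel(struct demo_priv *p, u32 val, u32 off)	\
{									\
	writel_relaxed(val, p->base + (offset) + off);			\
}

DEMO_IO_MACRO(demo_umac, 0x800)	/* offset value is illustrative only */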
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 77425c7a32db..4e266ce41180 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -171,6 +171,7 @@ static int platform_phy_connect(struct bgmac *bgmac)
static int bgmac_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ struct device_node *phy_node;
struct bgmac *bgmac;
struct resource *regs;
int ret;
@@ -236,7 +237,9 @@ static int bgmac_probe(struct platform_device *pdev)
bgmac->cco_ctl_maskset = platform_bgmac_cco_ctl_maskset;
bgmac->get_bus_clock = platform_bgmac_get_bus_clock;
bgmac->cmn_maskset32 = platform_bgmac_cmn_maskset32;
- if (of_parse_phandle(np, "phy-handle", 0)) {
+ phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (phy_node) {
+ of_node_put(phy_node);
bgmac->phy_connect = platform_phy_connect;
} else {
bgmac->phy_connect = bgmac_phy_connect_direct;
@@ -294,7 +297,7 @@ static struct platform_driver bgmac_enet_driver = {
.pm = BGMAC_PM_OPS
},
.probe = bgmac_probe,
- .remove_new = bgmac_remove,
+ .remove = bgmac_remove,
};
module_platform_driver(bgmac_enet_driver);
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 6ffdc4229407..a461ec612e95 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1367,8 +1367,7 @@ static void bgmac_get_strings(struct net_device *dev, u32 stringset,
return;
for (i = 0; i < BGMAC_STATS_LEN; i++)
- strscpy(data + i * ETH_GSTRING_LEN,
- bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
+ ethtool_puts(&data, bgmac_get_strings_stats[i].name);
}
static void bgmac_get_ethtool_stats(struct net_device *dev,
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index d73ef262991d..6fee9a41839c 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -328,8 +328,7 @@
#define BGMAC_RX_FRAME_OFFSET 30 /* There are 2 unused bytes between header and real data */
#define BGMAC_RX_BUF_OFFSET (NET_SKB_PAD + NET_IP_ALIGN - \
BGMAC_RX_FRAME_OFFSET)
-/* Jumbo frame size with FCS */
-#define BGMAC_RX_MAX_FRAME_SIZE 9724
+#define BGMAC_RX_MAX_FRAME_SIZE 1536
#define BGMAC_RX_BUF_SIZE (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE)
#define BGMAC_RX_ALLOC_SIZE (SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE + BGMAC_RX_BUF_OFFSET) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 0d917a9699c5..6ec773e61182 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -367,6 +367,7 @@ static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
cp->irq_arr[0].status_blk = (void *)
((unsigned long) bnapi->status_blk.msi +
(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
+ cp->irq_arr[0].status_blk_map = bp->status_blk_mapping;
cp->irq_arr[0].status_blk_num = sb_id;
cp->num_irq = 1;
}
@@ -7911,7 +7912,7 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
struct bnx2 *bp = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
false);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e2a4e1088b7f..9580ab83d387 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1262,7 +1262,7 @@ enum {
struct bnx2x_fw_stats_req {
struct stats_query_header hdr;
- struct stats_query_entry query[FP_SB_MAX_E1x+
+ struct stats_query_entry query[FP_SB_MAX_E2 +
BNX2X_FIRST_QUEUE_QUERY_IDX];
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e9c1e1bb5580..a8e07e51418f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -147,10 +147,11 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
phy_fw_ver[0] = '\0';
bnx2x_get_ext_phy_fw_version(&bp->link_params,
- phy_fw_ver, PHY_FW_VER_LEN);
- strscpy(buf, bp->fw_ver, buf_len);
- snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
- "bc %d.%d.%d%s%s",
+ phy_fw_ver, sizeof(phy_fw_ver));
+ /* This may become truncated. */
+ scnprintf(buf, buf_len,
+ "%sbc %d.%d.%d%s%s",
+ bp->fw_ver,
(bp->common.bc_ver & 0xff0000) >> 16,
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff),
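The rewrite above replaces a strscpy() plus an snprintf() bounded by a hardcoded 32 with a single scnprintf() bounded by the real buf_len; scnprintf() guarantees NUL termination and returns the bytes actually stored, so truncation is safe, as the new comment notes. A sketch:

#include <linux/kernel.h>

static void example_fill_fw_str(char *buf, size_t buf_len,
				const char *fw_ver, u32 bc_ver)
{
	scnprintf(buf, buf_len, "%sbc %d.%d.%d", fw_ver,
		  (bc_ver & 0xff0000) >> 16, (bc_ver & 0xff00) >> 8,
		  bc_ver & 0xff);
}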
@@ -3537,7 +3538,7 @@ static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
u32 *parsing_data, u32 xmit_type)
{
*parsing_data |=
- ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
+ ((skb_inner_transport_offset(skb) >> 1) <<
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
@@ -3569,7 +3570,7 @@ static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
u32 *parsing_data, u32 xmit_type)
{
*parsing_data |=
- ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
+ ((skb_transport_offset(skb) >> 1) <<
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
@@ -3612,7 +3613,7 @@ static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
struct eth_tx_parse_bd_e1x *pbd,
u32 xmit_type)
{
- u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
+ u8 hlen = skb_network_offset(skb) >> 1;
/* for now NS flag is not used in Linux */
pbd->global_data =
@@ -3620,8 +3621,7 @@ static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
- pbd->ip_hlen_w = (skb_transport_header(skb) -
- skb_network_header(skb)) >> 1;
+ pbd->ip_hlen_w = skb_network_header_len(skb) >> 1;
hlen += pbd->ip_hlen_w;
@@ -3666,8 +3666,7 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
u8 outerip_off, outerip_len = 0;
/* from outer IP to transport */
- hlen_w = (skb_inner_transport_header(skb) -
- skb_network_header(skb)) >> 1;
+ hlen_w = skb_inner_transport_offset(skb) >> 1;
/* transport len */
hlen_w += inner_tcp_hdrlen(skb) >> 1;
@@ -3713,7 +3712,7 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
0, IPPROTO_TCP, 0));
}
- outerip_off = (skb_network_header(skb) - skb->data) >> 1;
+ outerip_off = (skb_network_offset(skb)) >> 1;
*global_data |=
outerip_off |
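These conversions swap open-coded header-pointer subtraction for the equivalent skb helpers: skb_network_offset(), skb_transport_offset(), skb_inner_transport_offset() and skb_network_header_len() return the same byte distances without the casts. A sketch of one of them:

#include <linux/skbuff.h>

static int example_l4_offset_words(const struct sk_buff *skb)
{
	/* bytes from frame start to the L4 header, in 16-bit words,
	 * i.e. the old ((u8 *)skb_transport_header(skb) - skb->data) >> 1
	 */
	return skb_transport_offset(skb) >> 1;
}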
@@ -4903,7 +4902,7 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
* because the actual alloc size is
* only updated as part of load
*/
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (!bnx2x_mtu_allows_gro(new_mtu))
dev->features &= ~NETIF_F_GRO_HW;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index d8b1824c334d..0bc1367fd649 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1002,9 +1002,6 @@ static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
struct bnx2x_alloc_pool *pool)
{
- if (!pool->page)
- return;
-
put_page(pool->page);
pool->page = NULL;
@@ -1015,6 +1012,9 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
{
int i;
+ if (!fp->page_pool.page)
+ return;
+
if (fp->mode == TPA_MODE_DISABLED)
return;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 81d232e6d05f..44199855ebfb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -39,34 +39,34 @@ static const struct {
int size;
char string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[] = {
-/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
+/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
- 8, "[%s]: rx_ucast_packets" },
+ 8, "[%d]: rx_ucast_packets" },
{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
- 8, "[%s]: rx_mcast_packets" },
+ 8, "[%d]: rx_mcast_packets" },
{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
- 8, "[%s]: rx_bcast_packets" },
- { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" },
+ 8, "[%d]: rx_bcast_packets" },
+ { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
{ Q_STATS_OFFSET32(rx_err_discard_pkt),
- 4, "[%s]: rx_phy_ip_err_discards"},
+ 4, "[%d]: rx_phy_ip_err_discards"},
{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
- 4, "[%s]: rx_skb_alloc_discard" },
- { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
- { Q_STATS_OFFSET32(driver_xoff), 4, "[%s]: tx_exhaustion_events" },
- { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
+ 4, "[%d]: rx_skb_alloc_discard" },
+ { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
+ { Q_STATS_OFFSET32(driver_xoff), 4, "[%d]: tx_exhaustion_events" },
+ { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
- 8, "[%s]: tx_ucast_packets" },
+ 8, "[%d]: tx_ucast_packets" },
{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
- 8, "[%s]: tx_mcast_packets" },
+ 8, "[%d]: tx_mcast_packets" },
{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
- 8, "[%s]: tx_bcast_packets" },
+ 8, "[%d]: tx_bcast_packets" },
{ Q_STATS_OFFSET32(total_tpa_aggregations_hi),
- 8, "[%s]: tpa_aggregations" },
+ 8, "[%d]: tpa_aggregations" },
{ Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
- 8, "[%s]: tpa_aggregated_frames"},
- { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"},
+ 8, "[%d]: tpa_aggregated_frames"},
+ { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%d]: tpa_bytes"},
{ Q_STATS_OFFSET32(driver_filtered_tx_pkt),
- 4, "[%s]: driver_filtered_tx_pkt" }
+ 4, "[%d]: driver_filtered_tx_pkt" }
};
#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
@@ -1132,7 +1132,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
}
memset(version, 0, sizeof(version));
- bnx2x_fill_fw_str(bp, version, ETHTOOL_FWVERS_LEN);
+ bnx2x_fill_fw_str(bp, version, sizeof(version));
strlcat(info->fw_version, version, sizeof(info->fw_version));
strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
@@ -2081,34 +2081,31 @@ static const char bnx2x_private_arr[BNX2X_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
"Storage only interface"
};
-static u32 bnx2x_eee_to_adv(u32 eee_adv)
+static void bnx2x_eee_to_linkmode(unsigned long *mode, u32 eee_adv)
{
- u32 modes = 0;
-
if (eee_adv & SHMEM_EEE_100M_ADV)
- modes |= ADVERTISED_100baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
if (eee_adv & SHMEM_EEE_1G_ADV)
- modes |= ADVERTISED_1000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
if (eee_adv & SHMEM_EEE_10G_ADV)
- modes |= ADVERTISED_10000baseT_Full;
-
- return modes;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
}
-static u32 bnx2x_adv_to_eee(u32 modes, u32 shift)
+static u32 bnx2x_linkmode_to_eee(const unsigned long *mode, u32 shift)
{
u32 eee_adv = 0;
- if (modes & ADVERTISED_100baseT_Full)
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode))
eee_adv |= SHMEM_EEE_100M_ADV;
- if (modes & ADVERTISED_1000baseT_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode))
eee_adv |= SHMEM_EEE_1G_ADV;
- if (modes & ADVERTISED_10000baseT_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode))
eee_adv |= SHMEM_EEE_10G_ADV;
return eee_adv << shift;
}
-static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnx2x_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct bnx2x *bp = netdev_priv(dev);
u32 eee_cfg;
@@ -2120,16 +2117,17 @@ static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
eee_cfg = bp->link_vars.eee_status;
- edata->supported =
- bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
- SHMEM_EEE_SUPPORTED_SHIFT);
+ bnx2x_eee_to_linkmode(edata->supported,
+ (eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
+ SHMEM_EEE_SUPPORTED_SHIFT);
+
+ bnx2x_eee_to_linkmode(edata->advertised,
+ (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
+ SHMEM_EEE_ADV_STATUS_SHIFT);
- edata->advertised =
- bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
- SHMEM_EEE_ADV_STATUS_SHIFT);
- edata->lp_advertised =
- bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
- SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+ bnx2x_eee_to_linkmode(edata->lp_advertised,
+ (eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
+ SHMEM_EEE_LP_ADV_STATUS_SHIFT);
/* SHMEM value is in 16u units --> Convert to 1u units. */
edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4;
@@ -2141,7 +2139,7 @@ static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
-static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnx2x_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct bnx2x *bp = netdev_priv(dev);
u32 eee_cfg;
@@ -2162,8 +2160,8 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
- advertised = bnx2x_adv_to_eee(edata->advertised,
- SHMEM_EEE_ADV_STATUS_SHIFT);
+ advertised = bnx2x_linkmode_to_eee(edata->advertised,
+ SHMEM_EEE_ADV_STATUS_SHIFT);
if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
DP(BNX2X_MSG_ETHTOOL,
"Direct manipulation of EEE advertisement is not supported\n");
@@ -3186,49 +3184,43 @@ static u32 bnx2x_get_private_flags(struct net_device *dev)
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
- int i, j, k, start;
- char queue_name[MAX_QUEUE_NAME_LEN+1];
+ const char *str;
+ int i, j, start;
switch (stringset) {
case ETH_SS_STATS:
- k = 0;
if (is_multi(bp)) {
for_each_eth_queue(bp, i) {
- memset(queue_name, 0, sizeof(queue_name));
- snprintf(queue_name, sizeof(queue_name),
- "%d", i);
- for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
- snprintf(buf + (k + j)*ETH_GSTRING_LEN,
- ETH_GSTRING_LEN,
- bnx2x_q_stats_arr[j].string,
- queue_name);
- k += BNX2X_NUM_Q_STATS;
+ for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
+ str = bnx2x_q_stats_arr[j].string;
+ ethtool_sprintf(&buf, str, i);
+ }
}
}
- for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+ for (i = 0; i < BNX2X_NUM_STATS; i++) {
if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
continue;
- strcpy(buf + (k + j)*ETH_GSTRING_LEN,
- bnx2x_stats_arr[i].string);
- j++;
+ ethtool_puts(&buf, bnx2x_stats_arr[i].string);
}
break;
case ETH_SS_TEST:
+ if (IS_VF(bp))
+ break;
/* First 4 tests cannot be done in MF mode */
if (!IS_MF(bp))
start = 0;
else
start = 4;
- memcpy(buf, bnx2x_tests_str_arr + start,
- ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
+ for (i = start; i < BNX2X_NUM_TESTS_SF; i++)
+ ethtool_puts(&buf, bnx2x_tests_str_arr[i]);
break;
case ETH_SS_PRIV_FLAGS:
- memcpy(buf, bnx2x_private_arr,
- ETH_GSTRING_LEN * BNX2X_PRI_FLAG_LEN);
+ for (i = 0; i < BNX2X_PRI_FLAG_LEN; i++)
+ ethtool_puts(&buf, bnx2x_private_arr[i]);
break;
}
}
@@ -3636,22 +3628,18 @@ static int bnx2x_set_channels(struct net_device *dev,
}
static int bnx2x_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct bnx2x *bp = netdev_priv(dev);
if (bp->flags & PTP_SUPPORTED) {
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
if (bp->ptp_clock)
info->phc_index = ptp_clock_index(bp->ptp_clock);
- else
- info->phc_index = -1;
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
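With kernel_ethtool_ts_info, the ethtool core pre-sets the software timestamping flags and defaults phc_index to -1, so drivers only declare hardware capabilities, as the trimmed hunk above shows. A sketch of the reduced driver side:

#include <linux/ethtool.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

static void example_fill_ts_info(struct kernel_ethtool_ts_info *info,
				 struct ptp_clock *ptp)
{
	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	if (ptp)	/* core already defaulted phc_index to -1 */
		info->phc_index = ptp_clock_index(ptp);
}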
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 02808513ffe4..ea310057fe3a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6163,8 +6163,8 @@ static void bnx2x_link_int_ack(struct link_params *params,
static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
{
- str[0] = '\0';
- (*len)--;
+ if (*len)
+ str[0] = '\0';
return 0;
}
@@ -6173,7 +6173,7 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
u16 ret;
if (*len < 10) {
- /* Need more than 10chars for this format */
+ /* Need more than 10 chars for this format */
bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
@@ -6188,8 +6188,8 @@ static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
{
u16 ret;
- if (*len < 10) {
- /* Need more than 10chars for this format */
+ if (*len < 9) {
+ /* Need more than 9 chars for this format */
bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
@@ -6208,7 +6208,7 @@ int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version,
int status = 0;
u8 *ver_p = version;
u16 remain_len = len;
- if (version == NULL || params == NULL)
+ if (version == NULL || params == NULL || len == 0)
return -EINVAL;
bp = params->bp;
@@ -11546,7 +11546,7 @@ static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
str[2] = (spirom_ver & 0xFF0000) >> 16;
str[3] = (spirom_ver & 0xFF000000) >> 24;
str[4] = '\0';
- *len -= 5;
+ *len -= 4;
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 0d8e61c63c7c..678829646cec 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -14912,9 +14912,11 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
else
cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
+ cp->irq_arr[0].status_blk_map = bp->cnic_sb_mapping;
cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
cp->irq_arr[1].status_blk = bp->def_status_blk;
+ cp->irq_arr[1].status_blk_map = bp->def_status_blk_mapping;
cp->irq_arr[1].status_blk_num = DEF_SB_ID;
cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 4e9215bce4ad..a018f251d198 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -868,6 +868,8 @@
#define DORQ_REG_VF_TYPE_VALUE_0 0x170258
#define DORQ_REG_VF_USAGE_CT_LIMIT 0x170340
+extern const u32 dmae_reg_go_c[];
+
/* [RW 4] Initial activity counter value on the load request; when the
shortcut is done. */
#define DORQ_REG_SHRT_ACT_CNT 0x170070
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 77d4cb4ad782..12198fc3ab22 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2652,10 +2652,10 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
/* vlan */
if (bulletin->valid_bitmap & (1 << VLAN_VALID))
/* vlan configured by ndo so its in bulletin board */
- memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
+ ivi->vlan = bulletin->vlan;
else
/* function has not been loaded yet. Show vlans as 0s */
- memset(&ivi->vlan, 0, VLAN_HLEN);
+ ivi->vlan = 0;
mutex_unlock(&bp->vfdb->bulletin_mutex);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 2bb133ae61c3..ba6729f2f9c0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -23,8 +23,6 @@
#include "bnx2x_cmn.h"
#include "bnx2x_sriov.h"
-extern const u32 dmae_reg_go_c[];
-
/* Statistics */
/*
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 39845d556baf..55f553debd3b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -69,6 +69,7 @@
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
+#include "bnxt_coredump.h"
#include "bnxt_hwmon.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
@@ -76,11 +77,10 @@
NETIF_MSG_TX_ERR)
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
+MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
-#define BNXT_RX_COPY_THRESH 256
#define BNXT_TX_PUSH_THRESH 164
@@ -137,6 +137,7 @@ static const struct {
[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
+ [NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
@@ -211,6 +212,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
+ { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
{ 0 }
@@ -242,24 +244,78 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
+ ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER,
+};
+
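+/* Map each backing store trace context type to the corresponding
+ * firmware DBG_LOG_BUFFER trace type used when requesting a flush.
+ */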
+const u16 bnxt_bstore_to_trace[] = {
+ [BNXT_CTX_SRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE,
+ [BNXT_CTX_SRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE,
+ [BNXT_CTX_CRT] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE,
+ [BNXT_CTX_CRT2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE,
+ [BNXT_CTX_RIGP0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE,
+ [BNXT_CTX_L2HWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE,
+ [BNXT_CTX_REHWRM] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE,
+ [BNXT_CTX_CA0] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE,
+ [BNXT_CTX_CA1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE,
+ [BNXT_CTX_CA2] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE,
+ [BNXT_CTX_RIGP1] = DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE,
};
static struct workqueue_struct *bnxt_pf_wq;
+#define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}}
+#define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}
+
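+/* Commonly used ntuple filter masks: match nothing, or match the full
+ * IPv6/IPv4 4-tuple (both addresses plus both L4 ports).
+ */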
+const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = {
+ .ports = {
+ .src = 0,
+ .dst = 0,
+ },
+ .addrs = {
+ .v6addrs = {
+ .src = BNXT_IPV6_MASK_NONE,
+ .dst = BNXT_IPV6_MASK_NONE,
+ },
+ },
+};
+
+const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = {
+ .ports = {
+ .src = cpu_to_be16(0xffff),
+ .dst = cpu_to_be16(0xffff),
+ },
+ .addrs = {
+ .v6addrs = {
+ .src = BNXT_IPV6_MASK_ALL,
+ .dst = BNXT_IPV6_MASK_ALL,
+ },
+ },
+};
+
+const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = {
+ .ports = {
+ .src = cpu_to_be16(0xffff),
+ .dst = cpu_to_be16(0xffff),
+ },
+ .addrs = {
+ .v4addrs = {
+ .src = cpu_to_be32(0xffffffff),
+ .dst = cpu_to_be32(0xffffffff),
+ },
+ },
+};
+
static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
- idx == NETXTREME_E_P5_VF_HV);
+ idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
-#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
-
-#define BNXT_CP_DB_IRQ_DIS(db) \
- writel(DB_CP_IRQ_DIS_FLAGS, db)
#define BNXT_DB_CQ(db, idx) \
writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell)
@@ -411,8 +467,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_addr_t mapping;
unsigned int length, pad = 0;
u32 len, free_size, vlan_tag_flags, cfa_action, flags;
- u16 prod, last_frag;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct pci_dev *pdev = bp->pdev;
+ u16 prod, last_frag, txts_prod;
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_tx_bd *tx_buf;
__le32 lflags = 0;
@@ -464,23 +521,29 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
}
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp &&
+ ptp->tx_tstamp_en) {
+ if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) {
+ lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
+ tx_buf->is_ts_pkt = 1;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ } else if (!skb_is_gso(skb)) {
+ u16 seq_id, hdr_off;
- if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
- atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
- if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
- &ptp->tx_hdr_off)) {
+ if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) &&
+ !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) {
if (vlan_tag_flags)
- ptp->tx_hdr_off += VLAN_HLEN;
+ hdr_off += VLAN_HLEN;
lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
+ tx_buf->is_ts_pkt = 1;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- } else {
- atomic_inc(&bp->ptp_cfg->tx_avail);
+
+ ptp->txts_req[txts_prod].tx_seqid = seq_id;
+ ptp->txts_req[txts_prod].tx_hdr_off = hdr_off;
+ tx_buf->txts_prod = txts_prod;
}
}
}
-
if (unlikely(skb->no_fcs))
lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
@@ -687,9 +750,6 @@ tx_done:
return NETDEV_TX_OK;
tx_dma_error:
- if (BNXT_TX_PTP_IS_SET(lflags))
- atomic_inc(&bp->ptp_cfg->tx_avail);
-
last_frag = i;
/* start back at beginning and unmap skb */
@@ -711,6 +771,13 @@ tx_dma_error:
tx_free:
dev_kfree_skb_any(skb);
tx_kick_pending:
+ if (BNXT_TX_PTP_IS_SET(lflags)) {
+ txr->tx_buf_ring[txr->tx_prod].is_ts_pkt = 0;
+ atomic64_inc(&bp->ptp_cfg->stats.ts_err);
+ if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
+ /* set SKB to err so PTP worker will clean up */
+ ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO);
+ }
if (txr->kick_pending)
bnxt_txr_db_kick(bp, txr, txr->tx_prod);
txr->tx_buf_ring[txr->tx_prod].skb = NULL;
@@ -718,7 +785,8 @@ tx_kick_pending:
return NETDEV_TX_OK;
}
-static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+/* Returns true if some TX packets were left unprocessed. */
+static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
int budget)
{
struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
@@ -727,24 +795,33 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
unsigned int tx_bytes = 0;
u16 cons = txr->tx_cons;
int tx_pkts = 0;
+ bool rc = false;
while (RING_TX(bp, cons) != hw_cons) {
struct bnxt_sw_tx_bd *tx_buf;
struct sk_buff *skb;
+ bool is_ts_pkt;
int j, last;
tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
- cons = NEXT_TX(cons);
skb = tx_buf->skb;
- tx_buf->skb = NULL;
if (unlikely(!skb)) {
bnxt_sched_reset_txr(bp, txr, cons);
- return;
+ return rc;
+ }
+
+ is_ts_pkt = tx_buf->is_ts_pkt;
+ if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) {
+ rc = true;
+ break;
}
+ cons = NEXT_TX(cons);
tx_pkts++;
tx_bytes += skb->len;
+ tx_buf->skb = NULL;
+ tx_buf->is_ts_pkt = 0;
if (tx_buf->is_push) {
tx_buf->is_push = 0;
@@ -764,13 +841,11 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
skb_frag_size(&skb_shinfo(skb)->frags[j]),
DMA_TO_DEVICE);
}
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ if (unlikely(is_ts_pkt)) {
if (BNXT_CHIP_P5(bp)) {
/* PTP worker takes ownership of the skb */
- if (!bnxt_get_tx_ts_p5(bp, skb))
- skb = NULL;
- else
- atomic_inc(&bp->ptp_cfg->tx_avail);
+ bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod);
+ skb = NULL;
}
}
@@ -785,18 +860,27 @@ next_tx_int:
__netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
+
+ return rc;
}
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
struct bnxt_tx_ring_info *txr;
+ bool more = false;
int i;
bnxt_for_each_napi_tx(i, bnapi, txr) {
if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
- __bnxt_tx_int(bp, txr, budget);
+ more |= __bnxt_tx_int(bp, txr, budget);
}
- bnapi->events &= ~BNXT_TX_CMP_EVENT;
+ if (!more)
+ bnapi->events &= ~BNXT_TX_CMP_EVENT;
+}
+
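+/* Allocate header buffers from a dedicated page pool when the system
+ * page size is larger than the RX buffer page size.
+ */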
+static bool bnxt_separate_head_pool(void)
+{
+ return PAGE_SIZE > BNXT_RX_PAGE_SIZE;
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -821,27 +905,19 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
}
static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
+ struct bnxt_rx_ring_info *rxr,
gfp_t gfp)
{
- u8 *data;
- struct pci_dev *pdev = bp->pdev;
+ unsigned int offset;
+ struct page *page;
- if (gfp == GFP_ATOMIC)
- data = napi_alloc_frag(bp->rx_buf_size);
- else
- data = netdev_alloc_frag(bp->rx_buf_size);
- if (!data)
+ page = page_pool_alloc_frag(rxr->head_pool, &offset,
+ bp->rx_buf_size, gfp);
+ if (!page)
return NULL;
- *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
- bp->rx_buf_use_size, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
-
- if (dma_mapping_error(&pdev->dev, *mapping)) {
- skb_free_frag(data);
- data = NULL;
- }
- return data;
+ *mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset;
+ return page_address(page) + offset;
}
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
@@ -863,7 +939,7 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
rx_buf->data = page;
rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
} else {
- u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
+ u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp);
if (!data)
return -ENOMEM;
@@ -1114,13 +1190,14 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
}
skb = napi_build_skb(data, bp->rx_buf_size);
- dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
- bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+ dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
+ bp->rx_dir);
if (!skb) {
- skb_free_frag(data);
+ page_pool_free_va(rxr->head_pool, data, true);
return NULL;
}
+ skb_mark_for_recycle(skb);
skb_reserve(skb, bp->rx_offset);
skb_put(skb, offset_and_len & 0xffff);
return skb;
@@ -1253,9 +1330,9 @@ static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
return RX_AGG_CMP_VALID(agg, *raw_cons);
}
-static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
- unsigned int len,
- dma_addr_t mapping)
+static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
+ unsigned int len,
+ dma_addr_t mapping)
{
struct bnxt *bp = bnapi->bp;
struct pci_dev *pdev = bp->pdev;
@@ -1265,16 +1342,49 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
if (!skb)
return NULL;
- dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
+ dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copybreak,
bp->rx_dir);
memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
len + NET_IP_ALIGN);
- dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
+ dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copybreak,
bp->rx_dir);
skb_put(skb, len);
+
+ return skb;
+}
+
+static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
+ unsigned int len,
+ dma_addr_t mapping)
+{
+ return bnxt_copy_data(bnapi, data, len, mapping);
+}
+
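+/* Copy an XDP buffer into a new skb, preserving any metadata area an
+ * XDP program may have written in front of the packet data.
+ */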
+static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
+ struct xdp_buff *xdp,
+ unsigned int len,
+ dma_addr_t mapping)
+{
+ unsigned int metasize = 0;
+ u8 *data = xdp->data;
+ struct sk_buff *skb;
+
+ len = xdp->data_end - xdp->data_meta;
+ metasize = xdp->data - xdp->data_meta;
+ data = xdp->data_meta;
+
+ skb = bnxt_copy_data(bnapi, data, len, mapping);
+ if (!skb)
+ return skb;
+
+ if (metasize) {
+ skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
+
return skb;
}
@@ -1420,7 +1530,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
if (TPA_START_IS_IPV6(tpa_start1))
tpa_info->gso_type = SKB_GSO_TCPV6;
/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
- else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP &&
+ else if (!BNXT_CHIP_P4_PLUS(bp) &&
TPA_START_HASH_TYPE(tpa_start) == 3)
tpa_info->gso_type = SKB_GSO_TCPV6;
tpa_info->rss_hash =
@@ -1731,21 +1841,22 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
return NULL;
}
- if (len <= bp->rx_copy_thresh) {
+ if (len <= bp->rx_copybreak) {
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->sw_stats->rx.rx_oom_discards += 1;
return NULL;
}
} else {
u8 *new_data;
dma_addr_t new_mapping;
- new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
+ new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr,
+ GFP_ATOMIC);
if (!new_data) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->sw_stats->rx.rx_oom_discards += 1;
return NULL;
}
@@ -1754,16 +1865,16 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
tpa_info->mapping = new_mapping;
skb = napi_build_skb(data, bp->rx_buf_size);
- dma_unmap_single_attrs(&bp->pdev->dev, mapping,
- bp->rx_buf_use_size, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
+ dma_sync_single_for_cpu(&bp->pdev->dev, mapping,
+ bp->rx_buf_use_size, bp->rx_dir);
if (!skb) {
- skb_free_frag(data);
+ page_pool_free_va(rxr->head_pool, data, true);
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->sw_stats->rx.rx_oom_discards += 1;
return NULL;
}
+ skb_mark_for_recycle(skb);
skb_reserve(skb, bp->rx_offset);
skb_put(skb, len);
}
@@ -1772,7 +1883,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->sw_stats->rx.rx_oom_discards += 1;
return NULL;
}
}
@@ -1927,6 +2038,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct rx_cmp_ext *rxcmp1;
u32 tmp_raw_cons = *raw_cons;
u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+ struct skb_shared_info *sinfo;
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
u8 *data_ptr, agg_bufs, cmp_type;
@@ -2030,7 +2142,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
- bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
+ bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
!(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
netdev_warn_once(bp->dev, "RX buffer error %x\n",
@@ -2051,24 +2163,31 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
cp_cons, agg_bufs,
false);
- if (!frag_len) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!frag_len)
+ goto oom_next_rx;
}
xdp_active = true;
}
if (xdp_active) {
- if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) {
+ if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) {
rc = 1;
goto next_rx;
}
+ if (xdp_buff_has_frags(&xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(&xdp);
+ agg_bufs = sinfo->nr_frags;
+ } else {
+ agg_bufs = 0;
+ }
}
- if (len <= bp->rx_copy_thresh) {
- skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
+ if (len <= bp->rx_copybreak) {
+ if (!xdp_active)
+ skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
+ else
+ skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr);
bnxt_reuse_rx_data(rxr, cons, data);
if (!skb) {
if (agg_bufs) {
@@ -2078,9 +2197,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
else
bnxt_xdp_buff_frags_free(rxr, &xdp);
}
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ goto oom_next_rx;
}
} else {
u32 payload;
@@ -2091,29 +2208,22 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
payload = 0;
skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
payload | len);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!skb)
+ goto oom_next_rx;
}
if (agg_bufs) {
if (!xdp_active) {
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!skb)
+ goto oom_next_rx;
} else {
- skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
+ skb = bnxt_xdp_build_skb(bp, skb, agg_bufs,
+ rxr->page_pool, &xdp);
if (!skb) {
/* we should be able to free the old skb here */
bnxt_xdp_buff_frags_free(rxr, &xdp);
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ goto oom_next_rx;
}
}
}
@@ -2124,15 +2234,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
type = bnxt_rss_ext_op(bp, rxcmp);
} else {
- u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
+ u32 itypes = RX_CMP_ITYPES(rxcmp);
- /* RSS profiles 1 and 3 with extract code 0 for inner
- * 4-tuple
- */
- if (hash_type != 1 && hash_type != 3)
- type = PKT_HASH_TYPE_L3;
- else
+ if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
+ itypes == RX_CMP_FLAGS_ITYPE_UDP)
type = PKT_HASH_TYPE_L4;
+ else
+ type = PKT_HASH_TYPE_L3;
}
skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
}
@@ -2156,7 +2264,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
} else {
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
if (dev->features & NETIF_F_RXCSUM)
- bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
+ bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
}
}
@@ -2167,9 +2275,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- spin_lock_bh(&ptp->ptp_lock);
- ns = timecounter_cyc2time(&ptp->tc, ts);
- spin_unlock_bh(&ptp->ptp_lock);
+ ns = bnxt_timecounter_cyc2time(ptp, ts);
memset(skb_hwtstamps(skb), 0,
sizeof(*skb_hwtstamps(skb)));
skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
@@ -2191,6 +2297,11 @@ next_rx_no_prod_no_len:
*raw_cons = tmp_raw_cons;
return rc;
+
+oom_next_rx:
+ cpr->sw_stats->rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
}
/* In netpoll mode, if we are using a combined completion ring, we need to
@@ -2237,7 +2348,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
}
rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
if (rc && rc != -EBUSY)
- cpr->sw_stats.rx.rx_netpoll_discards += 1;
+ cpr->sw_stats->rx.rx_netpoll_discards += 1;
return rc;
}
@@ -2371,6 +2482,59 @@ static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
return false;
}
+bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type)
+{
+ u32 flags = bp->ctx->ctx_arr[type].flags;
+
+ return (flags & BNXT_CTX_MEM_TYPE_VALID) &&
+ ((flags & BNXT_CTX_MEM_FW_TRACE) ||
+ (flags & BNXT_CTX_MEM_FW_BIN_TRACE));
+}
+
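+/* Compute the address of the last byte of the trace buffer and write a
+ * magic value there so that a wrap of the buffer can be detected later.
+ */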
+static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm)
+{
+ u32 mem_size, pages, rem_bytes, magic_byte_offset;
+ u16 trace_type = bnxt_bstore_to_trace[ctxm->type];
+ struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ struct bnxt_ring_mem_info *rmem, *rmem_pg_tbl;
+ struct bnxt_bs_trace_info *bs_trace;
+ int last_pg;
+
+ if (ctxm->instance_bmap && ctxm->instance_bmap > 1)
+ return;
+
+ mem_size = ctxm->max_entries * ctxm->entry_size;
+ rem_bytes = mem_size % BNXT_PAGE_SIZE;
+ pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
+
+ last_pg = (pages - 1) & (MAX_CTX_PAGES - 1);
+ magic_byte_offset = (rem_bytes ? rem_bytes : BNXT_PAGE_SIZE) - 1;
+
+ rmem = &ctx_pg[0].ring_mem;
+ bs_trace = &bp->bs_trace[trace_type];
+ bs_trace->ctx_type = ctxm->type;
+ bs_trace->trace_type = trace_type;
+ if (pages > MAX_CTX_PAGES) {
+ int last_pg_dir = rmem->nr_pages - 1;
+
+ rmem_pg_tbl = &ctx_pg[0].ctx_pg_tbl[last_pg_dir]->ring_mem;
+ bs_trace->magic_byte = rmem_pg_tbl->pg_arr[last_pg];
+ } else {
+ bs_trace->magic_byte = rmem->pg_arr[last_pg];
+ }
+ bs_trace->magic_byte += magic_byte_offset;
+ *bs_trace->magic_byte = BNXT_TRACE_BUF_MAGIC_BYTE;
+}
+
+#define BNXT_EVENT_BUF_PRODUCER_TYPE(data1) \
+ (((data1) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK) >>\
+ ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT)
+
+#define BNXT_EVENT_BUF_PRODUCER_OFFSET(data2) \
+ (((data2) & \
+ ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK) >>\
+ ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT)
+
#define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \
((data2) & \
ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
@@ -2446,6 +2610,9 @@ static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
}
return false;
}
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
+ netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n");
+ break;
default:
netdev_err(bp->dev, "FW reported unknown error type %u\n",
err_type);
@@ -2661,17 +2828,18 @@ static int bnxt_async_event_process(struct bnxt *bp,
case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
if (BNXT_PTP_USE_RTC(bp)) {
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ unsigned long flags;
u64 ns;
if (!ptp)
goto async_event_process_exit;
- spin_lock_bh(&ptp->ptp_lock);
bnxt_ptp_update_current_time(bp);
ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
BNXT_PHC_BITS) | ptp->current_time);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
bnxt_ptp_rtc_timecounter_init(ptp, ns);
- spin_unlock_bh(&ptp->ptp_lock);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
}
break;
}
@@ -2683,11 +2851,19 @@ static int bnxt_async_event_process(struct bnxt *bp,
hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
goto async_event_process_exit;
}
+ case ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER: {
+ u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1);
+ u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2);
+
+ bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset);
+ goto async_event_process_exit;
+ }
default:
goto async_event_process_exit;
}
__bnxt_queue_sp_work(bp);
async_event_process_exit:
+ bnxt_ulp_async_events(bp, cmpl);
return 0;
}
@@ -2730,6 +2906,13 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
return 0;
}
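+/* The default VNIC is active once it has a valid firmware VNIC id and
+ * a nonzero MRU configured.
+ */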
+static bool bnxt_vnic_is_active(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+
+ return vnic->fw_vnic_id != INVALID_HW_RING_ID && vnic->mru > 0;
+}
+
static irqreturn_t bnxt_msix(int irq, void *dev_instance)
{
struct bnxt_napi *bnapi = dev_instance;
@@ -2754,34 +2937,6 @@ static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
return TX_CMP_VALID(txcmp, raw_cons);
}
-static irqreturn_t bnxt_inta(int irq, void *dev_instance)
-{
- struct bnxt_napi *bnapi = dev_instance;
- struct bnxt *bp = bnapi->bp;
- struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
- u32 cons = RING_CMP(cpr->cp_raw_cons);
- u32 int_status;
-
- prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
-
- if (!bnxt_has_work(bp, cpr)) {
- int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
- /* return if erroneous interrupt */
- if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
- return IRQ_NONE;
- }
-
- /* disable ring IRQ */
- BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
-
- /* Return here if interrupt is shared and is disabled. */
- if (unlikely(atomic_read(&bp->intr_sem) != 0))
- return IRQ_HANDLED;
-
- napi_schedule(&bnapi->napi);
- return IRQ_HANDLED;
-}
-
static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
int budget)
{
@@ -2831,6 +2986,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
cpr->has_more_work = 1;
break;
}
+ } else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) {
+ bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp);
} else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
if (likely(budget))
@@ -2862,8 +3019,10 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
}
- if (event & BNXT_REDIRECT_EVENT)
+ if (event & BNXT_REDIRECT_EVENT) {
xdp_do_flush();
+ event &= ~BNXT_REDIRECT_EVENT;
+ }
if (event & BNXT_TX_EVENT) {
struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
@@ -2873,6 +3032,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
wmb();
bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
+ event &= ~BNXT_TX_EVENT;
}
cpr->cp_raw_cons = raw_cons;
@@ -2890,13 +3050,14 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ bnapi->events &= ~BNXT_RX_EVENT;
}
if (bnapi->events & BNXT_AGG_EVENT) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnapi->events &= ~BNXT_AGG_EVENT;
}
- bnapi->events &= BNXT_TX_CMP_EVENT;
}
static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
@@ -3019,14 +3180,14 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
break;
}
}
- if (bp->flags & BNXT_FLAG_DIM) {
+ if ((bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
struct dim_sample dim_sample = {};
dim_update_sample(cpr->event_ctr,
cpr->rx_packets,
cpr->rx_bytes,
&dim_sample);
- net_dim(&cpr->dim, dim_sample);
+ net_dim(&cpr->dim, &dim_sample);
}
return work_done;
}
@@ -3150,14 +3311,14 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
poll_done:
cpr_rx = &cpr->cp_ring_arr[0];
if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX &&
- (bp->flags & BNXT_FLAG_DIM)) {
+ (bp->flags & BNXT_FLAG_DIM) && bnxt_vnic_is_active(bp)) {
struct dim_sample dim_sample = {};
dim_update_sample(cpr->event_ctr,
cpr_rx->rx_packets,
cpr_rx->rx_bytes,
&dim_sample);
- net_dim(&cpr->dim, dim_sample);
+ net_dim(&cpr->dim, &dim_sample);
}
return work_done;
}
@@ -3233,62 +3394,34 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
}
}
-static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
+static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
- struct pci_dev *pdev = bp->pdev;
- struct bnxt_tpa_idx_map *map;
- int i, max_idx, max_agg_idx;
+ int i, max_idx;
max_idx = bp->rx_nr_pages * RX_DESC_CNT;
- max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
- if (!rxr->rx_tpa)
- goto skip_rx_tpa_free;
-
- for (i = 0; i < bp->max_tpa; i++) {
- struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
- u8 *data = tpa_info->data;
-
- if (!data)
- continue;
-
- dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
- bp->rx_buf_use_size, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
-
- tpa_info->data = NULL;
-
- skb_free_frag(data);
- }
-
-skip_rx_tpa_free:
- if (!rxr->rx_buf_ring)
- goto skip_rx_buf_free;
for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
- dma_addr_t mapping = rx_buf->mapping;
void *data = rx_buf->data;
if (!data)
continue;
rx_buf->data = NULL;
- if (BNXT_RX_PAGE_MODE(bp)) {
+ if (BNXT_RX_PAGE_MODE(bp))
page_pool_recycle_direct(rxr->page_pool, data);
- } else {
- dma_unmap_single_attrs(&pdev->dev, mapping,
- bp->rx_buf_use_size, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
- skb_free_frag(data);
- }
+ else
+ page_pool_free_va(rxr->head_pool, data, true);
}
+}
-skip_rx_buf_free:
- if (!rxr->rx_agg_ring)
- goto skip_rx_agg_free;
+static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ int i, max_idx;
+
+ max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
- for (i = 0; i < max_agg_idx; i++) {
+ for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
struct page *page = rx_agg_buf->page;
@@ -3300,6 +3433,46 @@ skip_rx_buf_free:
page_pool_recycle_direct(rxr->page_pool, page);
}
+}
+
+static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ int i;
+
+ for (i = 0; i < bp->max_tpa; i++) {
+ struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
+ u8 *data = tpa_info->data;
+
+ if (!data)
+ continue;
+
+ tpa_info->data = NULL;
+ page_pool_free_va(rxr->head_pool, data, false);
+ }
+}
+
+static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_tpa_idx_map *map;
+
+ if (!rxr->rx_tpa)
+ goto skip_rx_tpa_free;
+
+ bnxt_free_one_tpa_info_data(bp, rxr);
+
+skip_rx_tpa_free:
+ if (!rxr->rx_buf_ring)
+ goto skip_rx_buf_free;
+
+ bnxt_free_one_rx_ring(bp, rxr);
+
+skip_rx_buf_free:
+ if (!rxr->rx_agg_ring)
+ goto skip_rx_agg_free;
+
+ bnxt_free_one_rx_agg_ring(bp, rxr);
skip_rx_agg_free:
map = rxr->rx_tpa_idx_map;
@@ -3315,7 +3488,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
return;
for (i = 0; i < bp->rx_nr_rings; i++)
- bnxt_free_one_rx_ring_skbs(bp, i);
+ bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
}
static void bnxt_free_skbs(struct bnxt *bp)
@@ -3341,6 +3514,35 @@ static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
*(p2 + i + offset) = init_val;
}
+static size_t __bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem,
+ void *buf, size_t offset, size_t head,
+ size_t tail)
+{
+ int i, head_page, start_idx, source_offset;
+ size_t len, rem_len, total_len, max_bytes;
+
+ head_page = head / rmem->page_size;
+ source_offset = head % rmem->page_size;
+ total_len = (tail - head) & MAX_CTX_BYTES_MASK;
+ if (!total_len)
+ total_len = MAX_CTX_BYTES;
+ start_idx = head_page % MAX_CTX_PAGES;
+ max_bytes = (rmem->nr_pages - start_idx) * rmem->page_size -
+ source_offset;
+ total_len = min(total_len, max_bytes);
+ rem_len = total_len;
+
+ for (i = start_idx; rem_len; i++, source_offset = 0) {
+ len = min((size_t)(rmem->page_size - source_offset), rem_len);
+ if (buf)
+ memcpy(buf + offset, rmem->pg_arr[i] + source_offset,
+ len);
+ offset += len;
+ rem_len -= len;
+ }
+ return total_len;
+}
+
static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
struct pci_dev *pdev = bp->pdev;
@@ -3427,29 +3629,64 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
return 0;
}
+static void bnxt_free_one_tpa_info(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ int i;
+
+ kfree(rxr->rx_tpa_idx_map);
+ rxr->rx_tpa_idx_map = NULL;
+ if (rxr->rx_tpa) {
+ for (i = 0; i < bp->max_tpa; i++) {
+ kfree(rxr->rx_tpa[i].agg_arr);
+ rxr->rx_tpa[i].agg_arr = NULL;
+ }
+ }
+ kfree(rxr->rx_tpa);
+ rxr->rx_tpa = NULL;
+}
+
static void bnxt_free_tpa_info(struct bnxt *bp)
{
- int i, j;
+ int i;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- kfree(rxr->rx_tpa_idx_map);
- rxr->rx_tpa_idx_map = NULL;
- if (rxr->rx_tpa) {
- for (j = 0; j < bp->max_tpa; j++) {
- kfree(rxr->rx_tpa[j].agg_arr);
- rxr->rx_tpa[j].agg_arr = NULL;
- }
- }
- kfree(rxr->rx_tpa);
- rxr->rx_tpa = NULL;
+ bnxt_free_one_tpa_info(bp, rxr);
}
}
+static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct rx_agg_cmp *agg;
+ int i;
+
+ rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa)
+ return -ENOMEM;
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ return 0;
+ for (i = 0; i < bp->max_tpa; i++) {
+ agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
+ if (!agg)
+ return -ENOMEM;
+ rxr->rx_tpa[i].agg_arr = agg;
+ }
+ rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa_idx_map)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int bnxt_alloc_tpa_info(struct bnxt *bp)
{
- int i, j;
+ int i, rc;
bp->max_tpa = MAX_TPA;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
@@ -3460,25 +3697,10 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct rx_agg_cmp *agg;
-
- rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
- GFP_KERNEL);
- if (!rxr->rx_tpa)
- return -ENOMEM;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
- continue;
- for (j = 0; j < bp->max_tpa; j++) {
- agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
- if (!agg)
- return -ENOMEM;
- rxr->rx_tpa[j].agg_arr = agg;
- }
- rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
- GFP_KERNEL);
- if (!rxr->rx_tpa_idx_map)
- return -ENOMEM;
+ rc = bnxt_alloc_one_tpa_info(bp, rxr);
+ if (rc)
+ return rc;
}
return 0;
}
@@ -3502,7 +3724,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
xdp_rxq_info_unreg(&rxr->xdp_rxq);
page_pool_destroy(rxr->page_pool);
- rxr->page_pool = NULL;
+ if (bnxt_separate_head_pool())
+ page_pool_destroy(rxr->head_pool);
+ rxr->page_pool = rxr->head_pool = NULL;
kfree(rxr->rx_agg_bmap);
rxr->rx_agg_bmap = NULL;
@@ -3516,14 +3740,16 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
}
static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
- struct bnxt_rx_ring_info *rxr)
+ struct bnxt_rx_ring_info *rxr,
+ int numa_node)
{
struct page_pool_params pp = { 0 };
+ struct page_pool *pool;
pp.pool_size = bp->rx_agg_ring_size;
if (BNXT_RX_PAGE_MODE(bp))
pp.pool_size += bp->rx_ring_size;
- pp.nid = dev_to_node(&bp->pdev->dev);
+ pp.nid = numa_node;
pp.napi = &rxr->bnapi->napi;
pp.netdev = bp->dev;
pp.dev = &bp->pdev->dev;
@@ -3531,19 +3757,44 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
pp.max_len = PAGE_SIZE;
pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
- rxr->page_pool = page_pool_create(&pp);
- if (IS_ERR(rxr->page_pool)) {
- int err = PTR_ERR(rxr->page_pool);
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+ rxr->page_pool = pool;
- rxr->page_pool = NULL;
- return err;
+ if (bnxt_separate_head_pool()) {
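+ /* size the header pool to the RX ring, with a floor of 1024 */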
+ pp.pool_size = max(bp->rx_ring_size, 1024);
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ goto err_destroy_pp;
}
+ rxr->head_pool = pool;
+
+ return 0;
+
+err_destroy_pp:
+ page_pool_destroy(rxr->page_pool);
+ rxr->page_pool = NULL;
+ return PTR_ERR(pool);
+}
+
+static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ u16 mem_size;
+
+ rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
+ mem_size = rxr->rx_agg_bmap_size / 8;
+ rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+ if (!rxr->rx_agg_bmap)
+ return -ENOMEM;
+
return 0;
}
static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
- int i, rc = 0, agg_rings = 0;
+ int numa_node = dev_to_node(&bp->pdev->dev);
+ int i, rc = 0, agg_rings = 0, cpu;
if (!bp->rx_ring)
return -ENOMEM;
@@ -3554,10 +3805,15 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring;
+ int cpu_node;
ring = &rxr->rx_ring_struct;
- rc = bnxt_alloc_rx_page_pool(bp, rxr);
+ cpu = cpumask_local_spread(i, numa_node);
+ cpu_node = cpu_to_node(cpu);
+ netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
+ i, cpu_node);
+ rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
if (rc)
return rc;
@@ -3579,19 +3835,15 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
ring->grp_idx = i;
if (agg_rings) {
- u16 mem_size;
-
ring = &rxr->rx_agg_ring_struct;
rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
return rc;
ring->grp_idx = i;
- rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
- mem_size = rxr->rx_agg_bmap_size / 8;
- rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
- if (!rxr->rx_agg_bmap)
- return -ENOMEM;
+ rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
+ if (rc)
+ return rc;
}
}
if (bp->flags & BNXT_FLAG_TPA)
@@ -3816,13 +4068,12 @@ static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
- int i, j, rc, ulp_base_vec, ulp_msix;
+ int i, j, rc, ulp_msix;
int tcs = bp->num_tc;
if (!tcs)
tcs = 1;
ulp_msix = bnxt_get_ulp_msix_num(bp);
- ulp_base_vec = bnxt_get_ulp_msix_base(bp);
for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr, *cpr2;
@@ -3841,10 +4092,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
if (rc)
return rc;
- if (ulp_msix && i >= ulp_base_vec)
- ring->map_idx = i + ulp_msix;
- else
- ring->map_idx = i;
+ ring->map_idx = ulp_msix + i;
if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
continue;
@@ -3874,6 +4122,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
if (rc)
return rc;
cpr2->bnapi = bnapi;
+ cpr2->sw_stats = cpr->sw_stats;
cpr2->cp_idx = k;
if (!k && rx) {
bp->rx_ring[i].rx_cpr = cpr2;
@@ -3892,6 +4141,63 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
return 0;
}
+static void bnxt_init_rx_ring_struct(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_mem_info *rmem;
+ struct bnxt_ring_struct *ring;
+
+ ring = &rxr->rx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->rx_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_desc_ring;
+ rmem->dma_arr = rxr->rx_desc_mapping;
+ rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_buf_ring;
+
+ ring = &rxr->rx_agg_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->rx_agg_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
+ rmem->dma_arr = rxr->rx_agg_desc_mapping;
+ rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_agg_ring;
+}
+
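+/* Drop all references to ring memory and page pools from this rxr so
+ * they cannot be freed or reused through a stale structure.
+ */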
+static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_mem_info *rmem;
+ struct bnxt_ring_struct *ring;
+ int i;
+
+ rxr->page_pool->p.napi = NULL;
+ rxr->page_pool = NULL;
+ memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
+
+ ring = &rxr->rx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->pg_tbl = NULL;
+ rmem->pg_tbl_map = 0;
+ for (i = 0; i < rmem->nr_pages; i++) {
+ rmem->pg_arr[i] = NULL;
+ rmem->dma_arr[i] = 0;
+ }
+ *rmem->vmem = NULL;
+
+ ring = &rxr->rx_agg_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->pg_tbl = NULL;
+ rmem->pg_tbl_map = 0;
+ for (i = 0; i < rmem->nr_pages; i++) {
+ rmem->pg_arr[i] = NULL;
+ rmem->dma_arr[i] = 0;
+ }
+ *rmem->vmem = NULL;
+}
+
static void bnxt_init_ring_struct(struct bnxt *bp)
{
int i, j;
@@ -3974,58 +4280,88 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
}
}
-static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
+static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ int ring_nr)
{
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
- struct net_device *dev = bp->dev;
u32 prod;
int i;
prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
- netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
+ netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
ring_nr, i, bp->rx_ring_size);
break;
}
prod = NEXT_RX(prod);
}
rxr->rx_prod = prod;
+}
- if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
- return 0;
+static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ int ring_nr)
+{
+ u32 prod;
+ int i;
prod = rxr->rx_agg_prod;
for (i = 0; i < bp->rx_agg_ring_size; i++) {
if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
- netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
+ netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
ring_nr, i, bp->rx_agg_ring_size);
break;
}
prod = NEXT_RX_AGG(prod);
}
rxr->rx_agg_prod = prod;
+}
- if (rxr->rx_tpa) {
- dma_addr_t mapping;
- u8 *data;
+static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ dma_addr_t mapping;
+ u8 *data;
+ int i;
- for (i = 0; i < bp->max_tpa; i++) {
- data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ for (i = 0; i < bp->max_tpa; i++) {
+ data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- rxr->rx_tpa[i].data = data;
- rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
- rxr->rx_tpa[i].mapping = mapping;
- }
+ rxr->rx_tpa[i].data = data;
+ rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
+ rxr->rx_tpa[i].mapping = mapping;
}
+
return 0;
}
-static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
+ int rc;
+
+ bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
+
+ if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+ return 0;
+
+ bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr);
+
+ if (rxr->rx_tpa) {
+ rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
{
- struct bnxt_rx_ring_info *rxr;
struct bnxt_ring_struct *ring;
u32 type;
@@ -4035,28 +4371,43 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
if (NET_IP_ALIGN == 2)
type |= RX_BD_FLAGS_SOP;
- rxr = &bp->rx_ring[ring_nr];
ring = &rxr->rx_ring_struct;
bnxt_init_rxbd_pages(ring, type);
-
- netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
- &rxr->bnapi->napi);
-
- if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
- bpf_prog_add(bp->xdp_prog, 1);
- rxr->xdp_prog = bp->xdp_prog;
- }
ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_struct *ring;
+ u32 type;
ring = &rxr->rx_agg_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
-
if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
bnxt_init_rxbd_pages(ring, type);
}
+}
+
+static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+ struct bnxt_rx_ring_info *rxr;
+
+ rxr = &bp->rx_ring[ring_nr];
+ bnxt_init_one_rx_ring_rxbd(bp, rxr);
+
+ netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
+ &rxr->bnapi->napi);
+
+ if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
+ bpf_prog_add(bp->xdp_prog, 1);
+ rxr->xdp_prog = bp->xdp_prog;
+ }
+
+ bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);
return bnxt_alloc_one_rx_ring(bp, ring_nr);
}
@@ -4168,8 +4519,12 @@ static int bnxt_alloc_vnics(struct bnxt *bp)
int num_vnics = 1;
#ifdef CONFIG_RFS_ACCEL
- if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == BNXT_FLAG_RFS)
- num_vnics += bp->rx_nr_rings;
+ if (bp->flags & BNXT_FLAG_RFS) {
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ num_vnics++;
+ else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ num_vnics += bp->rx_nr_rings;
+ }
#endif
if (BNXT_CHIP_TYPE_NITRO_A0(bp))
@@ -4186,6 +4541,7 @@ static int bnxt_alloc_vnics(struct bnxt *bp)
static void bnxt_init_vnics(struct bnxt *bp)
{
+ struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
int i;
for (i = 0; i < bp->nr_vnics; i++) {
@@ -4193,26 +4549,40 @@ static void bnxt_init_vnics(struct bnxt *bp)
int j;
vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ vnic->vnic_id = i;
for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
if (bp->vnic_info[i].rss_hash_key) {
- if (!i) {
+ if (i == BNXT_VNIC_DEFAULT) {
u8 *key = (void *)vnic->rss_hash_key;
int k;
+ if (!bp->rss_hash_key_valid &&
+ !bp->rss_hash_key_updated) {
+ get_random_bytes(bp->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+ bp->rss_hash_key_updated = true;
+ }
+
+ memcpy(vnic->rss_hash_key, bp->rss_hash_key,
+ HW_HASH_KEY_SIZE);
+
+ if (!bp->rss_hash_key_updated)
+ continue;
+
+ bp->rss_hash_key_updated = false;
+ bp->rss_hash_key_valid = true;
+
bp->toeplitz_prefix = 0;
- get_random_bytes(vnic->rss_hash_key,
- HW_HASH_KEY_SIZE);
for (k = 0; k < 8; k++) {
bp->toeplitz_prefix <<= 8;
bp->toeplitz_prefix |= key[k];
}
} else {
- memcpy(vnic->rss_hash_key,
- bp->vnic_info[0].rss_hash_key,
+ memcpy(vnic->rss_hash_key, vnic0->rss_hash_key,
HW_HASH_KEY_SIZE);
}
}
@@ -4247,6 +4617,17 @@ void bnxt_set_tpa_flags(struct bnxt *bp)
bp->flags |= BNXT_FLAG_GRO;
}
+static void bnxt_init_ring_params(struct bnxt *bp)
+{
+ unsigned int rx_size;
+
+ bp->rx_copybreak = BNXT_DEFAULT_RX_COPYBREAK;
+ /* Try to fit 4 chunks into a 4k page */
+ rx_size = SZ_1K -
+ NET_SKB_PAD - SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ bp->dev->cfg->hds_thresh = max(BNXT_DEFAULT_RX_COPYBREAK, rx_size);
+}
+
/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
* be set on entry.
*/
@@ -4261,12 +4642,11 @@ void bnxt_set_ring_params(struct bnxt *bp)
rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
ring_size = bp->rx_ring_size;
bp->rx_agg_ring_size = 0;
bp->rx_agg_nr_pages = 0;
- if (bp->flags & BNXT_FLAG_TPA)
+ if (bp->flags & BNXT_FLAG_TPA || bp->flags & BNXT_FLAG_HDS)
agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
bp->flags &= ~BNXT_FLAG_JUMBO;
@@ -4306,7 +4686,10 @@ void bnxt_set_ring_params(struct bnxt *bp)
ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
} else {
- rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_size = max3(BNXT_DEFAULT_RX_COPYBREAK,
+ bp->rx_copybreak,
+ bp->dev->cfg_pending->hds_thresh);
+ rx_size = SKB_DATA_ALIGN(rx_size + NET_IP_ALIGN);
rx_space = rx_size + NET_SKB_PAD +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
@@ -4347,12 +4730,12 @@ void bnxt_set_ring_params(struct bnxt *bp)
/* Changing allocation mode of RX rings.
* TODO: Update when extending xdp_rxq_info to support allocation modes.
*/
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
struct net_device *dev = bp->dev;
if (page_mode) {
- bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+ bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS);
bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
if (bp->xdp_prog->aux->xdp_has_frags)
@@ -4368,15 +4751,30 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
bp->rx_skb_func = bnxt_rx_page_skb;
}
bp->rx_dir = DMA_BIDIRECTIONAL;
- /* Disable LRO or GRO_HW */
- netdev_update_features(dev);
} else {
dev->max_mtu = bp->max_mtu;
bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
bp->rx_dir = DMA_FROM_DEVICE;
bp->rx_skb_func = bnxt_rx_skb;
}
- return 0;
+}
+
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+{
+ __bnxt_set_rx_skb_mode(bp, page_mode);
+
+ if (!page_mode) {
+ int rx, tx;
+
+ bnxt_get_max_rings(bp, &rx, &tx, true);
+ if (rx > 1) {
+ bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
+ bp->dev->hw_features |= NETIF_F_LRO;
+ }
+ }
+
+ /* Update LRO and GRO_HW availability */
+ netdev_update_features(bp->dev);
}
static void bnxt_free_vnic_attributes(struct bnxt *bp)
@@ -4696,6 +5094,9 @@ static void bnxt_free_ring_stats(struct bnxt *bp)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
bnxt_free_stats_mem(bp, &cpr->stats);
+
+ kfree(cpr->sw_stats);
+ cpr->sw_stats = NULL;
}
}
@@ -4710,6 +5111,10 @@ static int bnxt_alloc_stats(struct bnxt *bp)
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL);
+ if (!cpr->sw_stats)
+ return -ENOMEM;
+
cpr->stats.len = size;
rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
if (rc)
@@ -4798,6 +5203,44 @@ static void bnxt_clear_ring_indices(struct bnxt *bp)
}
}
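+/* User-created filters (ring-directed L2 filters and no-aging ntuple
+ * filters) are kept on a separate list so they can be found and
+ * reconfigured independently of the aging filter hash tables.
+ */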
+void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ u8 type = fltr->type, flags = fltr->flags;
+
+ INIT_LIST_HEAD(&fltr->list);
+ if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) ||
+ (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING))
+ list_add_tail(&fltr->list, &bp->usr_fltr_list);
+}
+
+void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ if (!list_empty(&fltr->list))
+ list_del_init(&fltr->list);
+}
+
+static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
+{
+ struct bnxt_filter_base *usr_fltr, *tmp;
+
+ list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
+ if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2)
+ continue;
+ bnxt_del_one_usr_fltr(bp, usr_fltr);
+ }
+}
+
+static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ hlist_del(&fltr->hash);
+ bnxt_del_one_usr_fltr(bp, fltr);
+ if (fltr->flags) {
+ clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
+ bp->ntp_fltr_count--;
+ }
+ kfree(fltr);
+}
+
static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
{
int i;
@@ -4813,12 +5256,10 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
head = &bp->ntp_fltr_hash_tbl[i];
hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
bnxt_del_l2_filter(bp, fltr->l2_fltr);
- if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST))
+ if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
+ !list_empty(&fltr->base.list)))
continue;
- hlist_del(&fltr->base.hash);
- clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
- bp->ntp_fltr_count--;
- kfree(fltr);
+ bnxt_del_fltr(bp, &fltr->base);
}
}
if (!all)
@@ -4840,7 +5281,7 @@ static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
bp->ntp_fltr_count = 0;
- bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_MAX_FLTR, GFP_KERNEL);
+ bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL);
if (!bp->ntp_fltr_bmap)
rc = -ENOMEM;
@@ -4859,14 +5300,10 @@ static void bnxt_free_l2_filters(struct bnxt *bp, bool all)
head = &bp->l2_fltr_hash_tbl[i];
hlist_for_each_entry_safe(fltr, tmp, head, base.hash) {
- if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST))
+ if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) ||
+ !list_empty(&fltr->base.list)))
continue;
- hlist_del(&fltr->base.hash);
- if (fltr->base.flags) {
- clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
- bp->ntp_fltr_count--;
- }
- kfree(fltr);
+ bnxt_del_fltr(bp, &fltr->base);
}
}
}
@@ -5039,8 +5476,13 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
if (rc)
goto alloc_mem_err;
- bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
- BNXT_VNIC_UCAST_FLAG;
+ bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG |
+ BNXT_VNIC_MCAST_FLAG |
+ BNXT_VNIC_UCAST_FLAG;
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS))
+ bp->vnic_info[BNXT_VNIC_NTUPLE].flags |=
+ BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG;
+
rc = bnxt_alloc_vnic_attributes(bp);
if (rc)
goto alloc_mem_err;
@@ -5342,6 +5784,7 @@ void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr)
return;
}
hlist_del_rcu(&fltr->base.hash);
+ bnxt_del_one_usr_fltr(bp, &fltr->base);
if (fltr->base.flags) {
clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap);
bp->ntp_fltr_count--;
@@ -5480,13 +5923,15 @@ static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr,
int bit_id;
bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
- BNXT_MAX_FLTR, 0);
+ bp->max_fltr, 0);
if (bit_id < 0)
return -ENOMEM;
fltr->base.sw_id = (u16)bit_id;
+ bp->ntp_fltr_count++;
}
head = &bp->l2_fltr_hash_tbl[idx];
hlist_add_head_rcu(&fltr->base.hash, head);
+ bnxt_insert_usr_fltr(bp, &fltr->base);
set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
atomic_set(&fltr->refcnt, 1);
return 0;
@@ -5519,6 +5964,40 @@ static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp,
return fltr;
}
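+/* Like bnxt_alloc_l2_filter() but returns -EEXIST if an identical
+ * filter already exists, taking the ntuple filter lock across the
+ * lookup and insertion.
+ */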
+struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ u16 flags)
+{
+ struct bnxt_l2_filter *fltr;
+ u32 idx;
+ int rc;
+
+ idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) &
+ BNXT_L2_FLTR_HASH_MASK;
+ spin_lock_bh(&bp->ntp_fltr_lock);
+ fltr = __bnxt_lookup_l2_filter(bp, key, idx);
+ if (fltr) {
+ fltr = ERR_PTR(-EEXIST);
+ goto l2_filter_exit;
+ }
+ fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC);
+ if (!fltr) {
+ fltr = ERR_PTR(-ENOMEM);
+ goto l2_filter_exit;
+ }
+ fltr->base.flags = flags;
+ rc = bnxt_init_l2_filter(bp, fltr, key, idx);
+ if (rc) {
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ bnxt_del_l2_filter(bp, fltr);
+ return ERR_PTR(rc);
+ }
+
+l2_filter_exit:
+ spin_unlock_bh(&bp->ntp_fltr_lock);
+ return fltr;
+}
+
static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx)
{
#ifdef CONFIG_BNXT_SRIOV
@@ -5650,15 +6129,55 @@ void bnxt_fill_ipv6_mask(__be32 mask[4])
mask[i] = cpu_to_be32(~0);
}
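+/* Select the destination for an ntuple filter: the VNIC of a custom
+ * RSS context, the dedicated ntuple VNIC indexed by RX ring, or the
+ * RX ring id directly on older firmware.
+ */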
+static void
+bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
+ struct hwrm_cfa_ntuple_filter_alloc_input *req,
+ struct bnxt_ntuple_filter *fltr)
+{
+ u16 rxq = fltr->base.rxq;
+
+ if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
+ struct ethtool_rxfh_context *ctx;
+ struct bnxt_rss_ctx *rss_ctx;
+ struct bnxt_vnic_info *vnic;
+
+ ctx = xa_load(&bp->dev->ethtool->rss_ctx,
+ fltr->base.fw_vnic_id);
+ if (ctx) {
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+ vnic = &rss_ctx->vnic;
+
+ req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
+ }
+ return;
+ }
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
+ struct bnxt_vnic_info *vnic;
+ u32 enables;
+
+ vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
+ req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
+ enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
+ req->enables |= cpu_to_le32(enables);
+ req->rfs_ring_tbl_idx = cpu_to_le16(rxq);
+ } else {
+ u32 flags;
+
+ flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
+ req->flags |= cpu_to_le32(flags);
+ req->dst_id = cpu_to_le16(rxq);
+ }
+}
+
int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr)
{
struct hwrm_cfa_ntuple_filter_alloc_output *resp;
struct hwrm_cfa_ntuple_filter_alloc_input *req;
+ struct bnxt_flow_masks *masks = &fltr->fmasks;
struct flow_keys *keys = &fltr->fkeys;
struct bnxt_l2_filter *l2_fltr;
struct bnxt_vnic_info *vnic;
- u32 flags = 0;
int rc;
rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
@@ -5668,16 +6187,16 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
l2_fltr = fltr->l2_fltr;
req->l2_filter_id = l2_fltr->base.filter_id;
-
- if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
- flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
- req->dst_id = cpu_to_le16(fltr->base.rxq);
+ if (fltr->base.flags & BNXT_ACT_DROP) {
+ req->flags =
+ cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
+ } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
+ bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr);
} else {
vnic = &bp->vnic_info[fltr->base.rxq + 1];
req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
}
- req->flags = cpu_to_le32(flags);
- req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
+ req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
req->ethertype = htons(ETH_P_IP);
req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
@@ -5687,25 +6206,15 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
req->ethertype = htons(ETH_P_IPV6);
req->ip_addr_type =
CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
- *(struct in6_addr *)&req->src_ipaddr[0] =
- keys->addrs.v6addrs.src;
- bnxt_fill_ipv6_mask(req->src_ipaddr_mask);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
- *(struct in6_addr *)&req->dst_ipaddr[0] =
- keys->addrs.v6addrs.dst;
- bnxt_fill_ipv6_mask(req->dst_ipaddr_mask);
- }
+ *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src;
+ *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src;
+ *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst;
+ *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst;
} else {
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
- req->src_ipaddr[0] = keys->addrs.v4addrs.src;
- req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
- req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
- req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
- }
+ req->src_ipaddr[0] = keys->addrs.v4addrs.src;
+ req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src;
+ req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+ req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst;
}
if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
@@ -5713,14 +6222,10 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
}
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
- req->src_port = keys->ports.src;
- req->src_port_mask = cpu_to_be16(0xffff);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
- req->dst_port = keys->ports.dst;
- req->dst_port_mask = cpu_to_be16(0xffff);
- }
+ req->src_port = keys->ports.src;
+ req->src_port_mask = masks->ports.src;
+ req->dst_port = keys->ports.dst;
+ req->dst_port_mask = masks->ports.dst;
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
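With the per-field BNXT_NTUPLE_MATCH_* flags removed above, the request now always carries both the value and the mask for every tuple field, taken from fltr->fmasks; an all-zero mask wildcards the field just as the old "flag not set" case did. A minimal userspace sketch of the value/mask semantics assumed here:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Firmware-style value/mask match: a zero mask wildcards the field. */
static bool field_matches(uint32_t pkt, uint32_t val, uint32_t mask)
{
	return (pkt & mask) == (val & mask);
}

int main(void)
{
	/* exact match vs. wildcard */
	printf("%d\n", field_matches(0x0a000001, 0x0a000001, 0xffffffff));
	printf("%d\n", field_matches(0x0a000002, 0x0a000001, 0));
	return 0;
}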
@@ -5794,9 +6299,9 @@ static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
}
-static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
+int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u32 tpa_flags)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
struct hwrm_vnic_tpa_cfg_input *req;
int rc;
@@ -5891,16 +6396,19 @@ static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
entries = HW_HASH_INDEX_SIZE;
bp->rss_indir_tbl_entries = entries;
- bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
- GFP_KERNEL);
+ bp->rss_indir_tbl =
+ kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
if (!bp->rss_indir_tbl)
return -ENOMEM;
+
return 0;
}
-static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
+void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
+ struct ethtool_rxfh_context *rss_ctx)
{
u16 max_rings, max_entries, pad, i;
+ u32 *rss_indir_tbl;
if (!bp->rx_nr_rings)
return;
@@ -5911,18 +6419,22 @@ static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
max_rings = bp->rx_nr_rings;
max_entries = bnxt_get_rxfh_indir_size(bp->dev);
+ if (rss_ctx)
+ rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx);
+ else
+ rss_indir_tbl = &bp->rss_indir_tbl[0];
for (i = 0; i < max_entries; i++)
- bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
+ rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
pad = bp->rss_indir_tbl_entries - max_entries;
if (pad)
- memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
+ memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
}
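bnxt_set_dflt_rss_indir_tbl() now fills either the driver's default table or the table embedded in an ethtool RSS context, but both get the same round-robin spread from ethtool_rxfh_indir_default(). A sketch of that default layout, assuming the usual index-modulo-rings definition of the helper:

#include <stdio.h>

/* Assumed definition of ethtool_rxfh_indir_default(): round-robin. */
static unsigned int rxfh_indir_default(unsigned int index,
				       unsigned int n_rx_rings)
{
	return index % n_rx_rings;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 8; i++)		/* 8 entries, 3 rings: 0 1 2 0 1 2 0 1 */
		printf("%u ", rxfh_indir_default(i, 3));
	printf("\n");
	return 0;
}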
static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
{
- u16 i, tbl_size, max_ring = 0;
+ u32 i, tbl_size, max_ring = 0;
if (!bp->rss_indir_tbl)
return 0;
@@ -5971,7 +6483,12 @@ static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
for (i = 0; i < tbl_size; i++) {
u16 ring_id, j;
- j = bp->rss_indir_tbl[i];
+ if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
+ j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
+ else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
+ j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
+ else
+ j = bp->rss_indir_tbl[i];
rxr = &bp->rx_ring[j];
ring_id = rxr->rx_ring_struct.fw_ring_id;
@@ -5985,10 +6502,13 @@ static void
__bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
struct bnxt_vnic_info *vnic)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
bnxt_fill_hw_rss_tbl_p5(bp, vnic);
- else
+ if (bp->flags & BNXT_FLAG_CHIP_P7)
+ req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT;
+ } else {
bnxt_fill_hw_rss_tbl(bp, vnic);
+ }
if (bp->rss_hash_delta) {
req->hash_type = cpu_to_le32(bp->rss_hash_delta);
@@ -6004,9 +6524,9 @@ __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
}
-static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
+static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ bool set_rss)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_rss_cfg_input *req;
int rc;
@@ -6024,9 +6544,9 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
return hwrm_req_send(bp, req);
}
-static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
+static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, bool set_rss)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_rss_cfg_input *req;
dma_addr_t ring_tbl_map;
u32 i, nr_ctxs;
@@ -6061,7 +6581,7 @@ exit:
static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct hwrm_vnic_rss_qcfg_output *resp;
struct hwrm_vnic_rss_qcfg_input *req;
@@ -6079,9 +6599,9 @@ static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
hwrm_req_drop(bp, req);
}
-static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
+static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ u16 hds_thresh = (u16)bp->dev->cfg_pending->hds_thresh;
struct hwrm_vnic_plcmodes_cfg_input *req;
int rc;
@@ -6091,22 +6611,21 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
+ req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
- if (BNXT_RX_PAGE_MODE(bp)) {
- req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
- } else {
+ if (!BNXT_RX_PAGE_MODE(bp) && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
req->enables |=
cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
- req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
- req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+ req->hds_threshold = cpu_to_le16(hds_thresh);
}
req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
return hwrm_req_send(bp, req);
}
-static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
+static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
u16 ctx_idx)
{
struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
@@ -6115,10 +6634,10 @@ static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
return;
req->rss_cos_lb_ctx_id =
- cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
+ cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
hwrm_req_send(bp, req);
- bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
+ vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
}
static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
@@ -6130,13 +6649,14 @@ static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
- bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
+ bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j);
}
}
bp->rsscos_nr_ctxs = 0;
}
-static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
+static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, u16 ctx_idx)
{
struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
@@ -6149,7 +6669,7 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc)
- bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
+ vnic->fw_rss_cos_lb_ctx[ctx_idx] =
le16_to_cpu(resp->rss_cos_lb_ctx_id);
hwrm_req_drop(bp, req);
@@ -6163,9 +6683,9 @@ static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
}
-int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct hwrm_vnic_cfg_input *req;
unsigned int ring = 0, grp_idx;
u16 def_vlan = 0;
@@ -6194,8 +6714,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
VNIC_CFG_REQ_ENABLES_MRU);
} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
- req->rss_rule =
- cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
+ req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]);
req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
VNIC_CFG_REQ_ENABLES_MRU);
req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
@@ -6214,15 +6733,16 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
if (vnic->flags & BNXT_VNIC_RSS_FLAG)
ring = 0;
else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
- ring = vnic_id - 1;
- else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
+ ring = vnic->vnic_id - 1;
+ else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
ring = bp->rx_nr_rings - 1;
grp_idx = bp->rx_ring[ring].bnapi->index;
req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
req->lb_rule = cpu_to_le16(0xffff);
vnic_mru:
- req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
+ vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+ req->mru = cpu_to_le16(vnic->mru);
req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
#ifdef CONFIG_BNXT_SRIOV
@@ -6231,25 +6751,25 @@ vnic_mru:
#endif
if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
- if (!vnic_id && bnxt_ulp_registered(bp->edev))
+ if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev))
req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
return hwrm_req_send(bp, req);
}
-static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
+static void bnxt_hwrm_vnic_free_one(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
{
- if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
+ if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
struct hwrm_vnic_free_input *req;
if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
return;
- req->vnic_id =
- cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
hwrm_req_send(bp, req);
- bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
+ vnic->fw_vnic_id = INVALID_HW_RING_ID;
}
}
@@ -6258,15 +6778,14 @@ static void bnxt_hwrm_vnic_free(struct bnxt *bp)
u16 i;
for (i = 0; i < bp->nr_vnics; i++)
- bnxt_hwrm_vnic_free_one(bp, i);
+ bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
}
-static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
- unsigned int start_rx_ring_idx,
- unsigned int nr_rings)
+int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ unsigned int start_rx_ring_idx,
+ unsigned int nr_rings)
{
unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_alloc_output *resp;
struct hwrm_vnic_alloc_input *req;
int rc;
@@ -6292,7 +6811,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
vnic_no_ring_grps:
for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
- if (vnic_id == 0)
+ if (vnic->vnic_id == BNXT_VNIC_DEFAULT)
req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
resp = hwrm_req_hold(bp, req);
@@ -6351,6 +6870,16 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
}
if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP)
bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP)
+ bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP)
+ bp->fw_cap |= BNXT_FW_CAP_VNIC_RE_FLUSH;
}
hwrm_req_drop(bp, req);
return rc;
@@ -6446,6 +6975,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
switch (ring_type) {
case HWRM_RING_ALLOC_TX: {
struct bnxt_tx_ring_info *txr;
+ u16 flags = 0;
txr = container_of(ring, struct bnxt_tx_ring_info,
tx_ring_struct);
@@ -6459,6 +6989,9 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
req->cmpl_coal_cnt =
RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
+ if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg)
+ flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE;
+ req->flags = cpu_to_le16(flags);
break;
}
case HWRM_RING_ALLOC_RX:
@@ -6504,15 +7037,14 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
req->cq_handle = cpu_to_le64(ring->handle);
req->enables |= cpu_to_le32(
RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
- } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
+ } else {
req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
}
break;
case HWRM_RING_ALLOC_NQ:
req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
req->length = cpu_to_le32(bp->cp_ring_mask + 1);
- if (bp->flags & BNXT_FLAG_USING_MSIX)
- req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
break;
default:
netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
@@ -6632,6 +7164,48 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
bnxt_set_db_mask(bp, db, ring_type);
}
+static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+ struct bnxt_napi *bnapi = rxr->bnapi;
+ u32 type = HWRM_RING_ALLOC_RX;
+ u32 map_idx = bnapi->index;
+ int rc;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
+ bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
+
+ return 0;
+}
+
+static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 type = HWRM_RING_ALLOC_AGG;
+ u32 grp_idx = ring->grp_idx;
+ u32 map_idx;
+ int rc;
+
+ map_idx = grp_idx + bp->rx_nr_rings;
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
+ ring->fw_ring_id);
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
+
+ return 0;
+}
+
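These helpers factor the per-ring HWRM allocation out of bnxt_hwrm_ring_alloc(); note the map-index convention they rely on: RX ring i maps at its NAPI index, while its aggregation ring maps at i + rx_nr_rings. A small sketch of that layout:

#include <stdio.h>

/* Aggregation rings are mapped after all RX rings. */
static unsigned int agg_map_idx(unsigned int grp_idx,
				unsigned int rx_nr_rings)
{
	return grp_idx + rx_nr_rings;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 4; i++)		/* 4 RX rings: agg rings at 4..7 */
		printf("%u ", agg_map_idx(i, 4));
	printf("\n");
	return 0;
}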
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
@@ -6697,24 +7271,21 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
}
- type = HWRM_RING_ALLOC_RX;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
- struct bnxt_napi *bnapi = rxr->bnapi;
- u32 map_idx = bnapi->index;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
if (rc)
goto err_out;
- bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
/* If we have agg rings, post agg buffers first. */
if (!agg_rings)
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
- bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
+ struct bnxt_napi *bnapi = rxr->bnapi;
u32 type2 = HWRM_RING_ALLOC_CMPL;
+ struct bnxt_ring_struct *ring;
+ u32 map_idx = bnapi->index;
ring = &cpr2->cp_ring_struct;
ring->handle = BNXT_SET_NQ_HDL(cpr2);
@@ -6728,29 +7299,36 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
}
if (agg_rings) {
- type = HWRM_RING_ALLOC_AGG;
for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_ring_struct *ring =
- &rxr->rx_agg_ring_struct;
- u32 grp_idx = ring->grp_idx;
- u32 map_idx = grp_idx + bp->rx_nr_rings;
-
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]);
if (rc)
goto err_out;
-
- bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
- ring->fw_ring_id);
- bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
- bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
- bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
}
}
err_out:
return rc;
}
+static void bnxt_cancel_dim(struct bnxt *bp)
+{
+ int i;
+
+ /* DIM work is initialized in bnxt_enable_napi(). Proceed only
+ * if NAPI is enabled.
+ */
+ if (!bp->bnapi || test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
+ return;
+
+ /* Make sure NAPI sees that the VNIC is disabled */
+ synchronize_net();
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct bnxt_napi *bnapi = rxr->bnapi;
+
+ cancel_work_sync(&bnapi->cp_ring.dim.work);
+ }
+}
+
static int hwrm_ring_free_send_msg(struct bnxt *bp,
struct bnxt_ring_struct *ring,
u32 ring_type, int cmpl_ring_id)
@@ -6784,6 +7362,50 @@ exit:
return 0;
}
+static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ bool close_path)
+{
+ struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ hwrm_ring_free_send_msg(bp, ring,
+ RING_FREE_REQ_RING_TYPE_RX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ bool close_path)
+{
+ struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+ u32 type, cmpl_ring_id;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ type = RING_FREE_REQ_RING_TYPE_RX_AGG;
+ else
+ type = RING_FREE_REQ_RING_TYPE_RX;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ hwrm_ring_free_send_msg(bp, ring, type,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
+}
+
static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
u32 type;
@@ -6807,43 +7429,10 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
+ bnxt_cancel_dim(bp);
for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
- u32 grp_idx = rxr->bnapi->index;
-
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
-
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_RX,
- close_path ? cmpl_ring_id :
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- bp->grp_info[grp_idx].rx_fw_ring_id =
- INVALID_HW_RING_ID;
- }
- }
-
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
- type = RING_FREE_REQ_RING_TYPE_RX_AGG;
- else
- type = RING_FREE_REQ_RING_TYPE_RX;
- for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
- u32 grp_idx = rxr->bnapi->index;
-
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
-
- hwrm_ring_free_send_msg(bp, ring, type,
- close_path ? cmpl_ring_id :
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- bp->grp_info[grp_idx].agg_fw_ring_id =
- INVALID_HW_RING_ID;
- }
+ bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
+ bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
}
/* The completion rings are about to be freed. After that the
@@ -6918,6 +7507,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
hw_resc->resv_hw_ring_grps =
le32_to_cpu(resp->alloc_hw_ring_grps);
hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
+ hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx);
cp = le16_to_cpu(resp->alloc_cmpl_rings);
stats = le16_to_cpu(resp->alloc_stat_ctx);
hw_resc->resv_irqs = cp;
@@ -6973,8 +7563,7 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
static bool bnxt_rfs_supported(struct bnxt *bp);
static struct hwrm_func_cfg_input *
-__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_cfg_input *req;
u32 enables = 0;
@@ -6983,52 +7572,42 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return NULL;
req->fid = cpu_to_le16(0xffff);
- enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
- req->num_tx_rings = cpu_to_le16(tx_rings);
+ enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ req->num_tx_rings = cpu_to_le16(hwr->tx);
if (BNXT_NEW_RM(bp)) {
- enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
- enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
- enables |= tx_rings + ring_grps ?
+ enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
+ enables |= hwr->cp_p5 ?
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
- enables |= rx_rings ?
- FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
} else {
- enables |= cp_rings ?
+ enables |= hwr->cp ?
FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
- enables |= ring_grps ?
- FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
- FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ enables |= hwr->grp ?
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
}
- enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
-
- req->num_rx_rings = cpu_to_le16(rx_rings);
+ enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS :
+ 0;
+ req->num_rx_rings = cpu_to_le16(hwr->rx);
+ req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
-
- req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
- req->num_msix = cpu_to_le16(cp_rings);
- req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
+ req->num_msix = cpu_to_le16(hwr->cp);
} else {
- req->num_cmpl_rings = cpu_to_le16(cp_rings);
- req->num_hw_ring_grps = cpu_to_le16(ring_grps);
- req->num_rsscos_ctxs = cpu_to_le16(1);
- if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) &&
- bnxt_rfs_supported(bp))
- req->num_rsscos_ctxs =
- cpu_to_le16(ring_grps + 1);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp);
+ req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
}
- req->num_stat_ctxs = cpu_to_le16(stats);
- req->num_vnics = cpu_to_le16(vnics);
+ req->num_stat_ctxs = cpu_to_le16(hwr->stat);
+ req->num_vnics = cpu_to_le16(hwr->vnic);
}
req->enables = cpu_to_le32(enables);
return req;
}
static struct hwrm_func_vf_cfg_input *
-__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_vf_cfg_input *req;
u32 enables = 0;
@@ -7036,51 +7615,46 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
return NULL;
- enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
- enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
- FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
- enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- enables |= tx_rings + ring_grps ?
+ enables |= hwr->cp_p5 ?
FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
} else {
- enables |= cp_rings ?
- FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
- enables |= ring_grps ?
+ enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
+ enables |= hwr->grp ?
FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
}
- enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= hwr->vnic ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
- req->num_tx_rings = cpu_to_le16(tx_rings);
- req->num_rx_rings = cpu_to_le16(rx_rings);
+ req->num_tx_rings = cpu_to_le16(hwr->tx);
+ req->num_rx_rings = cpu_to_le16(hwr->rx);
+ req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx);
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
- u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
-
- req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
- req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5);
} else {
- req->num_cmpl_rings = cpu_to_le16(cp_rings);
- req->num_hw_ring_grps = cpu_to_le16(ring_grps);
- req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
+ req->num_cmpl_rings = cpu_to_le16(hwr->cp);
+ req->num_hw_ring_grps = cpu_to_le16(hwr->grp);
}
- req->num_stat_ctxs = cpu_to_le16(stats);
- req->num_vnics = cpu_to_le16(vnics);
+ req->num_stat_ctxs = cpu_to_le16(hwr->stat);
+ req->num_vnics = cpu_to_le16(hwr->vnic);
req->enables = cpu_to_le32(enables);
return req;
}
static int
-bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_cfg_input *req;
int rc;
- req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
if (!req)
return -ENOMEM;
@@ -7094,25 +7668,23 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return rc;
if (bp->hwrm_spec_code < 0x10601)
- bp->hw_resc.resv_tx_rings = tx_rings;
+ bp->hw_resc.resv_tx_rings = hwr->tx;
return bnxt_hwrm_get_rings(bp);
}
static int
-bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats, int vnics)
+bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_vf_cfg_input *req;
int rc;
if (!BNXT_NEW_RM(bp)) {
- bp->hw_resc.resv_tx_rings = tx_rings;
+ bp->hw_resc.resv_tx_rings = hwr->tx;
return 0;
}
- req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
if (!req)
return -ENOMEM;
@@ -7123,30 +7695,17 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return bnxt_hwrm_get_rings(bp);
}
-static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
- int cp, int stat, int vnic)
+static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
if (BNXT_PF(bp))
- return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
- vnic);
+ return bnxt_hwrm_reserve_pf_rings(bp, hwr);
else
- return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
- vnic);
+ return bnxt_hwrm_reserve_vf_rings(bp, hwr);
}
int bnxt_nq_rings_in_use(struct bnxt *bp)
{
- int cp = bp->cp_nr_rings;
- int ulp_msix, ulp_base;
-
- ulp_msix = bnxt_get_ulp_msix_num(bp);
- if (ulp_msix) {
- ulp_base = bnxt_get_ulp_msix_base(bp);
- cp += ulp_msix;
- if ((ulp_base + ulp_msix) > cp)
- cp = ulp_base + ulp_msix;
- }
- return cp;
+ return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp);
}
static int bnxt_cp_rings_in_use(struct bnxt *bp)
@@ -7162,16 +7721,25 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp)
static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
{
- int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
- int cp = bp->cp_nr_rings;
-
- if (!ulp_stat)
- return cp;
+ return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
+}
- if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
- return bnxt_get_ulp_msix_base(bp) + ulp_stat;
+static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
+{
+ if (!hwr->grp)
+ return 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp);
- return cp + ulp_stat;
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ rss_ctx *= hwr->vnic;
+ return rss_ctx;
+ }
+ if (BNXT_VF(bp))
+ return BNXT_VF_MAX_RSS_CTX;
+ if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp))
+ return hwr->grp + 1;
+ return 1;
}
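bnxt_get_total_rss_ctxs() centralizes the RSS context reservation math shown above. A self-contained model follows; nr_rss_ctxs_p5() is an illustrative stand-in for bnxt_get_nr_rss_ctxs() (assumed here to be ring groups over a 64-entry table, rounded up), and 8 stands in for BNXT_VF_MAX_RSS_CTX:

#include <stdbool.h>
#include <stdio.h>

/* Assumption: one RSS context covers a 64-entry table on P5+ chips. */
static int nr_rss_ctxs_p5(int ring_grps)
{
	return (ring_grps + 63) / 64;
}

static int total_rss_ctxs(bool p5_plus, bool ntuple_vnic, bool vf,
			  bool new_rss_cap, bool rfs, int grp, int vnic)
{
	if (!grp)
		return 0;
	if (p5_plus) {
		int ctx = nr_rss_ctxs_p5(grp);

		return ntuple_vnic ? ctx * vnic : ctx;
	}
	if (vf)
		return 8;		/* stand-in for BNXT_VF_MAX_RSS_CTX */
	if (!new_rss_cap && rfs)
		return grp + 1;
	return 1;
}

int main(void)
{
	/* 128 ring groups, 4 VNICs on P5+ with ntuple VNICs: 2 * 4 = 8 */
	printf("%d\n", total_rss_ctxs(true, true, false, true, true, 128, 4));
	return 0;
}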
/* Check if a default RSS map needs to be setup. This function is only
@@ -7185,34 +7753,46 @@ static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
hw_resc->resv_rx_rings = bp->rx_nr_rings;
if (!netif_is_rxfh_configured(bp->dev))
- bnxt_set_dflt_rss_indir_tbl(bp);
+ bnxt_set_dflt_rss_indir_tbl(bp, NULL);
}
}
+static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
+{
+ if (bp->flags & BNXT_FLAG_RFS) {
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ return 2 + bp->num_rss_ctx;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ return rx_rings + 1;
+ }
+ return 1;
+}
+
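bnxt_get_total_vnics() is the matching VNIC count: with RFS enabled, chips that support the dedicated ntuple VNIC need the default VNIC, the ntuple VNIC, and one per user RSS context, while older chips need one VNIC per RX ring plus the default. The same logic as a standalone sketch:

#include <stdbool.h>
#include <stdio.h>

static int total_vnics(bool rfs, bool ntuple_vnic, bool p5_plus,
		       int rx_rings, int num_rss_ctx)
{
	if (rfs) {
		if (ntuple_vnic)
			return 2 + num_rss_ctx;	/* default + ntuple + ctxs */
		if (!p5_plus)
			return rx_rings + 1;	/* default + one per ring */
	}
	return 1;
}

int main(void)
{
	printf("%d\n", total_vnics(true, true, true, 8, 3));	/* 5 */
	printf("%d\n", total_vnics(true, false, false, 8, 0));	/* 9 */
	return 0;
}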
static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int cp = bnxt_cp_rings_in_use(bp);
int nq = bnxt_nq_rings_in_use(bp);
int rx = bp->rx_nr_rings, stat;
- int vnic = 1, grp = rx;
-
- if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
- bp->hwrm_spec_code >= 0x10601)
- return true;
+ int vnic, grp = rx;
/* Old firmware does not need RX ring reservations but we still
* need to setup a default RSS map when needed. With new firmware
* we go through RX ring reservations first and then set up the
* RSS map for the successfully reserved RX rings when needed.
*/
- if (!BNXT_NEW_RM(bp)) {
+ if (!BNXT_NEW_RM(bp))
bnxt_check_rss_tbl_no_rmgr(bp);
+
+ if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
+ bp->hwrm_spec_code >= 0x10601)
+ return true;
+
+ if (!BNXT_NEW_RM(bp))
return false;
- }
- if ((bp->flags & BNXT_FLAG_RFS) &&
- !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
- vnic = rx + 1;
+
+ vnic = bnxt_get_total_vnics(bp, rx);
+
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
stat = bnxt_get_func_stat_ctxs(bp);
@@ -7227,47 +7807,81 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
return false;
}
-static int __bnxt_reserve_rings(struct bnxt *bp)
+static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- int cp = bnxt_nq_rings_in_use(bp);
- int tx = bp->tx_nr_rings;
- int rx = bp->rx_nr_rings;
- int grp, rx_rings, rc;
- int vnic = 1, stat;
+
+ hwr->tx = hw_resc->resv_tx_rings;
+ if (BNXT_NEW_RM(bp)) {
+ hwr->rx = hw_resc->resv_rx_rings;
+ hwr->cp = hw_resc->resv_irqs;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ hwr->cp_p5 = hw_resc->resv_cp_rings;
+ hwr->grp = hw_resc->resv_hw_ring_grps;
+ hwr->vnic = hw_resc->resv_vnics;
+ hwr->stat = hw_resc->resv_stat_ctxs;
+ hwr->rss_ctx = hw_resc->resv_rsscos_ctxs;
+ }
+}
+
+static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
+{
+ return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic &&
+ hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
+}
+
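bnxt_copy_reserved_rings() and bnxt_rings_ok() replace the long scalar argument lists with one struct bnxt_hw_rings; the sufficiency check requires every reserved resource to be non-zero, with cp_p5 mandatory only on P5+ chips. A compact userspace model of the predicate:

#include <stdbool.h>
#include <stdio.h>

/* Field set mirrors the resources checked by bnxt_rings_ok(). */
struct hw_rings { int tx, rx, cp, cp_p5, grp, vnic, stat; };

static bool rings_ok(const struct hw_rings *h, bool p5_plus)
{
	return h->tx && h->rx && h->cp && h->grp && h->vnic && h->stat &&
	       (h->cp_p5 || !p5_plus);
}

int main(void)
{
	struct hw_rings h = { 4, 4, 8, 0, 4, 1, 8 };

	printf("%d %d\n", rings_ok(&h, false), rings_ok(&h, true)); /* 1 0 */
	return 0;
}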
+static int bnxt_get_avail_msix(struct bnxt *bp, int num);
+
+static int __bnxt_reserve_rings(struct bnxt *bp)
+{
+ struct bnxt_hw_rings hwr = {0};
+ int rx_rings, old_rx_rings, rc;
+ int cp = bp->cp_nr_rings;
+ int ulp_msix = 0;
bool sh = false;
int tx_cp;
if (!bnxt_need_reserve_rings(bp))
return 0;
+ if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
+ ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
+ if (!ulp_msix)
+ bnxt_set_ulp_stat_ctxs(bp, 0);
+
+ if (ulp_msix > bp->ulp_num_msix_want)
+ ulp_msix = bp->ulp_num_msix_want;
+ hwr.cp = cp + ulp_msix;
+ } else {
+ hwr.cp = bnxt_nq_rings_in_use(bp);
+ }
+
+ hwr.tx = bp->tx_nr_rings;
+ hwr.rx = bp->rx_nr_rings;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
sh = true;
- if ((bp->flags & BNXT_FLAG_RFS) &&
- !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
- vnic = rx + 1;
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ hwr.cp_p5 = hwr.rx + hwr.tx;
+
+ hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);
+
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- rx <<= 1;
- grp = bp->rx_nr_rings;
- stat = bnxt_get_func_stat_ctxs(bp);
+ hwr.rx <<= 1;
+ hwr.grp = bp->rx_nr_rings;
+ hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
+ hwr.stat = bnxt_get_func_stat_ctxs(bp);
+ old_rx_rings = bp->hw_resc.resv_rx_rings;
- rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
+ rc = bnxt_hwrm_reserve_rings(bp, &hwr);
if (rc)
return rc;
- tx = hw_resc->resv_tx_rings;
- if (BNXT_NEW_RM(bp)) {
- rx = hw_resc->resv_rx_rings;
- cp = hw_resc->resv_irqs;
- grp = hw_resc->resv_hw_ring_grps;
- vnic = hw_resc->resv_vnics;
- stat = hw_resc->resv_stat_ctxs;
- }
+ bnxt_copy_reserved_rings(bp, &hwr);
- rx_rings = rx;
+ rx_rings = hwr.rx;
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
- if (rx >= 2) {
- rx_rings = rx >> 1;
+ if (hwr.rx >= 2) {
+ rx_rings = hwr.rx >> 1;
} else {
if (netif_running(bp->dev))
return -ENOMEM;
@@ -7279,17 +7893,17 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
bnxt_set_ring_params(bp);
}
}
- rx_rings = min_t(int, rx_rings, grp);
- cp = min_t(int, cp, bp->cp_nr_rings);
- if (stat > bnxt_get_ulp_stat_ctxs(bp))
- stat -= bnxt_get_ulp_stat_ctxs(bp);
- cp = min_t(int, cp, stat);
- rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
+ rx_rings = min_t(int, rx_rings, hwr.grp);
+ hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings);
+ if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp))
+ hwr.stat -= bnxt_get_ulp_stat_ctxs(bp);
+ hwr.cp = min_t(int, hwr.cp, hwr.stat);
+ rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
- rx = rx_rings << 1;
- tx_cp = bnxt_num_tx_to_cp(bp, tx);
- cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
- bp->tx_nr_rings = tx;
+ hwr.rx = rx_rings << 1;
+ tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx);
+ hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings;
+ bp->tx_nr_rings = hwr.tx;
/* If we cannot reserve all the RX rings, reset the RSS map only
* if absolutely necessary
@@ -7306,20 +7920,32 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
}
}
bp->rx_nr_rings = rx_rings;
- bp->cp_nr_rings = cp;
+ bp->cp_nr_rings = hwr.cp;
- if (!tx || !rx || !cp || !grp || !vnic || !stat)
+ if (!bnxt_rings_ok(bp, &hwr))
return -ENOMEM;
- if (!netif_is_rxfh_configured(bp->dev))
- bnxt_set_dflt_rss_indir_tbl(bp);
+ if (old_rx_rings != bp->hw_resc.resv_rx_rings &&
+ !netif_is_rxfh_configured(bp->dev))
+ bnxt_set_dflt_rss_indir_tbl(bp, NULL);
+
+ if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
+ int resv_msix, resv_ctx, ulp_ctxs;
+ struct bnxt_hw_resc *hw_resc;
+
+ hw_resc = &bp->hw_resc;
+ resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
+ ulp_msix = min_t(int, resv_msix, ulp_msix);
+ bnxt_set_ulp_msix_num(bp, ulp_msix);
+ resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings;
+ ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp));
+ bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs);
+ }
return rc;
}
-static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats,
- int vnics)
+static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_vf_cfg_input *req;
u32 flags;
@@ -7327,8 +7953,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
if (!BNXT_NEW_RM(bp))
return 0;
- req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_vf_rings(bp, hwr);
flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
@@ -7342,15 +7967,12 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return hwrm_req_send_silent(bp, req);
}
-static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats,
- int vnics)
+static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
struct hwrm_func_cfg_input *req;
u32 flags;
- req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ req = __bnxt_hwrm_reserve_pf_rings(bp, hwr);
flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
if (BNXT_NEW_RM(bp)) {
flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
@@ -7368,20 +7990,15 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return hwrm_req_send_silent(bp, req);
}
-static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int stats,
- int vnics)
+static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
{
if (bp->hwrm_spec_code < 0x10801)
return 0;
if (BNXT_PF(bp))
- return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
- ring_grps, cp_rings, stats,
- vnics);
+ return bnxt_hwrm_check_pf_rings(bp, hwr);
- return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, stats, vnics);
+ return bnxt_hwrm_check_vf_rings(bp, hwr);
}
static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
@@ -7726,16 +8343,20 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (rc)
goto func_qcfg_exit;
+ flags = le16_to_cpu(resp->flags);
#ifdef CONFIG_BNXT_SRIOV
if (BNXT_VF(bp)) {
struct bnxt_vf_info *vf = &bp->vf;
vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
+ if (flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF)
+ vf->flags |= BNXT_VF_TRUST;
+ else
+ vf->flags &= ~BNXT_VF_TRUST;
} else {
bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
}
#endif
- flags = le16_to_cpu(resp->flags);
if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
@@ -7748,6 +8369,9 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
+ if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV)
+ bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
+
switch (resp->port_partition_type) {
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
@@ -7809,7 +8433,7 @@ static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
int n = 1;
- if (!ctxm->max_entries)
+ if (!ctxm->max_entries || ctxm->pg_info)
continue;
if (ctxm->instance_bmap)
@@ -7821,6 +8445,9 @@ static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max)
return 0;
}
+static void bnxt_free_one_ctx_mem(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm, bool force);
+
#define BNXT_CTX_INIT_VALID(flags) \
(!!((flags) & \
FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT))
@@ -7829,7 +8456,7 @@ static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
{
struct hwrm_func_backing_store_qcaps_v2_output *resp;
struct hwrm_func_backing_store_qcaps_v2_input *req;
- struct bnxt_ctx_mem_info *ctx;
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
u16 type;
int rc;
@@ -7837,16 +8464,20 @@ static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
if (rc)
return rc;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
- bp->ctx = ctx;
+ if (!ctx) {
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ bp->ctx = ctx;
+ }
resp = hwrm_req_hold(bp, req);
for (type = 0; type < BNXT_CTX_V2_MAX; ) {
struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
u8 init_val, init_off, i;
+ u32 max_entries;
+ u16 entry_size;
__le32 *p;
u32 flags;
@@ -7856,15 +8487,26 @@ static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp)
goto ctx_done;
flags = le32_to_cpu(resp->flags);
type = le16_to_cpu(resp->next_valid_type);
- if (!(flags & FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID))
+ if (!(flags & BNXT_CTX_MEM_TYPE_VALID)) {
+ bnxt_free_one_ctx_mem(bp, ctxm, true);
continue;
-
+ }
+ entry_size = le16_to_cpu(resp->entry_size);
+ max_entries = le32_to_cpu(resp->max_num_entries);
+ if (ctxm->mem_valid) {
+ if (!(flags & BNXT_CTX_MEM_PERSIST) ||
+ ctxm->entry_size != entry_size ||
+ ctxm->max_entries != max_entries)
+ bnxt_free_one_ctx_mem(bp, ctxm, true);
+ else
+ continue;
+ }
ctxm->type = le16_to_cpu(resp->type);
- ctxm->entry_size = le16_to_cpu(resp->entry_size);
+ ctxm->entry_size = entry_size;
ctxm->flags = flags;
ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
ctxm->entry_multiple = resp->entry_multiple;
- ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
+ ctxm->max_entries = max_entries;
ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
init_val = resp->ctx_init_value;
init_off = resp->ctx_init_offset;
@@ -7889,7 +8531,8 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
struct hwrm_func_backing_store_qcaps_input *req;
int rc;
- if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
+ if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) ||
+ (bp->ctx && bp->ctx->flags & BNXT_CTX_FLAG_INITED))
return 0;
if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
@@ -8230,6 +8873,36 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
return rc;
}
+static size_t bnxt_copy_ctx_pg_tbls(struct bnxt *bp,
+ struct bnxt_ctx_pg_info *ctx_pg,
+ void *buf, size_t offset, size_t head,
+ size_t tail)
+{
+ struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
+ size_t nr_pages = ctx_pg->nr_pages;
+ int page_size = rmem->page_size;
+ size_t len = 0, total_len = 0;
+ u16 depth = rmem->depth;
+
+ tail %= nr_pages * page_size;
+ do {
+ if (depth > 1) {
+ int i = head / (page_size * MAX_CTX_PAGES);
+ struct bnxt_ctx_pg_info *pg_tbl;
+
+ pg_tbl = ctx_pg->ctx_pg_tbl[i];
+ rmem = &pg_tbl->ring_mem;
+ }
+ len = __bnxt_copy_ring(bp, rmem, buf, offset, head, tail);
+ head += len;
+ offset += len;
+ total_len += len;
+ if (head >= nr_pages * page_size)
+ head = 0;
+ } while (head != tail);
+ return total_len;
+}
+
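bnxt_copy_ctx_pg_tbls() walks the paged backing store as a circular byte buffer, copying from head up to tail and wrapping at the end (indirecting through the page-table level when depth > 1); like the driver's do-while, a copy that starts with head == tail covers the whole buffer. A userspace model of the head/tail arithmetic:

#include <stdio.h>
#include <string.h>

/* Copy [head, tail) out of a circular buffer, wrapping at the end. */
static size_t copy_ring(const char *ring, size_t ring_size, char *dst,
			size_t head, size_t tail)
{
	size_t total = 0;

	tail %= ring_size;
	do {
		size_t len = (head < tail ? tail : ring_size) - head;

		memcpy(dst + total, ring + head, len);
		total += len;
		head += len;
		if (head >= ring_size)
			head = 0;
	} while (head != tail);
	return total;
}

int main(void)
{
	char ring[8] = "ABCDEFGH", out[9] = { 0 };
	size_t n = copy_ring(ring, sizeof(ring), out, 6, 3);

	printf("%zu %.*s\n", n, (int)n, out);	/* 5 GHABC */
	return 0;
}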
static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
struct bnxt_ctx_pg_info *ctx_pg)
{
@@ -8280,6 +8953,8 @@ static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp,
rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl,
ctxm->init_value ? ctxm : NULL);
}
+ if (!rc)
+ ctxm->mem_valid = 1;
return rc;
}
@@ -8306,6 +8981,16 @@ static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp,
hwrm_req_hold(bp, req);
req->type = cpu_to_le16(ctxm->type);
req->entry_size = cpu_to_le16(ctxm->entry_size);
+ if ((ctxm->flags & BNXT_CTX_MEM_PERSIST) &&
+ bnxt_bs_trace_avail(bp, ctxm->type)) {
+ struct bnxt_bs_trace_info *bs_trace;
+ u32 enables;
+
+ enables = FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET;
+ req->enables = cpu_to_le32(enables);
+ bs_trace = &bp->bs_trace[bnxt_bstore_to_trace[ctxm->type]];
+ req->next_bs_offset = cpu_to_le32(bs_trace->last_offset);
+ }
req->subtype_valid_cnt = ctxm->split_entry_cnt;
for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++)
p[i] = cpu_to_le32(ctxm->split[i]);
@@ -8335,21 +9020,42 @@ static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
{
struct bnxt_ctx_mem_info *ctx = bp->ctx;
struct bnxt_ctx_mem_type *ctxm;
- u16 last_type;
+ u16 last_type = BNXT_CTX_INV;
int rc = 0;
u16 type;
- if (!ena)
- return 0;
- else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
- last_type = BNXT_CTX_MAX - 1;
- else
- last_type = BNXT_CTX_L2_MAX - 1;
+ for (type = BNXT_CTX_SRT; type <= BNXT_CTX_RIGP1; type++) {
+ ctxm = &ctx->ctx_arr[type];
+ if (!bnxt_bs_trace_avail(bp, type))
+ continue;
+ if (!ctxm->mem_valid) {
+ rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm,
+ ctxm->max_entries, 1);
+ if (rc) {
+ netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n",
+ type);
+ continue;
+ }
+ bnxt_bs_trace_init(bp, ctxm);
+ }
+ last_type = type;
+ }
+
+ if (last_type == BNXT_CTX_INV) {
+ if (!ena)
+ return 0;
+ else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM)
+ last_type = BNXT_CTX_MAX - 1;
+ else
+ last_type = BNXT_CTX_L2_MAX - 1;
+ }
ctx->ctx_arr[last_type].last = 1;
for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
ctxm = &ctx->ctx_arr[type];
+ if (!ctxm->mem_valid)
+ continue;
rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last);
if (rc)
return rc;
@@ -8357,21 +9063,63 @@ static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena)
return 0;
}
-void bnxt_free_ctx_mem(struct bnxt *bp)
+/**
+ * __bnxt_copy_ctx_mem - copy host context memory
+ * @bp: The driver context
+ * @ctxm: The pointer to the context memory type
+ * @buf: The destination buffer or NULL to just obtain the length
+ * @offset: The buffer offset to copy the data to
+ * @head: The head offset of context memory to copy from
+ * @tail: The tail offset (last byte + 1) of context memory to end the copy
+ *
+ * This function is called for debugging purposes to dump the host context
+ * used by the chip.
+ *
+ * Return: Length of memory copied
+ */
+static size_t __bnxt_copy_ctx_mem(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm, void *buf,
+ size_t offset, size_t head, size_t tail)
{
- struct bnxt_ctx_mem_info *ctx = bp->ctx;
- u16 type;
+ struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ size_t len = 0, total_len = 0;
+ int i, n = 1;
- if (!ctx)
- return;
+ if (!ctx_pg)
+ return 0;
- for (type = 0; type < BNXT_CTX_V2_MAX; type++) {
- struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
- struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
- int i, n = 1;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ for (i = 0; i < n; i++) {
+ len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, offset, head,
+ tail);
+ offset += len;
+ total_len += len;
+ }
+ return total_len;
+}
- if (!ctx_pg)
- continue;
+size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
+ void *buf, size_t offset)
+{
+ size_t tail = ctxm->max_entries * ctxm->entry_size;
+
+ return __bnxt_copy_ctx_mem(bp, ctxm, buf, offset, 0, tail);
+}
+
+static void bnxt_free_one_ctx_mem(struct bnxt *bp,
+ struct bnxt_ctx_mem_type *ctxm, bool force)
+{
+ struct bnxt_ctx_pg_info *ctx_pg;
+ int i, n = 1;
+
+ ctxm->last = 0;
+
+ if (ctxm->mem_valid && !force && (ctxm->flags & BNXT_CTX_MEM_PERSIST))
+ return;
+
+ ctx_pg = ctxm->pg_info;
+ if (ctx_pg) {
if (ctxm->instance_bmap)
n = hweight32(ctxm->instance_bmap);
for (i = 0; i < n; i++)
@@ -8379,11 +9127,27 @@ void bnxt_free_ctx_mem(struct bnxt *bp)
kfree(ctx_pg);
ctxm->pg_info = NULL;
+ ctxm->mem_valid = 0;
}
+ memset(ctxm, 0, sizeof(*ctxm));
+}
+
+void bnxt_free_ctx_mem(struct bnxt *bp, bool force)
+{
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ u16 type;
+
+ if (!ctx)
+ return;
+
+ for (type = 0; type < BNXT_CTX_V2_MAX; type++)
+ bnxt_free_one_ctx_mem(bp, &ctx->ctx_arr[type], force);
ctx->flags &= ~BNXT_CTX_FLAG_INITED;
- kfree(ctx);
- bp->ctx = NULL;
+ if (force) {
+ kfree(ctx);
+ bp->ctx = NULL;
+ }
}
static int bnxt_alloc_ctx_mem(struct bnxt *bp)
@@ -8421,10 +9185,18 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ena = 0;
if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
pg_lvl = 2;
- extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
- /* allocate extra qps if fw supports RoCE fast qp destroy feature */
- extra_qps += fast_qpmd_qps;
- extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ if (BNXT_SW_RES_LMT(bp)) {
+ extra_qps = max_qps - l2_qps - qp1_qps;
+ extra_srqs = max_srqs - srqs;
+ } else {
+ extra_qps = min_t(u32, 65536,
+ max_qps - l2_qps - qp1_qps);
+ /* allocate extra qps if fw supports RoCE fast qp
+ * destroy feature
+ */
+ extra_qps += fast_qpmd_qps;
+ extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ }
if (fast_qpmd_qps)
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD;
}
@@ -8460,14 +9232,20 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
goto skip_rdma;
ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
- /* 128K extra is needed to accommodate static AH context
- * allocation by f/w.
- */
- num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
- num_ah = min_t(u32, num_mr, 1024 * 128);
- ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
- if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
- ctxm->mrav_av_entries = num_ah;
+ if (BNXT_SW_RES_LMT(bp) &&
+ ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) {
+ num_ah = ctxm->mrav_av_entries;
+ num_mr = ctxm->max_entries - num_ah;
+ } else {
+ /* 128K extra is needed to accommodate static AH context
+ * allocation by f/w.
+ */
+ num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
+ num_ah = min_t(u32, num_mr, 1024 * 128);
+ ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1;
+ if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah)
+ ctxm->mrav_av_entries = num_ah;
+ }
rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2);
if (rc)
@@ -8511,6 +9289,80 @@ skip_rdma:
return 0;
}
+static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp)
+{
+ struct hwrm_dbg_crashdump_medium_cfg_input *req;
+ u16 page_attr;
+ int rc;
+
+ if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
+ return 0;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG);
+ if (rc)
+ return rc;
+
+ if (BNXT_PAGE_SIZE == 0x2000)
+ page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K;
+ else if (BNXT_PAGE_SIZE == 0x10000)
+ page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K;
+ else
+ page_attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K;
+ req->pg_size_lvl = cpu_to_le16(page_attr |
+ bp->fw_crash_mem->ring_mem.depth);
+ req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map);
+ req->size = cpu_to_le32(bp->fw_crash_len);
+ req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR);
+ return hwrm_req_send(bp, req);
+}
+
+static void bnxt_free_crash_dump_mem(struct bnxt *bp)
+{
+ if (bp->fw_crash_mem) {
+ bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
+ kfree(bp->fw_crash_mem);
+ bp->fw_crash_mem = NULL;
+ }
+}
+
+static int bnxt_alloc_crash_dump_mem(struct bnxt *bp)
+{
+ u32 mem_size = 0;
+ int rc;
+
+ if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR))
+ return 0;
+
+ rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size);
+ if (rc)
+ return rc;
+
+ mem_size = round_up(mem_size, 4);
+
+ /* keep and use the existing pages */
+ if (bp->fw_crash_mem &&
+ mem_size <= bp->fw_crash_mem->nr_pages * BNXT_PAGE_SIZE)
+ goto alloc_done;
+
+ if (bp->fw_crash_mem)
+ bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem);
+ else
+ bp->fw_crash_mem = kzalloc(sizeof(*bp->fw_crash_mem),
+ GFP_KERNEL);
+ if (!bp->fw_crash_mem)
+ return -ENOMEM;
+
+ rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL);
+ if (rc) {
+ bnxt_free_crash_dump_mem(bp);
+ return rc;
+ }
+
+alloc_done:
+ bp->fw_crash_len = mem_size;
+ return 0;
+}
+
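bnxt_alloc_crash_dump_mem() rounds the firmware-reported length up to 4 bytes and reuses a previous allocation whenever the rounded size still fits the backing pages. The reuse predicate as a tiny sketch, with PAGE_SZ as an illustrative 4 KB stand-in for BNXT_PAGE_SIZE:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SZ 4096u			/* illustrative BNXT_PAGE_SIZE */

static bool can_reuse(unsigned int have_pages, unsigned int want_bytes)
{
	want_bytes = (want_bytes + 3) & ~3u;	/* round_up(x, 4) */
	return have_pages && want_bytes <= have_pages * PAGE_SZ;
}

int main(void)
{
	printf("%d %d\n", can_reuse(2, 8000), can_reuse(2, 9000));	/* 1 0 */
	return 0;
}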
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
{
struct hwrm_func_resource_qcaps_output *resp;
@@ -8574,11 +9426,10 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
struct hwrm_port_mac_ptp_qcfg_output *resp;
struct hwrm_port_mac_ptp_qcfg_input *req;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- bool phc_cfg;
u8 flags;
int rc;
- if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) {
+ if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) {
rc = -ENODEV;
goto no_ptp;
}
@@ -8594,7 +9445,8 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
goto exit;
flags = resp->flags;
- if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
+ if (BNXT_CHIP_P5_AND_MINUS(bp) &&
+ !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
rc = -ENODEV;
goto exit;
}
@@ -8607,18 +9459,22 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
ptp->bp = bp;
bp->ptp_cfg = ptp;
}
- if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
+
+ if (flags &
+ (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK |
+ PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) {
ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
- } else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ } else if (BNXT_CHIP_P5(bp)) {
ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
} else {
rc = -ENODEV;
goto exit;
}
- phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
- rc = bnxt_ptp_init(bp, phc_cfg);
+ ptp->rtc_configured =
+ (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
+ rc = bnxt_ptp_init(bp);
if (rc)
netdev_warn(bp->dev, "PTP initialization failed.\n");
exit:
@@ -8682,6 +9538,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
+ if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP;
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP)
@@ -8692,6 +9550,14 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
+ if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
+ if (flags_ext2 &
+ FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS;
+ if (BNXT_PF(bp) &&
+ (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED;
bp->tx_push_thresh = 0;
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
@@ -8709,6 +9575,13 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+ hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records);
+ hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records);
+ hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
+ hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
+ hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
+ hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
+
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
@@ -8717,12 +9590,6 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
pf->max_vfs = le16_to_cpu(resp->max_vfs);
- pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
- pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
- pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
- pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
- pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
- pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
bp->flags &= ~BNXT_FLAG_WOL_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
bp->flags |= BNXT_FLAG_WOL_CAP;
@@ -8741,6 +9608,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
#endif
}
+ bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
hwrm_func_qcaps_exit:
hwrm_req_drop(bp, req);
@@ -8825,6 +9693,14 @@ static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
+ if (flags &
+ CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3;
+
+ if (flags &
+ CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO;
+
hwrm_cfa_adv_qcaps_exit:
hwrm_req_drop(bp, req);
return rc;
@@ -8906,7 +9782,7 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
BNXT_FW_HEALTH_WIN_BASE +
BNXT_GRC_REG_CHIP_NUM);
}
- if (!BNXT_CHIP_P5(bp))
+ if (!BNXT_CHIP_P5_PLUS(bp))
return;
status_loc = BNXT_GRC_REG_STATUS_P5 |
@@ -9493,7 +10369,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
else if (BNXT_NO_FW_ACCESS(bp))
return 0;
for (i = 0; i < bp->nr_vnics; i++) {
- rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
+ rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
if (rc) {
netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
i, rc);
@@ -9508,7 +10384,7 @@ static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
int i;
for (i = 0; i < bp->nr_vnics; i++)
- bnxt_hwrm_vnic_set_rss(bp, i, false);
+ bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
}
static void bnxt_clear_vnic(struct bnxt *bp)
@@ -9586,28 +10462,27 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
return hwrm_req_send(bp, req);
}
-static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
int rc;
if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
goto skip_rss_ctx;
/* allocate context for vnic */
- rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
goto vnic_setup_err;
}
bp->rsscos_nr_ctxs++;
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
- rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
goto vnic_setup_err;
}
bp->rsscos_nr_ctxs++;
@@ -9615,26 +10490,26 @@ static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
skip_rss_ctx:
/* configure default vnic, ring grp */
- rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
goto vnic_setup_err;
}
/* Enable RSS hashing on vnic */
- rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
+ rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
goto vnic_setup_err;
}
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
- rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
+ rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
}
}
@@ -9642,16 +10517,53 @@ vnic_setup_err:
return rc;
}
-static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
+int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u8 valid)
+{
+ struct hwrm_vnic_update_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE);
+ if (rc)
+ return rc;
+
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
+
+ if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID)
+ req->mru = cpu_to_le16(vnic->mru);
+
+ req->enables = cpu_to_le32(valid);
+
+ return hwrm_req_send(bp, req);
+}
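
A quick aside on this new helper: HWRM_VNIC_UPDATE takes an enables bitmap, and this patch only ever sets the MRU field. A minimal sketch of the calling pattern, mirroring the bnxt_queue_start()/bnxt_queue_stop() hunks later in this diff (error handling elided):

	/* Quiesce: a zero MRU effectively disables RX on the VNIC. */
	vnic->mru = 0;
	bnxt_hwrm_vnic_update(bp, vnic, VNIC_UPDATE_REQ_ENABLES_MRU_VALID);

	/* Resume: restore an MRU large enough for the MTU plus L2 headers. */
	vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
	bnxt_hwrm_vnic_update(bp, vnic, VNIC_UPDATE_REQ_ENABLES_MRU_VALID);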
+
+int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc;
+
+ rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
+ vnic->vnic_id, rc);
+ return rc;
+ }
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
+ if (rc)
+ netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
+ vnic->vnic_id, rc);
+ return rc;
+}
+
+int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int rc, i, nr_ctxs;
nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
for (i = 0; i < nr_ctxs; i++) {
- rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
- vnic_id, i, rc);
+ vnic->vnic_id, i, rc);
break;
}
bp->rsscos_nr_ctxs++;
@@ -9659,45 +10571,57 @@ static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
if (i < nr_ctxs)
return -ENOMEM;
- rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
- if (rc) {
- netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
- vnic_id, rc);
- return rc;
- }
- rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
- if (rc) {
- netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
- vnic_id, rc);
+ rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
+ if (rc)
return rc;
- }
+
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
- rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
+ rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
}
}
return rc;
}
-static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
- return __bnxt_setup_vnic_p5(bp, vnic_id);
+ return __bnxt_setup_vnic_p5(bp, vnic);
else
- return __bnxt_setup_vnic(bp, vnic_id);
+ return __bnxt_setup_vnic(bp, vnic);
+}
+
+static int bnxt_alloc_and_setup_vnic(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
+ u16 start_rx_ring_idx, int rx_rings)
+{
+ int rc;
+
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
+ vnic->vnic_id, rc);
+ return rc;
+ }
+ return bnxt_setup_vnic(bp, vnic);
}
static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
+ struct bnxt_vnic_info *vnic;
int i, rc = 0;
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
+ vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
+ return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings);
+ }
+
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0;
for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_vnic_info *vnic;
u16 vnic_id = i + 1;
u16 ring_id = i;
@@ -9708,19 +10632,82 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
vnic->flags |= BNXT_VNIC_RFS_FLAG;
if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
- rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
- if (rc) {
- netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
- vnic_id, rc);
- break;
- }
- rc = bnxt_setup_vnic(bp, vnic_id);
- if (rc)
+ if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1))
break;
}
return rc;
}
+void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
+ bool all)
+{
+ struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+ struct bnxt_filter_base *usr_fltr, *tmp;
+ struct bnxt_ntuple_filter *ntp_fltr;
+ int i;
+
+ if (netif_running(bp->dev)) {
+ bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
+ for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
+ if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
+ bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
+ }
+ }
+ if (!all)
+ return;
+
+ list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
+ if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) &&
+ usr_fltr->fw_vnic_id == rss_ctx->index) {
+ ntp_fltr = container_of(usr_fltr,
+ struct bnxt_ntuple_filter,
+ base);
+ bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr);
+ bnxt_del_ntp_filter(bp, ntp_fltr);
+ bnxt_del_one_usr_fltr(bp, usr_fltr);
+ }
+ }
+
+ if (vnic->rss_table)
+ dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
+ vnic->rss_table,
+ vnic->rss_table_dma_addr);
+ bp->num_rss_ctx--;
+}
+
+static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
+{
+ bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
+
+ xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+ struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
+ struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+
+ if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
+ bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
+ __bnxt_setup_vnic_p5(bp, vnic)) {
+ netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
+ rss_ctx->index);
+ bnxt_del_one_rss_ctx(bp, rss_ctx, true);
+ ethtool_rxfh_context_lost(bp->dev, rss_ctx->index);
+ }
+ }
+}
+
+static void bnxt_clear_rss_ctxs(struct bnxt *bp)
+{
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
+
+ xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+ struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
+
+ bnxt_del_one_rss_ctx(bp, rss_ctx, false);
+ }
+}
+
/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
static bool bnxt_promisc_ok(struct bnxt *bp)
{
@@ -9733,16 +10720,17 @@ static bool bnxt_promisc_ok(struct bnxt *bp)
static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[1];
unsigned int rc = 0;
- rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1);
if (rc) {
netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
rc);
return rc;
}
- rc = bnxt_hwrm_vnic_cfg(bp, 1);
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
rc);
@@ -9756,7 +10744,7 @@ static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
int rc = 0;
unsigned int rx_nr_rings = bp->rx_nr_rings;
@@ -9785,7 +10773,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rx_nr_rings--;
/* default vnic 0 */
- rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings);
if (rc) {
netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
goto err_out;
@@ -9794,7 +10782,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (BNXT_VF(bp))
bnxt_hwrm_func_qcfg(bp);
- rc = bnxt_setup_vnic(bp, 0);
+ rc = bnxt_setup_vnic(bp, vnic);
if (rc)
goto err_out;
if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
@@ -10013,22 +11001,32 @@ static void bnxt_setup_msix(struct bnxt *bp)
}
}
-static void bnxt_setup_inta(struct bnxt *bp)
+static int bnxt_init_int_mode(struct bnxt *bp);
+
+static int bnxt_change_msix(struct bnxt *bp, int total)
{
- const int len = sizeof(bp->irq_tbl[0].name);
+ struct msi_map map;
+ int i;
- if (bp->num_tc) {
- netdev_reset_tc(bp->dev);
- bp->num_tc = 0;
+ /* add MSIX to the end if needed */
+ for (i = bp->total_irqs; i < total; i++) {
+ map = pci_msix_alloc_irq_at(bp->pdev, i, NULL);
+ if (map.index < 0)
+ return bp->total_irqs;
+ bp->irq_tbl[i].vector = map.virq;
+ bp->total_irqs++;
}
- snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
- 0);
- bp->irq_tbl[0].handler = bnxt_inta;
+ /* trim MSIX from the end if needed */
+ for (i = bp->total_irqs; i > total; i--) {
+ map.index = i - 1;
+ map.virq = bp->irq_tbl[i - 1].vector;
+ pci_msix_free_irq(bp->pdev, map);
+ bp->total_irqs--;
+ }
+ return bp->total_irqs;
}
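
For context, bnxt_change_msix() builds on the kernel's dynamic MSI-X API (pci_msix_alloc_irq_at()/pci_msix_free_irq()), which resizes the vector allocation without tearing down IRQs already in use. A hedged sketch of the grow path, assuming MSI-X was enabled with pci_alloc_irq_vectors(..., PCI_IRQ_MSIX) and pci_msix_can_alloc_dyn() returned true (example_grow_msix is illustrative, not part of the driver):

	#include <linux/pci.h>
	#include <linux/msi.h>

	static int example_grow_msix(struct pci_dev *pdev, int from, int to)
	{
		struct msi_map map;
		int i;

		for (i = from; i < to; i++) {
			map = pci_msix_alloc_irq_at(pdev, i, NULL);
			if (map.index < 0)
				return i;	/* only vectors [from, i) were added */
			/* map.virq is a Linux IRQ number usable with request_irq() */
		}
		return to;
	}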
-static int bnxt_init_int_mode(struct bnxt *bp);
-
static int bnxt_setup_int_mode(struct bnxt *bp)
{
int rc;
@@ -10039,10 +11037,7 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
return rc ?: -ENODEV;
}
- if (bp->flags & BNXT_FLAG_USING_MSIX)
- bnxt_setup_msix(bp);
- else
- bnxt_setup_inta(bp);
+ bnxt_setup_msix(bp);
rc = bnxt_set_real_num_queues(bp);
return rc;
@@ -10109,19 +11104,10 @@ unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
}
-int bnxt_get_avail_msix(struct bnxt *bp, int num)
+static int bnxt_get_avail_msix(struct bnxt *bp, int num)
{
- int max_cp = bnxt_get_max_func_cp_rings(bp);
int max_irq = bnxt_get_max_func_irqs(bp);
int total_req = bp->cp_nr_rings + num;
- int max_idx, avail_msix;
-
- max_idx = bp->total_irqs;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
- max_idx = min_t(int, bp->total_irqs, max_cp);
- avail_msix = max_idx - bp->cp_nr_rings;
- if (!BNXT_NEW_RM(bp) || avail_msix >= num)
- return avail_msix;
if (max_irq < total_req) {
num = max_irq - bp->cp_nr_rings;
@@ -10139,10 +11125,9 @@ static int bnxt_get_num_msix(struct bnxt *bp)
return bnxt_nq_rings_in_use(bp);
}
-static int bnxt_init_msix(struct bnxt *bp)
+static int bnxt_init_int_mode(struct bnxt *bp)
{
- int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp;
- struct msix_entry *msix_ent;
+ int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp, tbl_size;
total_vecs = bnxt_get_num_msix(bp);
max = bnxt_get_max_func_irqs(bp);
@@ -10152,29 +11137,24 @@ static int bnxt_init_msix(struct bnxt *bp)
if (!total_vecs)
return 0;
- msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
- if (!msix_ent)
- return -ENOMEM;
-
- for (i = 0; i < total_vecs; i++) {
- msix_ent[i].entry = i;
- msix_ent[i].vector = 0;
- }
-
if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
min = 2;
- total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
+ total_vecs = pci_alloc_irq_vectors(bp->pdev, min, total_vecs,
+ PCI_IRQ_MSIX);
ulp_msix = bnxt_get_ulp_msix_num(bp);
if (total_vecs < 0 || total_vecs < ulp_msix) {
rc = -ENODEV;
goto msix_setup_exit;
}
- bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
+ tbl_size = total_vecs;
+ if (pci_msix_can_alloc_dyn(bp->pdev))
+ tbl_size = max;
+ bp->irq_tbl = kcalloc(tbl_size, sizeof(*bp->irq_tbl), GFP_KERNEL);
if (bp->irq_tbl) {
for (i = 0; i < total_vecs; i++)
- bp->irq_tbl[i].vector = msix_ent[i].vector;
+ bp->irq_tbl[i].vector = pci_irq_vector(bp->pdev, i);
bp->total_irqs = total_vecs;
/* Trim rings based upon num of vectors allocated */
@@ -10192,78 +11172,61 @@ static int bnxt_init_msix(struct bnxt *bp)
rc = -ENOMEM;
goto msix_setup_exit;
}
- bp->flags |= BNXT_FLAG_USING_MSIX;
- kfree(msix_ent);
return 0;
msix_setup_exit:
- netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
+ netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc);
kfree(bp->irq_tbl);
bp->irq_tbl = NULL;
- pci_disable_msix(bp->pdev);
- kfree(msix_ent);
- return rc;
-}
-
-static int bnxt_init_inta(struct bnxt *bp)
-{
- bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
- if (!bp->irq_tbl)
- return -ENOMEM;
-
- bp->total_irqs = 1;
- bp->rx_nr_rings = 1;
- bp->tx_nr_rings = 1;
- bp->cp_nr_rings = 1;
- bp->flags |= BNXT_FLAG_SHARED_RINGS;
- bp->irq_tbl[0].vector = bp->pdev->irq;
- return 0;
-}
-
-static int bnxt_init_int_mode(struct bnxt *bp)
-{
- int rc = -ENODEV;
-
- if (bp->flags & BNXT_FLAG_MSIX_CAP)
- rc = bnxt_init_msix(bp);
-
- if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
- /* fallback to INTA */
- rc = bnxt_init_inta(bp);
- }
+ pci_free_irq_vectors(bp->pdev);
return rc;
}
static void bnxt_clear_int_mode(struct bnxt *bp)
{
- if (bp->flags & BNXT_FLAG_USING_MSIX)
- pci_disable_msix(bp->pdev);
+ pci_free_irq_vectors(bp->pdev);
kfree(bp->irq_tbl);
bp->irq_tbl = NULL;
- bp->flags &= ~BNXT_FLAG_USING_MSIX;
}
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
{
bool irq_cleared = false;
+ bool irq_change = false;
int tcs = bp->num_tc;
+ int irqs_required;
int rc;
if (!bnxt_need_reserve_rings(bp))
return 0;
- if (irq_re_init && BNXT_NEW_RM(bp) &&
- bnxt_get_num_msix(bp) != bp->total_irqs) {
- bnxt_ulp_irq_stop(bp);
- bnxt_clear_int_mode(bp);
- irq_cleared = true;
+ if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
+ int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
+
+ if (ulp_msix > bp->ulp_num_msix_want)
+ ulp_msix = bp->ulp_num_msix_want;
+ irqs_required = ulp_msix + bp->cp_nr_rings;
+ } else {
+ irqs_required = bnxt_get_num_msix(bp);
+ }
+
+ if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
+ irq_change = true;
+ if (!pci_msix_can_alloc_dyn(bp->pdev)) {
+ bnxt_ulp_irq_stop(bp);
+ bnxt_clear_int_mode(bp);
+ irq_cleared = true;
+ }
}
rc = __bnxt_reserve_rings(bp);
if (irq_cleared) {
if (!rc)
rc = bnxt_init_int_mode(bp);
bnxt_ulp_irq_restart(bp, rc);
+ } else if (irq_change && !rc) {
+ if (bnxt_change_msix(bp, irqs_required) != irqs_required)
+ rc = -ENOSPC;
}
if (rc) {
netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
@@ -10301,7 +11264,7 @@ static void bnxt_free_irq(struct bnxt *bp)
irq = &bp->irq_tbl[map_idx];
if (irq->requested) {
if (irq->have_cpumask) {
- irq_set_affinity_hint(irq->vector, NULL);
+ irq_update_affinity_hint(irq->vector, NULL);
free_cpumask_var(irq->cpu_mask);
irq->have_cpumask = 0;
}
@@ -10329,9 +11292,6 @@ static int bnxt_request_irq(struct bnxt *bp)
#ifdef CONFIG_RFS_ACCEL
rmap = bp->dev->rx_cpu_rmap;
#endif
- if (!(bp->flags & BNXT_FLAG_USING_MSIX))
- flags = IRQF_SHARED;
-
for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
int map_idx = bnxt_cp_num_to_irq_num(bp, i);
struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
@@ -10359,10 +11319,10 @@ static int bnxt_request_irq(struct bnxt *bp)
irq->have_cpumask = 1;
cpumask_set_cpu(cpumask_local_spread(i, numa_node),
irq->cpu_mask);
- rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
+ rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
if (rc) {
netdev_warn(bp->dev,
- "Set affinity failed, IRQ = %d\n",
+ "Update affinity hint failed, IRQ = %d\n",
irq->vector);
break;
}
@@ -10396,29 +11356,23 @@ static void bnxt_del_napi(struct bnxt *bp)
static void bnxt_init_napi(struct bnxt *bp)
{
- int i;
+ int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
unsigned int cp_nr_rings = bp->cp_nr_rings;
struct bnxt_napi *bnapi;
+ int i;
- if (bp->flags & BNXT_FLAG_USING_MSIX) {
- int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
-
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
- poll_fn = bnxt_poll_p5;
- else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
- cp_nr_rings--;
- for (i = 0; i < cp_nr_rings; i++) {
- bnapi = bp->bnapi[i];
- netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
- }
- if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
- bnapi = bp->bnapi[cp_nr_rings];
- netif_napi_add(bp->dev, &bnapi->napi,
- bnxt_poll_nitroa0);
- }
- } else {
- bnapi = bp->bnapi[0];
- netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ poll_fn = bnxt_poll_p5;
+ else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ cp_nr_rings--;
+ for (i = 0; i < cp_nr_rings; i++) {
+ bnapi = bp->bnapi[i];
+ netif_napi_add_config(bp->dev, &bnapi->napi, poll_fn,
+ bnapi->index);
+ }
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+ bnapi = bp->bnapi[cp_nr_rings];
+ netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll_nitroa0);
}
}
@@ -10436,12 +11390,10 @@ static void bnxt_disable_napi(struct bnxt *bp)
cpr = &bnapi->cp_ring;
if (bnapi->tx_fault)
- cpr->sw_stats.tx.tx_resets++;
+ cpr->sw_stats->tx.tx_resets++;
if (bnapi->in_reset)
- cpr->sw_stats.rx.rx_resets++;
+ cpr->sw_stats->rx.rx_resets++;
napi_disable(&bnapi->napi);
- if (bnapi->rx_ring)
- cancel_work_sync(&cpr->dim.work);
}
}
@@ -10621,10 +11573,10 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
- eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ _bnxt_fw_to_linkmode(eee->supported, fw_speeds);
bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
@@ -10661,6 +11613,26 @@ hwrm_phy_qcaps_exit:
return rc;
}
+static void bnxt_hwrm_mac_qcaps(struct bnxt *bp)
+{
+ struct hwrm_port_mac_qcaps_output *resp;
+ struct hwrm_port_mac_qcaps_input *req;
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10a03)
+ return;
+
+ rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS);
+ if (rc)
+ return;
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
+ if (!rc)
+ bp->mac_flags = resp->flags;
+ hwrm_req_drop(bp, req);
+}
+
static bool bnxt_support_dropped(u16 advertising, u16 supported)
{
u16 diff = advertising ^ supported;
@@ -10766,7 +11738,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
link_info->module_status = resp->module_status;
if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
u16 fw_speeds;
eee->eee_active = 0;
@@ -10775,8 +11747,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
eee->eee_active = 1;
fw_speeds = le16_to_cpu(
resp->link_partner_adv_eee_link_speed_mask);
- eee->lp_advertised =
- _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds);
}
/* Pull initial EEE config */
@@ -10786,8 +11757,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
eee->eee_enabled = 1;
fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
- eee->advertised =
- _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ _bnxt_fw_to_linkmode(eee->advertised, fw_speeds);
if (resp->eee_config_phy_addr &
PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
@@ -10957,7 +11927,7 @@ int bnxt_hwrm_set_pause(struct bnxt *bp)
static void bnxt_hwrm_set_eee(struct bnxt *bp,
struct hwrm_port_phy_cfg_input *req)
{
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
if (eee->eee_enabled) {
u16 eee_speeds;
@@ -11087,6 +12057,7 @@ static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
hw_resc->resv_rx_rings = 0;
hw_resc->resv_hw_ring_grps = 0;
hw_resc->resv_vnics = 0;
+ hw_resc->resv_rsscos_ctxs = 0;
if (!fw_reset) {
bp->tx_nr_rings = 0;
bp->rx_nr_rings = 0;
@@ -11174,8 +12145,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
if (fw_reset) {
set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
- bnxt_ulp_stop(bp);
- bnxt_free_ctx_mem(bp);
+ bnxt_ulp_irq_stop(bp);
+ bnxt_free_ctx_mem(bp, false);
bnxt_dcb_free(bp);
rc = bnxt_fw_init_one(bp);
if (rc) {
@@ -11322,22 +12293,25 @@ static void bnxt_get_wol_settings(struct bnxt *bp)
static bool bnxt_eee_config_ok(struct bnxt *bp)
{
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
struct bnxt_link_info *link_info = &bp->link_info;
if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
return true;
if (eee->eee_enabled) {
- u32 advertising =
- _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
+
+ _bnxt_fw_to_linkmode(advertising, link_info->advertising);
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
eee->eee_enabled = 0;
return false;
}
- if (eee->advertised & ~advertising) {
- eee->advertised = advertising & eee->supported;
+ if (linkmode_andnot(tmp, eee->advertised, advertising)) {
+ linkmode_and(eee->advertised, advertising,
+ eee->supported);
return false;
}
}
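
The ethtool_eee to ethtool_keee conversion in this hunk replaces u32 speed words with full link-mode bitmaps, so the set algebra moves to the linkmode helpers. A hedged sketch of the subset test used above (example_adv_is_subset is illustrative):

	#include <linux/ethtool.h>
	#include <linux/linkmode.h>

	/* True if 'adv' requests no link mode outside 'supported'. */
	static bool example_adv_is_subset(const unsigned long *adv,
					  const unsigned long *supported)
	{
		__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);

		/* linkmode_andnot() returns true if adv & ~supported is non-empty */
		return !linkmode_andnot(tmp, adv, supported);
	}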
@@ -11404,20 +12378,6 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
return rc;
}
-/* Common routine to pre-map certain register block to different GRC window.
- * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
- * in PF and 3 windows in VF that can be customized to map in different
- * register blocks.
- */
-static void bnxt_preset_reg_win(struct bnxt *bp)
-{
- if (BNXT_PF(bp)) {
- /* CAG registers map to GRC window #4 */
- writel(BNXT_CAG_REG_BASE,
- bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
- }
-}
-
static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
static int bnxt_reinit_after_abort(struct bnxt *bp)
@@ -11442,11 +12402,86 @@ static int bnxt_reinit_after_abort(struct bnxt *bp)
return rc;
}
+static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
+{
+ struct bnxt_ntuple_filter *ntp_fltr;
+ struct bnxt_l2_filter *l2_fltr;
+
+ if (list_empty(&fltr->list))
+ return;
+
+ if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) {
+ ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base);
+ l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
+ atomic_inc(&l2_fltr->refcnt);
+ ntp_fltr->l2_fltr = l2_fltr;
+ if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) {
+ bnxt_del_ntp_filter(bp, ntp_fltr);
+ netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n",
+ fltr->sw_id);
+ }
+ } else if (fltr->type == BNXT_FLTR_TYPE_L2) {
+ l2_fltr = container_of(fltr, struct bnxt_l2_filter, base);
+ if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) {
+ bnxt_del_l2_filter(bp, l2_fltr);
+ netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n",
+ fltr->sw_id);
+ }
+ }
+}
+
+static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
+{
+ struct bnxt_filter_base *usr_fltr, *tmp;
+
+ list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list)
+ bnxt_cfg_one_usr_fltr(bp, usr_fltr);
+}
+
+static int bnxt_set_xps_mapping(struct bnxt *bp)
+{
+ int numa_node = dev_to_node(&bp->pdev->dev);
+ unsigned int q_idx, map_idx, cpu, i;
+ const struct cpumask *cpu_mask_ptr;
+ int nr_cpus = num_online_cpus();
+ cpumask_t *q_map;
+ int rc = 0;
+
+ q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL);
+ if (!q_map)
+ return -ENOMEM;
+
+ /* Create CPU mask for all TX queues across MQPRIO traffic classes.
+ * Each TC has the same number of TX queues. The nth TX queue for each
+ * TC will have the same CPU mask.
+ */
+ for (i = 0; i < nr_cpus; i++) {
+ map_idx = i % bp->tx_nr_rings_per_tc;
+ cpu = cpumask_local_spread(i, numa_node);
+ cpu_mask_ptr = get_cpu_mask(cpu);
+ cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr);
+ }
+
+ /* Register CPU mask for each TX queue except the ones marked for XDP */
+ for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) {
+ map_idx = q_idx % bp->tx_nr_rings_per_tc;
+ rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx);
+ if (rc) {
+ netdev_warn(bp->dev, "Error setting XPS for q:%d\n",
+ q_idx);
+ break;
+ }
+ }
+
+ kfree(q_map);
+
+ return rc;
+}
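
To make the fold in bnxt_set_xps_mapping() concrete: with tx_nr_rings_per_tc = 4 and 8 online CPUs, CPUs {0, 4} land in the mask for queue index 0 of every TC, {1, 5} in the mask for index 1, and so on. A standalone illustration with those hypothetical numbers:

	#include <stdio.h>

	int main(void)
	{
		const int tx_nr_rings_per_tc = 4, nr_cpus = 8;
		int i;

		/* Same modulo fold as the driver: CPU i feeds q_map[i % per_tc] */
		for (i = 0; i < nr_cpus; i++)
			printf("cpu %d -> q_map[%d]\n", i, i % tx_nr_rings_per_tc);
		return 0;
	}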
+
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
- bnxt_preset_reg_win(bp);
netif_carrier_off(bp->dev);
if (irq_re_init) {
/* Reserve rings now if none were reserved at driver probe. */
@@ -11459,12 +12494,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
rc = bnxt_reserve_rings(bp, irq_re_init);
if (rc)
return rc;
- if ((bp->flags & BNXT_FLAG_RFS) &&
- !(bp->flags & BNXT_FLAG_USING_MSIX)) {
- /* disable RFS if falling back to INTA */
- bp->dev->hw_features &= ~NETIF_F_NTUPLE;
- bp->flags &= ~BNXT_FLAG_RFS;
- }
rc = bnxt_alloc_mem(bp, irq_re_init);
if (rc) {
@@ -11504,8 +12533,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
}
}
- if (irq_re_init)
+ if (irq_re_init) {
udp_tunnel_nic_reset_ntf(bp->dev);
+ rc = bnxt_set_xps_mapping(bp);
+ if (rc)
+ netdev_warn(bp->dev, "failed to set xps mapping\n");
+ }
if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
if (!static_key_enabled(&bnxt_xdp_locking_key))
@@ -11526,8 +12559,13 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
+ if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
+ WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
+ if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
+ bnxt_hwrm_realloc_rss_ctx_vnic(bp);
+ bnxt_cfg_usr_fltrs(bp);
return 0;
open_err_irq:
@@ -11641,10 +12679,9 @@ static int bnxt_open(struct net_device *dev)
bnxt_hwrm_if_change(bp, false);
} else {
if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
- if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
- bnxt_ulp_start(bp, 0);
- bnxt_reenable_sriov(bp);
- }
+ if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ bnxt_queue_sp_work(bp,
+ BNXT_RESTART_ULP_SP_EVENT);
}
}
@@ -11675,6 +12712,8 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
while (bnxt_drv_busy(bp))
msleep(20);
+ if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
+ bnxt_clear_rss_ctxs(bp);
/* Flush rings and disable interrupts */
bnxt_shutdown_nic(bp, irq_re_init);
@@ -11874,8 +12913,8 @@ static void bnxt_get_ring_stats(struct bnxt *bp,
stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
stats->rx_dropped +=
- cpr->sw_stats.rx.rx_netpoll_discards +
- cpr->sw_stats.rx.rx_oom_discards;
+ cpr->sw_stats->rx.rx_netpoll_discards +
+ cpr->sw_stats->rx.rx_oom_discards;
}
}
@@ -11942,7 +12981,7 @@ static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
struct bnxt_total_ring_err_stats *stats,
struct bnxt_cp_ring_info *cpr)
{
- struct bnxt_sw_stats *sw_stats = &cpr->sw_stats;
+ struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
u64 *hw_stats = cpr->stats.sw_stats;
stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
@@ -11969,8 +13008,8 @@ void bnxt_get_ring_err_stats(struct bnxt *bp,
static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct net_device *dev = bp->dev;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
struct netdev_hw_addr *ha;
u8 *haddr;
int mc_count = 0;
@@ -12004,7 +13043,7 @@ static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
static bool bnxt_uc_list_updated(struct bnxt *bp)
{
struct net_device *dev = bp->dev;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct netdev_hw_addr *ha;
int off = 0;
@@ -12031,7 +13070,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
if (!test_bit(BNXT_STATE_OPEN, &bp->state))
return;
- vnic = &bp->vnic_info[0];
+ vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
mask = vnic->rx_mask;
mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
@@ -12062,7 +13101,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
static int bnxt_cfg_rx_mode(struct bnxt *bp)
{
struct net_device *dev = bp->dev;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
struct netdev_hw_addr *ha;
int i, off = 0, rc;
bool uc_update;
@@ -12172,23 +13211,27 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
}
/* If runtime conditions support RFS */
-static bool bnxt_rfs_capable(struct bnxt *bp)
+bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
{
- int vnics, max_vnics, max_rss_ctxs;
+ struct bnxt_hw_rings hwr = {0};
+ int max_vnics, max_rss_ctxs;
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
+ !BNXT_SUPPORTS_NTUPLE_VNIC(bp))
return bnxt_rfs_supported(bp);
- if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
+
+ if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
return false;
- vnics = 1 + bp->rx_nr_rings;
+ hwr.grp = bp->rx_nr_rings;
+ hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
+ if (new_rss_ctx)
+ hwr.vnic++;
+ hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
max_vnics = bnxt_get_max_func_vnics(bp);
max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
- /* RSS contexts not a limiting factor */
- if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
- max_rss_ctxs = max_vnics;
- if (vnics > max_vnics || vnics > max_rss_ctxs) {
+ if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
if (bp->rx_nr_rings > 1)
netdev_warn(bp->dev,
"Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
@@ -12199,15 +13242,23 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
if (!BNXT_NEW_RM(bp))
return true;
- if (vnics == bp->hw_resc.resv_vnics)
+ /* Do not reduce VNIC and RSS ctx reservations. There is a FW
+ * issue that will mess up the default VNIC if we reduce the
+ * reservations.
+ */
+ if (hwr.vnic <= bp->hw_resc.resv_vnics &&
+ hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
return true;
- bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
- if (vnics <= bp->hw_resc.resv_vnics)
+ bnxt_hwrm_reserve_rings(bp, &hwr);
+ if (hwr.vnic <= bp->hw_resc.resv_vnics &&
+ hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
return true;
netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
- bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
+ hwr.vnic = 1;
+ hwr.rss_ctx = 0;
+ bnxt_hwrm_reserve_rings(bp, &hwr);
return false;
}
@@ -12217,7 +13268,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
netdev_features_t vlan_features;
- if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
+ if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false))
features &= ~NETIF_F_NTUPLE;
if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
@@ -12229,7 +13280,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
if (features & NETIF_F_GRO_HW)
features &= ~NETIF_F_LRO;
- /* Both CTAG and STAG VLAN accelaration on the RX side have to be
+ /* Both CTAG and STAG VLAN acceleration on the RX side have to be
* turned on or off together.
*/
vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
@@ -12246,14 +13297,24 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
return features;
}
+static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init,
+ bool link_re_init, u32 flags, bool update_tpa)
+{
+ bnxt_close_nic(bp, irq_re_init, link_re_init);
+ bp->flags = flags;
+ if (update_tpa)
+ bnxt_set_ring_params(bp);
+ return bnxt_open_nic(bp, irq_re_init, link_re_init);
+}
+
static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
{
+ bool update_tpa = false, update_ntuple = false;
struct bnxt *bp = netdev_priv(dev);
u32 flags = bp->flags;
u32 changes;
int rc = 0;
bool re_init = false;
- bool update_tpa = false;
flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
if (features & NETIF_F_GRO_HW)
@@ -12269,6 +13330,8 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (features & NETIF_F_NTUPLE)
flags |= BNXT_FLAG_RFS;
+ else
+ bnxt_clear_usr_fltrs(bp, true);
changes = flags ^ bp->flags;
if (changes & BNXT_FLAG_TPA) {
@@ -12282,6 +13345,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (changes & ~BNXT_FLAG_TPA)
re_init = true;
+ if (changes & BNXT_FLAG_RFS)
+ update_ntuple = true;
+
if (flags != bp->flags) {
u32 old_flags = bp->flags;
@@ -12292,14 +13358,12 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
return rc;
}
- if (re_init) {
- bnxt_close_nic(bp, false, false);
- bp->flags = flags;
- if (update_tpa)
- bnxt_set_ring_params(bp);
+ if (update_ntuple)
+ return bnxt_reinit_features(bp, true, false, flags, update_tpa);
+
+ if (re_init)
+ return bnxt_reinit_features(bp, false, false, flags, update_tpa);
- return bnxt_open_nic(bp, false, false);
- }
if (update_tpa) {
bp->flags = flags;
rc = bnxt_set_tpa(bp,
@@ -12596,17 +13660,8 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent)
if (!silent)
bnxt_dbg_dump_states(bp);
if (netif_running(bp->dev)) {
- int rc;
-
- if (silent) {
- bnxt_close_nic(bp, false, false);
- bnxt_open_nic(bp, false, false);
- } else {
- bnxt_ulp_stop(bp);
- bnxt_close_nic(bp, true, false);
- rc = bnxt_open_nic(bp, true, false);
- bnxt_ulp_start(bp, rc);
- }
+ bnxt_close_nic(bp, !silent, false);
+ bnxt_open_nic(bp, !silent, false);
}
}
@@ -12756,7 +13811,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
bnxt_reset_task(bp, true);
break;
}
- bnxt_free_one_rx_ring_skbs(bp, i);
+ bnxt_free_one_rx_ring_skbs(bp, rxr);
rxr->rx_prod = 0;
rxr->rx_agg_prod = 0;
rxr->rx_sw_agg_prod = 0;
@@ -12764,7 +13819,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
rxr->bnapi->in_reset = false;
bnxt_alloc_one_rx_ring(bp, i);
cpr = &rxr->bnapi->cp_ring;
- cpr->sw_stats.rx.rx_resets++;
+ cpr->sw_stats->rx.rx_resets++;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
@@ -12774,9 +13829,18 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
bnxt_rtnl_unlock_sp(bp);
}
+static void bnxt_fw_fatal_close(struct bnxt *bp)
+{
+ bnxt_tx_disable(bp);
+ bnxt_disable_napi(bp);
+ bnxt_disable_int_sync(bp);
+ bnxt_free_irq(bp);
+ bnxt_clear_int_mode(bp);
+ pci_disable_device(bp->pdev);
+}
+
static void bnxt_fw_reset_close(struct bnxt *bp)
{
- bnxt_ulp_stop(bp);
/* When firmware is in fatal state, quiesce device and disable
* bus master to prevent any potential bad DMAs before freeing
* kernel memory.
@@ -12787,12 +13851,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
if (val == 0xffff)
bp->fw_reset_min_dsecs = 0;
- bnxt_tx_disable(bp);
- bnxt_disable_napi(bp);
- bnxt_disable_int_sync(bp);
- bnxt_free_irq(bp);
- bnxt_clear_int_mode(bp);
- pci_disable_device(bp->pdev);
+ bnxt_fw_fatal_close(bp);
}
__bnxt_close_nic(bp, true, false);
bnxt_vf_reps_free(bp);
@@ -12800,7 +13859,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
bnxt_hwrm_func_drv_unrgtr(bp);
if (pci_is_enabled(bp->pdev))
pci_disable_device(bp->pdev);
- bnxt_free_ctx_mem(bp);
+ bnxt_free_ctx_mem(bp, false);
}
static bool is_bnxt_fw_ok(struct bnxt *bp)
@@ -12834,10 +13893,13 @@ static void bnxt_force_fw_reset(struct bnxt *bp)
test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return;
+ /* We have to serialize with bnxt_refclk_read() */
if (ptp) {
- spin_lock_bh(&ptp->ptp_lock);
+ unsigned long flags;
+
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- spin_unlock_bh(&ptp->ptp_lock);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
} else {
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
}
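
The ptp_lock conversion here (spin_lock_bh to write_seqlock_irqsave) implies that the reader named in the comment, bnxt_refclk_read(), now spins on a sequence counter instead of blocking. A hedged sketch of the matching read side, with the actual PHC register reads stubbed out:

	#include <linux/seqlock.h>

	static u64 example_refclk_read(struct bnxt_ptp_cfg *ptp)
	{
		unsigned int seq;
		u64 ns;

		do {
			seq = read_seqbegin(&ptp->ptp_lock);
			ns = 0;	/* read the PHC cycle counter registers here */
		} while (read_seqretry(&ptp->ptp_lock, seq));
		return ns;
	}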
@@ -12862,6 +13924,7 @@ void bnxt_fw_exception(struct bnxt *bp)
{
netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+ bnxt_ulp_stop(bp);
bnxt_rtnl_lock_sp(bp);
bnxt_force_fw_reset(bp);
bnxt_rtnl_unlock_sp(bp);
@@ -12893,16 +13956,20 @@ static int bnxt_get_registered_vfs(struct bnxt *bp)
void bnxt_fw_reset(struct bnxt *bp)
{
+ bnxt_ulp_stop(bp);
bnxt_rtnl_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
int n = 0, tmo;
+ /* We have to serialize with bnxt_refclk_read() */
if (ptp) {
- spin_lock_bh(&ptp->ptp_lock);
+ unsigned long flags;
+
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- spin_unlock_bh(&ptp->ptp_lock);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
} else {
set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
}
@@ -12971,7 +14038,7 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
bnxt_dbg_hwrm_ring_info_get(bp,
DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
fw_ring_id, &val[0], &val[1]);
- cpr->sw_stats.cmn.missed_irqs++;
+ cpr->sw_stats->cmn.missed_irqs++;
}
}
}
@@ -13017,6 +14084,12 @@ static void bnxt_fw_echo_reply(struct bnxt *bp)
hwrm_req_send(bp, req);
}
+static void bnxt_ulp_restart(struct bnxt *bp)
+{
+ bnxt_ulp_stop(bp);
+ bnxt_ulp_start(bp, 0);
+}
+
static void bnxt_sp_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, sp_task);
@@ -13028,6 +14101,11 @@ static void bnxt_sp_task(struct work_struct *work)
return;
}
+ if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) {
+ bnxt_ulp_restart(bp);
+ bnxt_reenable_sriov(bp);
+ }
+
if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
bnxt_cfg_rx_mode(bp);
@@ -13129,9 +14207,9 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp)
{
int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp;
- int tx_rings_needed, stats;
+ struct bnxt_hw_rings hwr = {0};
int rx_rings = rx;
- int cp, vnics;
+ int rc;
if (tcs)
tx_sets = tcs;
@@ -13144,26 +14222,43 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx_rings <<= 1;
- tx_rings_needed = tx * tx_sets + tx_xdp;
- if (max_tx < tx_rings_needed)
+ hwr.rx = rx_rings;
+ hwr.tx = tx * tx_sets + tx_xdp;
+ if (max_tx < hwr.tx)
return -ENOMEM;
- vnics = 1;
- if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) ==
- BNXT_FLAG_RFS)
- vnics += rx;
+ hwr.vnic = bnxt_get_total_vnics(bp, rx);
- tx_cp = __bnxt_num_tx_to_cp(bp, tx_rings_needed, tx_sets, tx_xdp);
- cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
- if (max_cp < cp)
+ tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp);
+ hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx;
+ if (max_cp < hwr.cp)
return -ENOMEM;
- stats = cp;
+ hwr.stat = hwr.cp;
if (BNXT_NEW_RM(bp)) {
- cp += bnxt_get_ulp_msix_num(bp);
- stats += bnxt_get_ulp_stat_ctxs(bp);
+ hwr.cp += bnxt_get_ulp_msix_num_in_use(bp);
+ hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp);
+ hwr.grp = rx;
+ hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
+ }
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ hwr.cp_p5 = hwr.tx + rx;
+ rc = bnxt_hwrm_check_rings(bp, &hwr);
+ if (!rc && pci_msix_can_alloc_dyn(bp->pdev)) {
+ if (!bnxt_ulp_registered(bp->edev)) {
+ hwr.cp += bnxt_get_ulp_msix_num(bp);
+ hwr.cp = min_t(int, hwr.cp, bnxt_get_max_func_irqs(bp));
+ }
+ if (hwr.cp > bp->total_irqs) {
+ int total_msix = bnxt_change_msix(bp, hwr.cp);
+
+ if (total_msix < hwr.cp) {
+ netdev_warn(bp->dev, "Unable to allocate %d MSIX vectors, maximum available %d\n",
+ hwr.cp, total_msix);
+ rc = -ENOSPC;
+ }
+ }
}
- return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
- stats, vnics);
+ return rc;
}
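
A worked example of the ring math above under assumed inputs: 8 TX, 8 RX, shared completion rings, aggregation rings enabled, no TCs or XDP, and an assumed 1:1 TX-to-CP ratio for __bnxt_num_tx_to_cp():

	#include <stdio.h>

	int main(void)
	{
		int tx = 8, rx = 8, tx_sets = 1, tx_xdp = 0, sh = 1;
		int rx_rings = rx * 2;			/* BNXT_FLAG_AGG_RINGS doubles RX */
		int hwr_tx = tx * tx_sets + tx_xdp;	/* 8 */
		int tx_cp = hwr_tx;			/* assumed 1:1 */
		int hwr_cp = sh ? (tx_cp > rx ? tx_cp : rx) : tx_cp + rx;

		/* prints: rx_rings=16 tx=8 cp=8 */
		printf("rx_rings=%d tx=%d cp=%d\n", rx_rings, hwr_tx, hwr_cp);
		return 0;
	}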
static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -13301,6 +14396,19 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
if (rc)
return -ENODEV;
+ rc = bnxt_alloc_crash_dump_mem(bp);
+ if (rc)
+ netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n",
+ rc);
+ if (!rc) {
+ rc = bnxt_hwrm_crash_dump_mem_cfg(bp);
+ if (rc) {
+ bnxt_free_crash_dump_mem(bp);
+ netdev_warn(bp->dev,
+ "hwrm crash dump mem failure rc: %d\n", rc);
+ }
+ }
+
if (bnxt_fw_pre_resv_vnics(bp))
bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
@@ -13340,7 +14448,7 @@ static void bnxt_set_dflt_rfs(struct bnxt *bp)
bp->flags &= ~BNXT_FLAG_RFS;
if (bnxt_rfs_supported(bp)) {
dev->hw_features |= NETIF_F_NTUPLE;
- if (bnxt_rfs_capable(bp)) {
+ if (bnxt_rfs_capable(bp, false)) {
bp->flags |= BNXT_FLAG_RFS;
dev->features |= NETIF_F_NTUPLE;
}
@@ -13484,10 +14592,8 @@ static bool bnxt_fw_reset_timeout(struct bnxt *bp)
static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
{
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
- bnxt_ulp_start(bp, rc);
+ if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
bnxt_dl_health_fw_status_update(bp, false);
- }
bp->fw_reset_state = 0;
dev_close(bp->dev);
}
@@ -13518,7 +14624,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = 0;
netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
n);
- return;
+ goto ulp_start;
}
bnxt_queue_fw_reset_work(bp, HZ / 10);
return;
@@ -13528,7 +14634,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
bnxt_fw_reset_abort(bp, rc);
rtnl_unlock();
- return;
+ goto ulp_start;
}
bnxt_fw_reset_close(bp);
if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
@@ -13621,7 +14727,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
bnxt_fw_reset_abort(bp, rc);
rtnl_unlock();
- return;
+ goto ulp_start;
}
if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
@@ -13633,10 +14739,6 @@ static void bnxt_fw_reset_task(struct work_struct *work)
/* Make sure fw_reset_state is 0 before clearing the flag */
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- bnxt_ulp_start(bp, 0);
- bnxt_reenable_sriov(bp);
- bnxt_vf_reps_alloc(bp);
- bnxt_vf_reps_open(bp);
bnxt_ptp_reapply_pps(bp);
clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
@@ -13644,6 +14746,12 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_dl_health_fw_status_update(bp, true);
}
rtnl_unlock();
+ bnxt_ulp_start(bp, 0);
+ bnxt_reenable_sriov(bp);
+ rtnl_lock();
+ bnxt_vf_reps_alloc(bp);
+ bnxt_vf_reps_open(bp);
+ rtnl_unlock();
break;
}
return;
@@ -13659,6 +14767,8 @@ fw_reset_abort:
rtnl_lock();
bnxt_fw_reset_abort(bp, rc);
rtnl_unlock();
+ulp_start:
+ bnxt_ulp_start(bp, rc);
}
static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
@@ -13766,6 +14876,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
return rc;
eth_hw_addr_set(dev, addr->sa_data);
+ bnxt_clear_usr_fltrs(bp, true);
if (netif_running(dev)) {
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
@@ -13782,7 +14893,15 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
if (netif_running(dev))
bnxt_close_nic(bp, true, false);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
+
+ /* An MTU change may alter the AGG ring settings if an XDP multi-buffer
+ * program is attached. We need to set the AGG ring settings and
+ * rx_skb_func accordingly.
+ */
+ if (READ_ONCE(bp->xdp_prog))
+ bnxt_set_rx_skb_mode(bp, true);
+
bnxt_set_ring_params(bp);
if (netif_running(dev))
@@ -13888,7 +15007,7 @@ u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys,
if (skb)
return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
- vnic = &bp->vnic_info[0];
+ vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key);
}
@@ -13899,7 +15018,7 @@ int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
int bit_id;
spin_lock_bh(&bp->ntp_fltr_lock);
- bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, BNXT_MAX_FLTR, 0);
+ bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0);
if (bit_id < 0) {
spin_unlock_bh(&bp->ntp_fltr_lock);
return -ENOMEM;
@@ -13911,6 +15030,7 @@ int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
head = &bp->ntp_fltr_hash_tbl[idx];
hlist_add_head_rcu(&fltr->base.hash, head);
set_bit(BNXT_FLTR_INSERTED, &fltr->base.state);
+ bnxt_insert_usr_fltr(bp, &fltr->base);
bp->ntp_fltr_count++;
spin_unlock_bh(&bp->ntp_fltr_lock);
return 0;
@@ -13919,45 +15039,39 @@ int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr,
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
struct bnxt_ntuple_filter *f2)
{
+ struct bnxt_flow_masks *masks1 = &f1->fmasks;
+ struct bnxt_flow_masks *masks2 = &f2->fmasks;
struct flow_keys *keys1 = &f1->fkeys;
struct flow_keys *keys2 = &f2->fkeys;
- if (f1->ntuple_flags != f2->ntuple_flags)
- return false;
-
if (keys1->basic.n_proto != keys2->basic.n_proto ||
keys1->basic.ip_proto != keys2->basic.ip_proto)
return false;
if (keys1->basic.n_proto == htons(ETH_P_IP)) {
- if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) &&
- keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src) ||
- ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) &&
- keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst))
+ if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
+ masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src ||
+ keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst ||
+ masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst)
return false;
} else {
- if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) &&
- memcmp(&keys1->addrs.v6addrs.src,
- &keys2->addrs.v6addrs.src,
- sizeof(keys1->addrs.v6addrs.src))) ||
- ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) &&
- memcmp(&keys1->addrs.v6addrs.dst,
- &keys2->addrs.v6addrs.dst,
- sizeof(keys1->addrs.v6addrs.dst))))
+ if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src,
+ &keys2->addrs.v6addrs.src) ||
+ !ipv6_addr_equal(&masks1->addrs.v6addrs.src,
+ &masks2->addrs.v6addrs.src) ||
+ !ipv6_addr_equal(&keys1->addrs.v6addrs.dst,
+ &keys2->addrs.v6addrs.dst) ||
+ !ipv6_addr_equal(&masks1->addrs.v6addrs.dst,
+ &masks2->addrs.v6addrs.dst))
return false;
}
- if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) &&
- keys1->ports.src != keys2->ports.src) ||
- ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) &&
- keys1->ports.dst != keys2->ports.dst))
- return false;
-
- if (keys1->control.flags == keys2->control.flags &&
- f1->l2_fltr == f2->l2_fltr)
- return true;
-
- return false;
+ return keys1->ports.src == keys2->ports.src &&
+ masks1->ports.src == masks2->ports.src &&
+ keys1->ports.dst == keys2->ports.dst &&
+ masks1->ports.dst == masks2->ports.dst &&
+ keys1->control.flags == keys2->control.flags &&
+ f1->l2_fltr == f2->l2_fltr;
}
struct bnxt_ntuple_filter *
@@ -13988,7 +15102,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u32 flags;
if (ether_addr_equal(dev->dev_addr, eth->h_dest)) {
- l2_fltr = bp->vnic_info[0].l2_filters[0];
+ l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
atomic_inc(&l2_fltr->refcnt);
} else {
struct bnxt_l2_key key;
@@ -14022,10 +15136,13 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
rc = -EPROTONOSUPPORT;
goto err_free;
}
- if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
- bp->hwrm_spec_code < 0x10601) {
- rc = -EPROTONOSUPPORT;
- goto err_free;
+ new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL;
+ if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) {
+ if (bp->hwrm_spec_code < 0x10601) {
+ rc = -EPROTONOSUPPORT;
+ goto err_free;
+ }
+ new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL;
}
flags = fkeys->control.flags;
if (((flags & FLOW_DIS_ENCAPSULATION) &&
@@ -14033,9 +15150,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
rc = -EPROTONOSUPPORT;
goto err_free;
}
-
new_fltr->l2_fltr = l2_fltr;
- new_fltr->ntuple_flags = BNXT_NTUPLE_MATCH_ALL;
idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb);
rcu_read_lock();
@@ -14070,6 +15185,7 @@ void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr)
return;
}
hlist_del_rcu(&fltr->base.hash);
+ bnxt_del_one_usr_fltr(bp, &fltr->base);
bp->ntp_fltr_count--;
spin_unlock_bh(&bp->ntp_fltr_lock);
bnxt_del_l2_filter(bp, fltr->l2_fltr);
@@ -14195,12 +15311,9 @@ static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
u16 mode;
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
-
mode = nla_get_u16(attr);
if (mode == bp->br_mode)
break;
@@ -14264,6 +15377,339 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_bridge_setlink = bnxt_bridge_setlink,
};
+static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_cp_ring_info *cpr;
+ u64 *sw;
+
+ if (!bp->bnapi)
+ return;
+
+ cpr = &bp->bnapi[i]->cp_ring;
+ sw = cpr->stats.sw_stats;
+
+ stats->packets = 0;
+ stats->packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
+
+ stats->bytes = 0;
+ stats->bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
+
+ stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
+}
+
+static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_napi *bnapi;
+ u64 *sw;
+
+ if (!bp->tx_ring)
+ return;
+
+ bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi;
+ sw = bnapi->cp_ring.stats.sw_stats;
+
+ stats->packets = 0;
+ stats->packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
+ stats->packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
+
+ stats->bytes = 0;
+ stats->bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
+ stats->bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
+}
+
+static void bnxt_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ rx->packets = bp->net_stats_prev.rx_packets;
+ rx->bytes = bp->net_stats_prev.rx_bytes;
+ rx->alloc_fail = bp->ring_err_stats_prev.rx_total_oom_discards;
+
+ tx->packets = bp->net_stats_prev.tx_packets;
+ tx->bytes = bp->net_stats_prev.tx_bytes;
+}
+
+static const struct netdev_stat_ops bnxt_stat_ops = {
+ .get_queue_stats_rx = bnxt_get_queue_stats_rx,
+ .get_queue_stats_tx = bnxt_get_queue_stats_tx,
+ .get_base_stats = bnxt_get_base_stats,
+};
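
These per-queue ops feed the netdev netlink qstats interface. The assignment presumably happens in the probe path, which is outside this section; a minimal sketch:

	/* illustrative; the actual hookup is not shown in this hunk */
	dev->stat_ops = &bnxt_stat_ops;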
+
+static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
+{
+ struct bnxt_rx_ring_info *rxr, *clone;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ring_struct *ring;
+ int rc;
+
+ if (!bp->rx_ring)
+ return -ENETDOWN;
+
+ rxr = &bp->rx_ring[idx];
+ clone = qmem;
+ memcpy(clone, rxr, sizeof(*rxr));
+ bnxt_init_rx_ring_struct(bp, clone);
+ bnxt_reset_rx_ring_struct(bp, clone);
+
+ clone->rx_prod = 0;
+ clone->rx_agg_prod = 0;
+ clone->rx_sw_agg_prod = 0;
+ clone->rx_next_cons = 0;
+
+ rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
+ if (rc)
+ return rc;
+
+ rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0);
+ if (rc < 0)
+ goto err_page_pool_destroy;
+
+ rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ clone->page_pool);
+ if (rc)
+ goto err_rxq_info_unreg;
+
+ ring = &clone->rx_ring_struct;
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
+ if (rc)
+ goto err_free_rx_ring;
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ ring = &clone->rx_agg_ring_struct;
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
+ if (rc)
+ goto err_free_rx_agg_ring;
+
+ rc = bnxt_alloc_rx_agg_bmap(bp, clone);
+ if (rc)
+ goto err_free_rx_agg_ring;
+ }
+
+ if (bp->flags & BNXT_FLAG_TPA) {
+ rc = bnxt_alloc_one_tpa_info(bp, clone);
+ if (rc)
+ goto err_free_tpa_info;
+ }
+
+ bnxt_init_one_rx_ring_rxbd(bp, clone);
+ bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
+
+ bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ bnxt_alloc_one_rx_ring_page(bp, clone, idx);
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_alloc_one_tpa_info_data(bp, clone);
+
+ return 0;
+
+err_free_tpa_info:
+ bnxt_free_one_tpa_info(bp, clone);
+err_free_rx_agg_ring:
+ bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
+err_free_rx_ring:
+ bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
+err_rxq_info_unreg:
+ xdp_rxq_info_unreg(&clone->xdp_rxq);
+err_page_pool_destroy:
+ page_pool_destroy(clone->page_pool);
+ if (bnxt_separate_head_pool())
+ page_pool_destroy(clone->head_pool);
+ clone->page_pool = NULL;
+ clone->head_pool = NULL;
+ return rc;
+}
+
+static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
+{
+ struct bnxt_rx_ring_info *rxr = qmem;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ring_struct *ring;
+
+ bnxt_free_one_rx_ring_skbs(bp, rxr);
+ bnxt_free_one_tpa_info(bp, rxr);
+
+ xdp_rxq_info_unreg(&rxr->xdp_rxq);
+
+ page_pool_destroy(rxr->page_pool);
+ if (bnxt_separate_head_pool())
+ page_pool_destroy(rxr->head_pool);
+ rxr->page_pool = NULL;
+ rxr->head_pool = NULL;
+
+ ring = &rxr->rx_ring_struct;
+ bnxt_free_ring(bp, &ring->ring_mem);
+
+ ring = &rxr->rx_agg_ring_struct;
+ bnxt_free_ring(bp, &ring->ring_mem);
+
+ kfree(rxr->rx_agg_bmap);
+ rxr->rx_agg_bmap = NULL;
+}
+
+static void bnxt_copy_rx_ring(struct bnxt *bp,
+ struct bnxt_rx_ring_info *dst,
+ struct bnxt_rx_ring_info *src)
+{
+ struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
+ struct bnxt_ring_struct *dst_ring, *src_ring;
+ int i;
+
+ dst_ring = &dst->rx_ring_struct;
+ dst_rmem = &dst_ring->ring_mem;
+ src_ring = &src->rx_ring_struct;
+ src_rmem = &src_ring->ring_mem;
+
+ WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
+ WARN_ON(dst_rmem->page_size != src_rmem->page_size);
+ WARN_ON(dst_rmem->flags != src_rmem->flags);
+ WARN_ON(dst_rmem->depth != src_rmem->depth);
+ WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
+ WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
+
+ dst_rmem->pg_tbl = src_rmem->pg_tbl;
+ dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
+ *dst_rmem->vmem = *src_rmem->vmem;
+ for (i = 0; i < dst_rmem->nr_pages; i++) {
+ dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
+ dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
+ }
+
+ if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+ return;
+
+ dst_ring = &dst->rx_agg_ring_struct;
+ dst_rmem = &dst_ring->ring_mem;
+ src_ring = &src->rx_agg_ring_struct;
+ src_rmem = &src_ring->ring_mem;
+
+ WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
+ WARN_ON(dst_rmem->page_size != src_rmem->page_size);
+ WARN_ON(dst_rmem->flags != src_rmem->flags);
+ WARN_ON(dst_rmem->depth != src_rmem->depth);
+ WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
+ WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
+ WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);
+
+ dst_rmem->pg_tbl = src_rmem->pg_tbl;
+ dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
+ *dst_rmem->vmem = *src_rmem->vmem;
+ for (i = 0; i < dst_rmem->nr_pages; i++) {
+ dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
+ dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
+ }
+
+ dst->rx_agg_bmap = src->rx_agg_bmap;
+}
+
+static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rx_ring_info *rxr, *clone;
+ struct bnxt_cp_ring_info *cpr;
+ struct bnxt_vnic_info *vnic;
+ int i, rc;
+
+ rxr = &bp->rx_ring[idx];
+ clone = qmem;
+
+ rxr->rx_prod = clone->rx_prod;
+ rxr->rx_agg_prod = clone->rx_agg_prod;
+ rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
+ rxr->rx_next_cons = clone->rx_next_cons;
+ rxr->rx_tpa = clone->rx_tpa;
+ rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
+ rxr->page_pool = clone->page_pool;
+ rxr->head_pool = clone->head_pool;
+ rxr->xdp_rxq = clone->xdp_rxq;
+
+ bnxt_copy_rx_ring(bp, rxr, clone);
+
+ rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
+ if (rc)
+ return rc;
+ rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
+ if (rc)
+ goto err_free_hwrm_rx_ring;
+
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+
+ cpr = &rxr->bnapi->cp_ring;
+ cpr->sw_stats->rx.rx_resets++;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+
+ rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
+ vnic->vnic_id, rc);
+ return rc;
+ }
+ vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
+ bnxt_hwrm_vnic_update(bp, vnic,
+ VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+ }
+
+ return 0;
+
+err_free_hwrm_rx_ring:
+ bnxt_hwrm_rx_ring_free(bp, rxr, false);
+ return rc;
+}
+
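+/* Quiesce the queue in order: zero every vnic's MRU so the hardware
+ * stops placing packets, synchronize_net() so NAPI observes the
+ * disabled vnics, cancel deferred DIM work, free the hardware rings,
+ * and finally disable page_pool direct recycling because the remaining
+ * in-flight pages may now be returned from foreign contexts.
+ */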
+static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_vnic_info *vnic;
+ int i;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+ vnic->mru = 0;
+ bnxt_hwrm_vnic_update(bp, vnic,
+ VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+ }
+ /* Make sure NAPI sees that the VNIC is disabled */
+ synchronize_net();
+ rxr = &bp->rx_ring[idx];
+ cancel_work_sync(&rxr->bnapi->cp_ring.dim.work);
+ bnxt_hwrm_rx_ring_free(bp, rxr, false);
+ bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
+ rxr->rx_next_cons = 0;
+ page_pool_disable_direct_recycling(rxr->page_pool);
+ if (bnxt_separate_head_pool())
+ page_pool_disable_direct_recycling(rxr->head_pool);
+
+ memcpy(qmem, rxr, sizeof(*rxr));
+ bnxt_init_rx_ring_struct(bp, qmem);
+
+ return 0;
+}
+
+static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
+ .ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info),
+ .ndo_queue_mem_alloc = bnxt_queue_mem_alloc,
+ .ndo_queue_mem_free = bnxt_queue_mem_free,
+ .ndo_queue_start = bnxt_queue_start,
+ .ndo_queue_stop = bnxt_queue_stop,
+};
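+
+/* With these ops registered, the core can restart one rx queue at a
+ * time (see netdev_rx_queue_restart()).  A simplified sketch of that
+ * sequence, error rollback omitted:
+ *
+ *	new = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
+ *	old = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL);
+ *	qops->ndo_queue_mem_alloc(dev, new, idx);  // build detached clone
+ *	qops->ndo_queue_stop(dev, old, idx);       // save live state in old
+ *	qops->ndo_queue_start(dev, new, idx);      // adopt the clone
+ *	qops->ndo_queue_mem_free(dev, old);        // release old memory
+ */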
+
static void bnxt_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -14272,12 +15718,16 @@ static void bnxt_remove_one(struct pci_dev *pdev)
if (BNXT_PF(bp))
bnxt_sriov_disable(bp);
- bnxt_rdma_aux_device_uninit(bp);
+ bnxt_rdma_aux_device_del(bp);
bnxt_ptp_clear(bp);
unregister_netdev(dev);
+
+ bnxt_rdma_aux_device_uninit(bp);
+
bnxt_free_l2_filters(bp, true);
bnxt_free_ntp_fltrs(bp, true);
+ WARN_ON(bp->num_rss_ctx);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
/* Flush any pending tasks */
cancel_work_sync(&bp->sp_task);
@@ -14299,7 +15749,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
- bnxt_free_ctx_mem(bp);
+ bnxt_free_ctx_mem(bp, true);
+ bnxt_free_crash_dump_mem(bp);
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
bnxt_free_port_stats(bp);
@@ -14322,6 +15773,10 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
bp->dev->priv_flags |= IFF_SUPP_NOFCS;
else
bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
+
+ bp->mac_flags = 0;
+ bnxt_hwrm_mac_qcaps(bp);
+
if (!fw_dflt)
return 0;
@@ -14366,8 +15821,9 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
*max_rx = hw_resc->max_rx_rings;
*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
- bnxt_get_ulp_msix_num(bp),
- hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
+ bnxt_get_ulp_msix_num_in_use(bp),
+ hw_resc->max_stat_ctxs -
+ bnxt_get_ulp_stat_ctxs_in_use(bp));
if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
*max_cp = min_t(int, *max_cp, max_irq);
max_ring_grps = hw_resc->max_hw_ring_grps;
@@ -14463,6 +15919,7 @@ static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
int dflt_rings, max_rx_rings, max_tx_rings, rc;
+ int avail_msix;
if (!bnxt_can_reserve_rings(bp))
return 0;
@@ -14490,6 +15947,14 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
+ if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
+ int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);
+
+ bnxt_set_ulp_msix_num(bp, ulp_num_msix);
+ bnxt_set_dflt_ulp_stat_ctxs(bp);
+ }
+
rc = __bnxt_reserve_rings(bp);
if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "Unable to reserve tx rings\n");
@@ -14669,6 +16134,7 @@ void bnxt_print_device_info(struct bnxt *bp)
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct bnxt_hw_resc *hw_resc;
struct net_device *dev;
struct bnxt *bp;
int rc, max_irqs;
@@ -14676,6 +16142,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_is_bridge(pdev))
return -ENODEV;
+ if (!pdev->msix_cap) {
+ dev_err(&pdev->dev, "MSIX capability not found, aborting\n");
+ return -ENODEV;
+ }
+
/* Clear any pending DMA transactions from crash kernel
* while loading driver in capture kernel.
*/
@@ -14702,14 +16173,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_PF(bp))
SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
- if (pdev->msix_cap)
- bp->flags |= BNXT_FLAG_MSIX_CAP;
-
rc = bnxt_init_board(pdev, dev);
if (rc < 0)
goto init_err_free;
dev->netdev_ops = &bnxt_netdev_ops;
+ dev->stat_ops = &bnxt_stat_ops;
dev->watchdog_timeo = BNXT_TX_TIMEOUT;
dev->ethtool_ops = &bnxt_ethtool_ops;
pci_set_drvdata(pdev, dev);
@@ -14790,6 +16259,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->priv_flags |= IFF_UNICAST_FLT;
netif_set_tso_max_size(dev, GSO_MAX_SIZE);
+ if (bp->tso_max_segs)
+ netif_set_tso_max_segs(dev, bp->tso_max_segs);
dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_RX_SG;
@@ -14827,10 +16298,18 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ hw_resc = &bp->hw_resc;
+ bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows +
+ BNXT_L2_FLTR_MAX_FLTR;
+ /* Older firmware may not report these filters properly */
+ if (bp->max_fltr < BNXT_MAX_FLTR)
+ bp->max_fltr = BNXT_MAX_FLTR;
bnxt_init_l2_fltr_tbl(bp);
- bnxt_set_rx_skb_mode(bp, false);
+ __bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
+ bnxt_init_ring_params(bp);
bnxt_set_ring_params(bp);
+ bnxt_rdma_aux_device_init(bp);
rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
if (BNXT_VF(bp) && rc == -ENODEV) {
@@ -14879,13 +16358,20 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_dl;
+ INIT_LIST_HEAD(&bp->usr_fltr_list);
+
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
+ if (BNXT_SUPPORTS_QUEUE_API(bp))
+ dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
+
rc = register_netdev(dev);
if (rc)
goto init_err_cleanup;
bnxt_dl_fw_reporters_create(bp);
- bnxt_rdma_aux_device_init(bp);
+ bnxt_rdma_aux_device_add(bp);
bnxt_print_device_info(bp);
@@ -14893,6 +16379,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
init_err_cleanup:
+ bnxt_rdma_aux_device_uninit(bp);
bnxt_dl_unregister(bp);
init_err_dl:
bnxt_shutdown_tc(bp);
@@ -14909,7 +16396,8 @@ init_err_pci_clean:
kfree(bp->fw_health);
bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
- bnxt_free_ctx_mem(bp);
+ bnxt_free_ctx_mem(bp, true);
+ bnxt_free_crash_dump_mem(bp);
kfree(bp->rss_indir_tbl);
bp->rss_indir_tbl = NULL;
@@ -14934,6 +16422,7 @@ static void bnxt_shutdown(struct pci_dev *pdev)
if (netif_running(dev))
dev_close(dev);
+ bnxt_ptp_clear(bp);
bnxt_clear_int_mode(bp);
pci_disable_device(pdev);
@@ -14953,15 +16442,17 @@ static int bnxt_suspend(struct device *device)
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
- rtnl_lock();
bnxt_ulp_stop(bp);
+
+ rtnl_lock();
if (netif_running(dev)) {
netif_device_detach(dev);
rc = bnxt_close(dev);
}
bnxt_hwrm_func_drv_unrgtr(bp);
+ bnxt_ptp_clear(bp);
pci_disable_device(bp->pdev);
- bnxt_free_ctx_mem(bp);
+ bnxt_free_ctx_mem(bp, false);
rtnl_unlock();
return rc;
}
@@ -15000,7 +16491,13 @@ static int bnxt_resume(struct device *device)
rc = -ENODEV;
goto resume_exit;
}
+ if (bp->fw_crash_mem)
+ bnxt_hwrm_crash_dump_mem_cfg(bp);
+ if (bnxt_ptp_init(bp)) {
+ kfree(bp->ptp_cfg);
+ bp->ptp_cfg = NULL;
+ }
bnxt_get_wol_settings(bp);
if (netif_running(dev)) {
rc = bnxt_open(dev);
@@ -15009,10 +16506,10 @@ static int bnxt_resume(struct device *device)
}
resume_exit:
+ rtnl_unlock();
bnxt_ulp_start(bp, rc);
if (!rc)
bnxt_reenable_sriov(bp);
- rtnl_unlock();
return rc;
}
@@ -15038,28 +16535,40 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
+ bool abort = false;
netdev_info(netdev, "PCI I/O error detected\n");
+ bnxt_ulp_stop(bp);
+
rtnl_lock();
netif_device_detach(netdev);
- bnxt_ulp_stop(bp);
+ if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ netdev_err(bp->dev, "Firmware reset already in progress\n");
+ abort = true;
+ }
- if (state == pci_channel_io_perm_failure) {
+ if (abort || state == pci_channel_io_perm_failure) {
rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
}
- if (state == pci_channel_io_frozen)
+ /* Link is not reliable anymore if state is pci_channel_io_frozen
+ * so we disable bus master to prevent any potential bad DMAs before
+ * freeing kernel memory.
+ */
+ if (state == pci_channel_io_frozen) {
set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
+ bnxt_fw_fatal_close(bp);
+ }
if (netif_running(netdev))
- bnxt_close(netdev);
+ __bnxt_close_nic(bp, true, true);
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
- bnxt_free_ctx_mem(bp);
+ bnxt_free_ctx_mem(bp, false);
rtnl_unlock();
/* Request a slot reset. */
@@ -15071,7 +16580,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot.
- * At this point, the card has exprienced a hard reset,
+ * At this point, the card has experienced a hard reset,
* followed by fixups by BIOS, and has its config space
* set up identically to what it was at cold boot.
*/
@@ -15086,6 +16595,10 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
netdev_info(bp->dev, "PCI Slot Reset\n");
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
+ test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
+ msleep(900);
+
rtnl_lock();
if (pci_enable_device(pdev)) {
@@ -15095,7 +16608,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
/* Upon fatal error, our device internal logic that latches to
* BAR value is getting reset and will restore only upon
- * rewritting the BARs.
+ * rewriting the BARs.
*
* As pci_restore_state() does not re-write the BARs if the
* value is same as saved value earlier, driver needs to
@@ -15139,6 +16652,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
}
reset_exit:
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_clear_reservations(bp, true);
rtnl_unlock();
@@ -15162,16 +16676,20 @@ static void bnxt_io_resume(struct pci_dev *pdev)
rtnl_lock();
err = bnxt_hwrm_func_qcaps(bp);
- if (!err && netif_running(netdev))
- err = bnxt_open(netdev);
-
- bnxt_ulp_start(bp, err);
if (!err) {
- bnxt_reenable_sriov(bp);
- netif_device_attach(netdev);
+ if (netif_running(netdev))
+ err = bnxt_open(netdev);
+ else
+ err = bnxt_reserve_rings(bp, true);
}
+ if (!err)
+ netif_device_attach(netdev);
+
rtnl_unlock();
+ bnxt_ulp_start(bp, err);
+ if (!err)
+ bnxt_reenable_sriov(bp);
}
static const struct pci_error_handlers bnxt_err_handler = {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 47338b48ca20..2373f423a523 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -34,6 +34,9 @@
#include <linux/firmware/broadcom/tee_bnxt_fw.h>
#endif
+#define BNXT_DEFAULT_RX_COPYBREAK 256
+#define BNXT_MAX_RX_COPYBREAK 1024
+
extern struct list_head bnxt_block_cb_list;
struct page_pool;
@@ -181,6 +184,32 @@ struct tx_cmp {
#define TX_CMP_SQ_CONS_IDX(txcmp) \
(le32_to_cpu((txcmp)->sq_cons_idx) & TX_CMP_SQ_CONS_IDX_MASK)
+struct tx_ts_cmp {
+ __le32 tx_ts_cmp_flags_type;
+ #define TX_TS_CMP_FLAGS_ERROR (1 << 6)
+ #define TX_TS_CMP_FLAGS_TS_TYPE (1 << 7)
+ #define TX_TS_CMP_FLAGS_TS_TYPE_PM (0 << 7)
+ #define TX_TS_CMP_FLAGS_TS_TYPE_PA (1 << 7)
+ #define TX_TS_CMP_FLAGS_TS_FALLBACK (1 << 8)
+ #define TX_TS_CMP_TS_SUB_NS (0xf << 12)
+ #define TX_TS_CMP_TS_NS_MID (0xffff << 16)
+ #define TX_TS_CMP_TS_NS_MID_SFT 16
+ u32 tx_ts_cmp_opaque;
+ __le32 tx_ts_cmp_errors_v;
+ #define TX_TS_CMP_V (1 << 0)
+ #define TX_TS_CMP_TS_INVALID_ERR (1 << 10)
+ __le32 tx_ts_cmp_ts_ns_lo;
+};
+
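+/* The 48-bit TX timestamp is split across the completion: bits 0..31
+ * arrive in tx_ts_cmp_ts_ns_lo and bits 32..47 in bits 16..31 of
+ * tx_ts_cmp_flags_type.  The macro below reassembles them into a u64.
+ */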
+#define BNXT_GET_TX_TS_48B_NS(tscmp) \
+ (le32_to_cpu((tscmp)->tx_ts_cmp_ts_ns_lo) | \
+ ((u64)(le32_to_cpu((tscmp)->tx_ts_cmp_flags_type) & \
+ TX_TS_CMP_TS_NS_MID) << TX_TS_CMP_TS_NS_MID_SFT))
+
+#define BNXT_TX_TS_ERR(tscmp) \
+ (((tscmp)->tx_ts_cmp_flags_type & cpu_to_le32(TX_TS_CMP_FLAGS_ERROR)) &&\
+ ((tscmp)->tx_ts_cmp_errors_v & cpu_to_le32(TX_TS_CMP_TS_INVALID_ERR)))
+
struct rx_cmp {
__le32 rx_cmp_len_flags_type;
#define RX_CMP_CMP_TYPE (0x3f << 0)
@@ -241,6 +270,9 @@ struct rx_cmp {
(((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
+#define RX_CMP_ITYPES(rxcmp) \
+ (le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_FLAGS_ITYPES_MASK)
+
#define RX_CMP_V3_HASH_TYPE_LEGACY(rxcmp) \
((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_V3_RSS_EXT_OP_LEGACY) >>\
RX_CMP_V3_RSS_EXT_OP_LEGACY_SHIFT)
@@ -352,7 +384,7 @@ struct rx_agg_cmp {
u32 rx_agg_cmp_opaque;
__le32 rx_agg_cmp_v;
#define RX_AGG_CMP_V (1 << 0)
- #define RX_AGG_CMP_AGG_ID (0xffff << 16)
+ #define RX_AGG_CMP_AGG_ID (0x0fff << 16)
#define RX_AGG_CMP_AGG_ID_SHIFT 16
__le32 rx_agg_cmp_unused;
};
@@ -390,7 +422,7 @@ struct rx_tpa_start_cmp {
#define RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT 7
#define RX_TPA_START_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_START_CMP_AGG_ID_SHIFT 25
- #define RX_TPA_START_CMP_AGG_ID_P5 (0xffff << 16)
+ #define RX_TPA_START_CMP_AGG_ID_P5 (0x0fff << 16)
#define RX_TPA_START_CMP_AGG_ID_SHIFT_P5 16
#define RX_TPA_START_CMP_METADATA1 (0xf << 28)
#define RX_TPA_START_CMP_METADATA1_SHIFT 28
@@ -514,7 +546,7 @@ struct rx_tpa_end_cmp {
#define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16
#define RX_TPA_END_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_END_CMP_AGG_ID_SHIFT 25
- #define RX_TPA_END_CMP_AGG_ID_P5 (0xffff << 16)
+ #define RX_TPA_END_CMP_AGG_ID_P5 (0x0fff << 16)
#define RX_TPA_END_CMP_AGG_ID_SHIFT_P5 16
__le32 rx_tpa_end_cmp_tsdelta;
@@ -848,11 +880,14 @@ struct bnxt_sw_tx_bd {
DEFINE_DMA_UNMAP_ADDR(mapping);
DEFINE_DMA_UNMAP_LEN(len);
struct page *page;
- u8 is_gso;
+ u8 is_ts_pkt;
u8 is_push;
u8 action;
unsigned short nr_frags;
- u16 rx_prod;
+ union {
+ u16 rx_prod;
+ u16 txts_prod;
+ };
};
struct bnxt_sw_rx_bd {
@@ -1076,6 +1111,7 @@ struct bnxt_rx_ring_info {
struct bnxt_ring_struct rx_agg_ring_struct;
struct xdp_rxq_info xdp_rxq;
struct page_pool *page_pool;
+ struct page_pool *head_pool;
};
struct bnxt_rx_sw_stats {
@@ -1152,7 +1188,7 @@ struct bnxt_cp_ring_info {
struct bnxt_stats_mem stats;
u32 hw_stats_ctx_id;
- struct bnxt_sw_stats sw_stats;
+ struct bnxt_sw_stats *sw_stats;
struct bnxt_ring_struct cp_ring_struct;
@@ -1188,12 +1224,15 @@ struct bnxt_napi {
bool in_reset;
};
+/* "TxRx", 2 hypens, plus maximum integer */
+#define BNXT_IRQ_NAME_EXTRA 17
+
struct bnxt_irq {
irq_handler_t handler;
unsigned int vector;
u8 requested:1;
u8 have_cpumask:1;
- char name[IFNAMSIZ + 2];
+ char name[IFNAMSIZ + BNXT_IRQ_NAME_EXTRA];
cpumask_var_t cpu_mask;
};
@@ -1213,11 +1252,15 @@ struct bnxt_ring_grp_info {
u16 cp_fw_ring_id;
};
+#define BNXT_VNIC_DEFAULT 0
+#define BNXT_VNIC_NTUPLE 1
+
struct bnxt_vnic_info {
u16 fw_vnic_id; /* returned by Chimp during alloc */
#define BNXT_MAX_CTX_PER_VNIC 8
u16 fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC];
u16 fw_l2_ctx_id;
+ u16 mru;
#define BNXT_MAX_UC_ADDRS 4
struct bnxt_l2_filter *l2_filters[BNXT_MAX_UC_ADDRS];
/* index 0 always dev_addr */
@@ -1252,11 +1295,35 @@ struct bnxt_vnic_info {
#define BNXT_VNIC_MCAST_FLAG 4
#define BNXT_VNIC_UCAST_FLAG 8
#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10
+#define BNXT_VNIC_NTUPLE_FLAG 0x20
+#define BNXT_VNIC_RSSCTX_FLAG 0x40
+ struct ethtool_rxfh_context *rss_ctx;
+ u32 vnic_id;
+};
+
+struct bnxt_rss_ctx {
+ struct bnxt_vnic_info vnic;
+ u8 index;
+};
+
+#define BNXT_MAX_ETH_RSS_CTX 32
+#define BNXT_VNIC_ID_INVALID 0xffffffff
+
+struct bnxt_hw_rings {
+ int tx;
+ int rx;
+ int grp;
+ int cp;
+ int cp_p5;
+ int stat;
+ int vnic;
+ int rss_ctx;
};
struct bnxt_hw_resc {
u16 min_rsscos_ctxs;
u16 max_rsscos_ctxs;
+ u16 resv_rsscos_ctxs;
u16 min_cp_rings;
u16 max_cp_rings;
u16 resv_cp_rings;
@@ -1281,6 +1348,12 @@ struct bnxt_hw_resc {
u16 max_nqs;
u16 max_irqs;
u16 resv_irqs;
+ u32 max_encap_records;
+ u32 max_decap_records;
+ u32 max_tx_em_flows;
+ u32 max_tx_wm_flows;
+ u32 max_rx_em_flows;
+ u32 max_rx_wm_flows;
};
#if defined(CONFIG_BNXT_SRIOV)
@@ -1293,7 +1366,6 @@ struct bnxt_vf_info {
u16 vlan;
u16 func_qcfg_flags;
u32 flags;
-#define BNXT_VF_QOS 0x1
#define BNXT_VF_SPOOFCHK 0x2
#define BNXT_VF_LINK_FORCED 0x4
#define BNXT_VF_LINK_UP 0x8
@@ -1315,12 +1387,6 @@ struct bnxt_pf_info {
u16 active_vfs;
u16 registered_vfs;
u16 max_vfs;
- u32 max_encap_records;
- u32 max_decap_records;
- u32 max_tx_em_flows;
- u32 max_tx_wm_flows;
- u32 max_rx_em_flows;
- u32 max_rx_wm_flows;
unsigned long *vf_event_bmap;
u16 hwrm_cmd_req_pages;
u8 vf_resv_strategy;
@@ -1334,6 +1400,7 @@ struct bnxt_pf_info {
struct bnxt_filter_base {
struct hlist_node hash;
+ struct list_head list;
__le64 filter_id;
u8 type;
#define BNXT_FLTR_TYPE_NTUPLE 1
@@ -1343,6 +1410,7 @@ struct bnxt_filter_base {
#define BNXT_ACT_RING_DST 2
#define BNXT_ACT_FUNC_DST 4
#define BNXT_ACT_NO_AGING 8
+#define BNXT_ACT_RSS_CTX 0x10
u16 sw_id;
u16 rxq;
u16 fw_vnic_id;
@@ -1355,19 +1423,21 @@ struct bnxt_filter_base {
struct rcu_head rcu;
};
+struct bnxt_flow_masks {
+ struct flow_dissector_key_ports ports;
+ struct flow_dissector_key_addrs addrs;
+};
+
+extern const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE;
+extern const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL;
+extern const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL;
+
struct bnxt_ntuple_filter {
+ /* base filter must be the first member */
struct bnxt_filter_base base;
struct flow_keys fkeys;
+ struct bnxt_flow_masks fmasks;
struct bnxt_l2_filter *l2_fltr;
- u32 ntuple_flags;
-#define BNXT_NTUPLE_MATCH_SRC_IP 1
-#define BNXT_NTUPLE_MATCH_DST_IP 2
-#define BNXT_NTUPLE_MATCH_SRC_PORT 4
-#define BNXT_NTUPLE_MATCH_DST_PORT 8
-#define BNXT_NTUPLE_MATCH_ALL (BNXT_NTUPLE_MATCH_SRC_IP | \
- BNXT_NTUPLE_MATCH_DST_IP | \
- BNXT_NTUPLE_MATCH_SRC_PORT | \
- BNXT_NTUPLE_MATCH_DST_PORT)
u32 flow_id;
};
@@ -1394,11 +1464,63 @@ struct bnxt_ipv6_tuple {
#define BNXT_L2_KEY_SIZE (sizeof(struct bnxt_l2_key) / 4)
struct bnxt_l2_filter {
+ /* base filter must be the first member */
struct bnxt_filter_base base;
struct bnxt_l2_key l2_key;
atomic_t refcnt;
};
+/* Compat version of hwrm_port_phy_qcfg_output capped at 96 bytes. The
+ * first 95 bytes are identical to hwrm_port_phy_qcfg_output in bnxt_hsi.h.
+ * The last valid byte in the compat version is different.
+ */
+struct hwrm_port_phy_qcfg_output_compat {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ u8 active_fec_signal_mode;
+ __le16 link_speed;
+ u8 duplex_cfg;
+ u8 pause;
+ __le16 support_speeds;
+ __le16 force_link_speed;
+ u8 auto_mode;
+ u8 auto_pause;
+ __le16 auto_link_speed;
+ __le16 auto_link_speed_mask;
+ u8 wirespeed;
+ u8 lpbk;
+ u8 force_pause;
+ u8 module_status;
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ u8 media_type;
+ u8 xcvr_pkg_type;
+ u8 eee_config_phy_addr;
+ u8 parallel_detect;
+ __le16 link_partner_adv_speeds;
+ u8 link_partner_adv_auto_mode;
+ u8 link_partner_adv_pause;
+ __le16 adv_eee_link_speed_mask;
+ __le16 link_partner_adv_eee_link_speed_mask;
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ __le16 fec_cfg;
+ u8 duplex_state;
+ u8 option_flags;
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ __le16 support_pam4_speeds;
+ __le16 force_pam4_link_speed;
+ __le16 auto_pam4_link_speed_mask;
+ u8 link_partner_pam4_adv_speeds;
+ u8 valid;
+};
+
struct bnxt_link_info {
u8 phy_type;
u8 media_type;
@@ -1643,8 +1765,6 @@ struct bnxt_test_info {
#define BNXT_GRCPF_REG_CHIMP_COMM 0x0
#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
-#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
-#define BNXT_CAG_REG_BASE 0x300000
#define BNXT_GRC_REG_STATUS_P5 0x520
@@ -1735,6 +1855,8 @@ struct bnxt_vf_rep {
#define MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8)
#define MAX_CTX_TOTAL_PAGES (MAX_CTX_PAGES * MAX_CTX_PAGES)
+#define MAX_CTX_BYTES ((size_t)MAX_CTX_TOTAL_PAGES * BNXT_PAGE_SIZE)
+#define MAX_CTX_BYTES_MASK (MAX_CTX_BYTES - 1)
struct bnxt_ctx_pg_info {
u32 entries;
@@ -1767,6 +1889,13 @@ struct bnxt_ctx_mem_type {
u16 entry_size;
u32 flags;
#define BNXT_CTX_MEM_TYPE_VALID FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID
+#define BNXT_CTX_MEM_FW_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_DBG_TRACE
+#define BNXT_CTX_MEM_FW_BIN_TRACE \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_BIN_DBG_TRACE
+#define BNXT_CTX_MEM_PERSIST \
+ FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_NEXT_BS_OFFSET
+
u32 instance_bmap;
u8 init_value;
u8 entry_multiple;
@@ -1775,6 +1904,7 @@ struct bnxt_ctx_mem_type {
u32 max_entries;
u32 min_entries;
u8 last:1;
+ u8 mem_valid:1;
u8 split_entry_cnt;
#define BNXT_MAX_SPLIT_ENTRY 4
union {
@@ -1806,21 +1936,30 @@ struct bnxt_ctx_mem_type {
#define BNXT_CTX_FTQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING
#define BNXT_CTX_MRAV FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV
#define BNXT_CTX_TIM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM
-#define BNXT_CTX_TKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TKC
-#define BNXT_CTX_RKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RKC
+#define BNXT_CTX_TCK FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK
+#define BNXT_CTX_RCK FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK
#define BNXT_CTX_MTQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING
#define BNXT_CTX_SQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW
#define BNXT_CTX_RQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW
#define BNXT_CTX_SRQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW
#define BNXT_CTX_CQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW
-#define BNXT_CTX_QTKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC
-#define BNXT_CTX_QRKC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC
#define BNXT_CTX_TBLSC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE
#define BNXT_CTX_XPAR FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION
+#define BNXT_CTX_SRT FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE
+#define BNXT_CTX_SRT2 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE
+#define BNXT_CTX_CRT FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE
+#define BNXT_CTX_CRT2 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE
+#define BNXT_CTX_RIGP0 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE
+#define BNXT_CTX_L2HWRM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE
+#define BNXT_CTX_REHWRM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE
+#define BNXT_CTX_CA0 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA0_TRACE
+#define BNXT_CTX_CA1 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE
+#define BNXT_CTX_CA2 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE
+#define BNXT_CTX_RIGP1 FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE
#define BNXT_CTX_MAX (BNXT_CTX_TIM + 1)
#define BNXT_CTX_L2_MAX (BNXT_CTX_FTQM + 1)
-#define BNXT_CTX_V2_MAX (BNXT_CTX_XPAR + 1)
+#define BNXT_CTX_V2_MAX (BNXT_CTX_RIGP1 + 1)
#define BNXT_CTX_INV ((u16)-1)
struct bnxt_ctx_mem_info {
@@ -1978,8 +2117,29 @@ enum board_idx {
NETXTREME_E_VF_HV,
NETXTREME_E_P5_VF,
NETXTREME_E_P5_VF_HV,
+ NETXTREME_E_P7_VF,
+};
+
+#define BNXT_TRACE_BUF_MAGIC_BYTE ((u8)0xbc)
+#define BNXT_TRACE_MAX 11
+
+struct bnxt_bs_trace_info {
+ u8 *magic_byte;
+ u32 last_offset;
+ u8 wrapped:1;
+ u16 ctx_type;
+ u16 trace_type;
};
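+/* The driver seeds the last byte of each trace buffer with
+ * BNXT_TRACE_BUF_MAGIC_BYTE; once firmware logging overwrites that
+ * byte, the buffer is known to have wrapped and the coredump must
+ * replay from last_offset rather than from offset 0.
+ */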
+static inline void bnxt_bs_trace_check_wrap(struct bnxt_bs_trace_info *bs_trace,
+ u32 offset)
+{
+ if (!bs_trace->wrapped &&
+ *bs_trace->magic_byte != BNXT_TRACE_BUF_MAGIC_BYTE)
+ bs_trace->wrapped = 1;
+ bs_trace->last_offset = offset;
+}
+
struct bnxt {
void __iomem *bar0;
void __iomem *bar1;
@@ -2084,10 +2244,6 @@ struct bnxt {
#define BNXT_FLAG_TPA (BNXT_FLAG_LRO | BNXT_FLAG_GRO)
#define BNXT_FLAG_JUMBO 0x10
#define BNXT_FLAG_STRIP_VLAN 0x20
- #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
- BNXT_FLAG_LRO)
- #define BNXT_FLAG_USING_MSIX 0x40
- #define BNXT_FLAG_MSIX_CAP 0x80
#define BNXT_FLAG_RFS 0x100
#define BNXT_FLAG_SHARED_RINGS 0x200
#define BNXT_FLAG_PORT_STATS 0x400
@@ -2108,6 +2264,9 @@ struct bnxt {
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
#define BNXT_FLAG_TX_COAL_CMPL 0x8000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
+ #define BNXT_FLAG_HDS 0x20000000
+ #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
+ BNXT_FLAG_LRO | BNXT_FLAG_HDS)
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -2115,6 +2274,11 @@ struct bnxt {
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
+#ifdef CONFIG_BNXT_SRIOV
+#define BNXT_VF_IS_TRUSTED(bp) ((bp)->vf.flags & BNXT_VF_TRUST)
+#else
+#define BNXT_VF_IS_TRUSTED(bp) 0
+#endif
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
@@ -2150,9 +2314,17 @@ struct bnxt {
(BNXT_CHIP_NUM_58700((bp)->chip_num) && \
!BNXT_CHIP_TYPE_NITRO_A0(bp)))
+/* Chip class phase 3.x */
+#define BNXT_CHIP_P3(bp) \
+ (BNXT_CHIP_NUM_57X0X((bp)->chip_num) || \
+ BNXT_CHIP_TYPE_NITRO_A0(bp))
+
#define BNXT_CHIP_P4_PLUS(bp) \
(BNXT_CHIP_P4(bp) || BNXT_CHIP_P5_PLUS(bp))
+#define BNXT_CHIP_P5_AND_MINUS(bp) \
+ (BNXT_CHIP_P3(bp) || BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
+
struct bnxt_aux_priv *aux_priv;
struct bnxt_en_dev *edev;
@@ -2179,7 +2351,7 @@ struct bnxt {
enum dma_data_direction rx_dir;
u32 rx_ring_size;
u32 rx_agg_ring_size;
- u32 rx_copy_thresh;
+ u32 rx_copybreak;
u32 rx_ring_mask;
u32 rx_agg_ring_mask;
int rx_nr_pages;
@@ -2207,8 +2379,9 @@ struct bnxt {
/* grp_info indexed by completion ring index */
struct bnxt_ring_grp_info *grp_info;
struct bnxt_vnic_info *vnic_info;
+ u32 num_rss_ctx;
int nr_vnics;
- u16 *rss_indir_tbl;
+ u32 *rss_indir_tbl;
u16 rss_indir_tbl_entries;
u32 rss_hash_cfg;
u32 rss_hash_delta;
@@ -2217,8 +2390,18 @@ struct bnxt {
#define BNXT_RSS_CAP_UDP_RSS_CAP BIT(1)
#define BNXT_RSS_CAP_NEW_RSS_CAP BIT(2)
#define BNXT_RSS_CAP_RSS_TCAM BIT(3)
+#define BNXT_RSS_CAP_AH_V4_RSS_CAP BIT(4)
+#define BNXT_RSS_CAP_AH_V6_RSS_CAP BIT(5)
+#define BNXT_RSS_CAP_ESP_V4_RSS_CAP BIT(6)
+#define BNXT_RSS_CAP_ESP_V6_RSS_CAP BIT(7)
+#define BNXT_RSS_CAP_MULTI_RSS_CTX BIT(8)
+
+ u8 rss_hash_key[HW_HASH_KEY_SIZE];
+ u8 rss_hash_key_valid:1;
+ u8 rss_hash_key_updated:1;
u16 max_mtu;
+ u16 tso_max_segs;
u8 max_tc;
u8 max_lltc; /* lossless TCs */
struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
@@ -2256,6 +2439,7 @@ struct bnxt {
struct bnxt_irq *irq_tbl;
int total_irqs;
+ int ulp_num_msix_want;
u8 mac_addr[ETH_ALEN];
#ifdef CONFIG_BNXT_DCB
@@ -2274,6 +2458,8 @@ struct bnxt {
#define BNXT_FW_CAP_DCBX_AGENT BIT_ULL(2)
#define BNXT_FW_CAP_NEW_RM BIT_ULL(3)
#define BNXT_FW_CAP_IF_CHANGE BIT_ULL(4)
+ #define BNXT_FW_CAP_ENABLE_RDMA_SRIOV BIT_ULL(5)
+ #define BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED BIT_ULL(6)
#define BNXT_FW_CAP_KONG_MB_CHNL BIT_ULL(7)
#define BNXT_FW_CAP_OVS_64BIT_HANDLE BIT_ULL(10)
#define BNXT_FW_CAP_TRUSTED_VF BIT_ULL(11)
@@ -2283,6 +2469,7 @@ struct bnxt {
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 BIT_ULL(16)
#define BNXT_FW_CAP_PCIE_STATS_SUPPORTED BIT_ULL(17)
#define BNXT_FW_CAP_EXT_STATS_SUPPORTED BIT_ULL(18)
+ #define BNXT_FW_CAP_TX_TS_CMP BIT_ULL(19)
#define BNXT_FW_CAP_ERR_RECOVER_RELOAD BIT_ULL(20)
#define BNXT_FW_CAP_HOT_RESET BIT_ULL(21)
#define BNXT_FW_CAP_PTP_RTC BIT_ULL(22)
@@ -2301,12 +2488,32 @@ struct bnxt {
#define BNXT_FW_CAP_PRE_RESV_VNICS BIT_ULL(35)
#define BNXT_FW_CAP_BACKING_STORE_V2 BIT_ULL(36)
#define BNXT_FW_CAP_VNIC_TUNNEL_TPA BIT_ULL(37)
+ #define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(38)
+ #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39)
+ #define BNXT_FW_CAP_VNIC_RE_FLUSH BIT_ULL(40)
+ #define BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS BIT_ULL(41)
u32 fw_dbg_cap;
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
#define BNXT_PTP_USE_RTC(bp) (!BNXT_MH(bp) && \
((bp)->fw_cap & BNXT_FW_CAP_PTP_RTC))
+#define BNXT_SUPPORTS_NTUPLE_VNIC(bp) \
+ (BNXT_PF(bp) && ((bp)->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3))
+
+#define BNXT_SUPPORTS_MULTI_RSS_CTX(bp) \
+ (BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \
+ ((bp)->rss_cap & BNXT_RSS_CAP_MULTI_RSS_CTX))
+#define BNXT_SUPPORTS_QUEUE_API(bp) \
+ (BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \
+ ((bp)->fw_cap & BNXT_FW_CAP_VNIC_RE_FLUSH))
+#define BNXT_RDMA_SRIOV_EN(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_ENABLE_RDMA_SRIOV)
+#define BNXT_ROCE_VF_RESC_CAP(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED)
+#define BNXT_SW_RES_LMT(bp) \
+ ((bp)->fw_cap & BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS)
+
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u16 hwrm_cmd_kong_seq;
@@ -2383,6 +2590,7 @@ struct bnxt {
#define BNXT_LINK_CFG_CHANGE_SP_EVENT 21
#define BNXT_THERMAL_THRESHOLD_SP_EVENT 22
#define BNXT_FW_ECHO_REQUEST_SP_EVENT 23
+#define BNXT_RESTART_ULP_SP_EVENT 24
struct delayed_work fw_reset_task;
int fw_reset_state;
@@ -2428,6 +2636,7 @@ struct bnxt {
unsigned long *ntp_fltr_bmap;
int ntp_fltr_count;
+ int max_fltr;
#define BNXT_L2_FLTR_MAX_FLTR 1024
#define BNXT_L2_FLTR_HASH_SIZE 32
@@ -2437,12 +2646,14 @@ struct bnxt {
u32 hash_seed;
u64 toeplitz_prefix;
+ struct list_head usr_fltr_list;
+
/* To protect link related settings during link changes and
* ethtool settings changes.
*/
struct mutex link_lock;
struct bnxt_link_info link_info;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
u32 lpi_tmr_lo;
u32 lpi_tmr_hi;
@@ -2461,6 +2672,11 @@ struct bnxt {
#define BNXT_PHY_FL_BANK_SEL (PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED << 8)
#define BNXT_PHY_FL_SPEEDS2 (PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED << 8)
+ /* copied from flags in hwrm_port_mac_qcaps_output */
+ u8 mac_flags;
+#define BNXT_MAC_FL_NO_MAC_LPBK \
+ PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED
+
u8 num_tests;
struct bnxt_test_info *test_info;
@@ -2472,6 +2688,7 @@ struct bnxt {
u16 dump_flag;
#define BNXT_DUMP_LIVE 0
#define BNXT_DUMP_CRASH 1
+#define BNXT_DUMP_DRIVER 2
struct bpf_prog *xdp_prog;
@@ -2497,6 +2714,10 @@ struct bnxt {
#endif
u32 thermal_threshold_type;
enum board_idx board_idx;
+
+ struct bnxt_ctx_pg_info *fw_crash_mem;
+ u32 fw_crash_len;
+ struct bnxt_bs_trace_info bs_trace[BNXT_TRACE_MAX];
};
#define BNXT_NUM_RX_RING_STATS 8
@@ -2558,6 +2779,8 @@ struct bnxt {
#define SFF_MODULE_ID_QSFP28 0x11
#define BNXT_MAX_PHY_I2C_RESP_SIZE 64
+#define BNXT_HDS_THRESHOLD_MAX 1023
+
static inline u32 bnxt_tx_avail(struct bnxt *bp,
const struct bnxt_tx_ring_info *txr)
{
@@ -2632,38 +2855,53 @@ static inline bool bnxt_sriov_cfg(struct bnxt *bp)
#endif
}
+extern const u16 bnxt_bstore_to_trace[];
extern const u16 bnxt_lhint_arr[];
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 prod, gfp_t gfp);
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data);
u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
+bool bnxt_bs_trace_avail(struct bnxt *bp, u16 type);
void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
+void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
+void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
int bmap_size, bool async_only);
int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp);
void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr);
+struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp,
+ struct bnxt_l2_key *key,
+ u16 flags);
int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr);
int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr);
int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr);
int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr);
+int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u32 tpa_flags);
void bnxt_fill_ipv6_mask(__be32 mask[4]);
+void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
+ struct ethtool_rxfh_context *rss_ctx);
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
-int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ unsigned int start_rx_ring_idx,
+ unsigned int nr_rings);
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
int bnxt_nq_rings_in_use(struct bnxt *bp);
int bnxt_hwrm_set_coal(struct bnxt *);
-void bnxt_free_ctx_mem(struct bnxt *bp);
+size_t bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm,
+ void *buf, size_t offset);
+void bnxt_free_ctx_mem(struct bnxt *bp, bool force);
int bnxt_num_tx_to_cp(struct bnxt *bp, int tx);
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp);
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp);
-int bnxt_get_avail_msix(struct bnxt *bp, int num);
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init);
void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
@@ -2679,6 +2917,12 @@ int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
int bnxt_hwrm_func_qcaps(struct bnxt *bp);
int bnxt_hwrm_fw_set_time(struct bnxt *);
+int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u8 valid);
+int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
+ bool all);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
@@ -2686,6 +2930,7 @@ void bnxt_reenable_sriov(struct bnxt *bp);
void bnxt_close_nic(struct bnxt *, bool, bool);
void bnxt_get_ring_err_stats(struct bnxt *bp,
struct bnxt_total_ring_err_stats *stats);
+bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx);
int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
u32 *reg_buf);
void bnxt_fw_exception(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index c06789882036..7236d8e548ab 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -15,6 +15,50 @@
#include "bnxt_hwrm.h"
#include "bnxt_coredump.h"
+static const u16 bnxt_bstore_to_seg_id[] = {
+ [BNXT_CTX_QP] = BNXT_CTX_MEM_SEG_QP,
+ [BNXT_CTX_SRQ] = BNXT_CTX_MEM_SEG_SRQ,
+ [BNXT_CTX_CQ] = BNXT_CTX_MEM_SEG_CQ,
+ [BNXT_CTX_VNIC] = BNXT_CTX_MEM_SEG_VNIC,
+ [BNXT_CTX_STAT] = BNXT_CTX_MEM_SEG_STAT,
+ [BNXT_CTX_STQM] = BNXT_CTX_MEM_SEG_STQM,
+ [BNXT_CTX_FTQM] = BNXT_CTX_MEM_SEG_FTQM,
+ [BNXT_CTX_MRAV] = BNXT_CTX_MEM_SEG_MRAV,
+ [BNXT_CTX_TIM] = BNXT_CTX_MEM_SEG_TIM,
+ [BNXT_CTX_SRT] = BNXT_CTX_MEM_SEG_SRT,
+ [BNXT_CTX_SRT2] = BNXT_CTX_MEM_SEG_SRT2,
+ [BNXT_CTX_CRT] = BNXT_CTX_MEM_SEG_CRT,
+ [BNXT_CTX_CRT2] = BNXT_CTX_MEM_SEG_CRT2,
+ [BNXT_CTX_RIGP0] = BNXT_CTX_MEM_SEG_RIGP0,
+ [BNXT_CTX_L2HWRM] = BNXT_CTX_MEM_SEG_L2HWRM,
+ [BNXT_CTX_REHWRM] = BNXT_CTX_MEM_SEG_REHWRM,
+ [BNXT_CTX_CA0] = BNXT_CTX_MEM_SEG_CA0,
+ [BNXT_CTX_CA1] = BNXT_CTX_MEM_SEG_CA1,
+ [BNXT_CTX_CA2] = BNXT_CTX_MEM_SEG_CA2,
+ [BNXT_CTX_RIGP1] = BNXT_CTX_MEM_SEG_RIGP1,
+};
+
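+/* Standard HWRM message pattern: hwrm_req_init() allocates and fills
+ * the request, hwrm_req_hold() keeps the response buffer valid across
+ * hwrm_req_send(), and hwrm_req_drop() releases both.  The flush below
+ * returns the firmware's current write offset into the trace buffer.
+ */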
+static int bnxt_dbg_hwrm_log_buffer_flush(struct bnxt *bp, u16 type, u32 flags,
+ u32 *offset)
+{
+ struct hwrm_dbg_log_buffer_flush_output *resp;
+ struct hwrm_dbg_log_buffer_flush_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_LOG_BUFFER_FLUSH);
+ if (rc)
+ return rc;
+
+ req->flags = cpu_to_le32(flags);
+ req->type = cpu_to_le16(type);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc)
+ *offset = le32_to_cpu(resp->current_buffer_offset);
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
struct bnxt_hwrm_dbg_dma_info *info)
{
@@ -165,11 +209,12 @@ static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
return rc;
}
-static void
+void
bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
struct bnxt_coredump_segment_hdr *seg_hdr,
struct coredump_segment_record *seg_rec, u32 seg_len,
- int status, u32 duration, u32 instance)
+ int status, u32 duration, u32 instance, u32 comp_id,
+ u32 seg_id)
{
memset(seg_hdr, 0, sizeof(*seg_hdr));
memcpy(seg_hdr->signature, "sEgM", 4);
@@ -180,11 +225,8 @@ bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
seg_hdr->high_version = seg_rec->version_hi;
seg_hdr->flags = cpu_to_le32(seg_rec->compress_flags);
} else {
- /* For hwrm_ver_get response Component id = 2
- * and Segment id = 0
- */
- seg_hdr->component_id = cpu_to_le32(2);
- seg_hdr->segment_id = 0;
+ seg_hdr->component_id = cpu_to_le32(comp_id);
+ seg_hdr->segment_id = cpu_to_le32(seg_id);
}
seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
seg_hdr->length = cpu_to_le32(seg_len);
@@ -269,7 +311,78 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
record->ioctl_high_version = 0;
}
-static int __bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
+static void bnxt_fill_drv_seg_record(struct bnxt *bp,
+ struct bnxt_driver_segment_record *record,
+ struct bnxt_ctx_mem_type *ctxm, u16 type)
+{
+ struct bnxt_bs_trace_info *bs_trace = &bp->bs_trace[type];
+ u32 offset = 0;
+ int rc = 0;
+
+ rc = bnxt_dbg_hwrm_log_buffer_flush(bp, type, 0, &offset);
+ if (rc)
+ return;
+
+ bnxt_bs_trace_check_wrap(bs_trace, offset);
+ record->max_entries = cpu_to_le32(ctxm->max_entries);
+ record->entry_size = cpu_to_le32(ctxm->entry_size);
+ record->offset = cpu_to_le32(bs_trace->last_offset);
+ record->wrapped = bs_trace->wrapped;
+}
+
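+/* Emit one coredump segment per valid backing-store type: a
+ * bnxt_coredump_segment_hdr, then (for trace-type memory) a
+ * bnxt_driver_segment_record, then the raw context memory copied by
+ * bnxt_copy_ctx_mem().  A first pass with buf == NULL only sizes the
+ * dump; the second pass fills it in.
+ */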
+static u32 bnxt_get_ctx_coredump(struct bnxt *bp, void *buf, u32 offset,
+ u32 *segs)
+{
+ struct bnxt_driver_segment_record record = {};
+ struct bnxt_coredump_segment_hdr seg_hdr;
+ struct bnxt_ctx_mem_info *ctx = bp->ctx;
+ u32 comp_id = BNXT_DRV_COMP_ID;
+ void *data = NULL;
+ size_t len = 0;
+ u16 type;
+
+ *segs = 0;
+ if (!ctx)
+ return 0;
+
+ if (buf)
+ buf += offset;
+ for (type = 0; type <= BNXT_CTX_RIGP1; type++) {
+ struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ bool trace = bnxt_bs_trace_avail(bp, type);
+ u32 seg_id = bnxt_bstore_to_seg_id[type];
+ size_t seg_len, extra_hlen = 0;
+
+ if (!ctxm->mem_valid || !seg_id)
+ continue;
+
+ if (trace)
+ extra_hlen = BNXT_SEG_RCD_LEN;
+ if (buf)
+ data = buf + BNXT_SEG_HDR_LEN + extra_hlen;
+ seg_len = bnxt_copy_ctx_mem(bp, ctxm, data, 0) + extra_hlen;
+ if (buf) {
+ bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, seg_len,
+ 0, 0, 0, comp_id, seg_id);
+ memcpy(buf, &seg_hdr, BNXT_SEG_HDR_LEN);
+ buf += BNXT_SEG_HDR_LEN;
+ if (trace) {
+ u16 trace_type = bnxt_bstore_to_trace[type];
+
+ bnxt_fill_drv_seg_record(bp, &record, ctxm,
+ trace_type);
+ memcpy(buf, &record, BNXT_SEG_RCD_LEN);
+ }
+ buf += seg_len;
+ }
+ len += BNXT_SEG_HDR_LEN + seg_len;
+ *segs += 1;
+ }
+ return len;
+}
+
+static int __bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf,
+ u32 *dump_len)
{
u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
@@ -287,17 +400,31 @@ static int __bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
start_utc = sys_tz.tz_minuteswest * 60;
seg_hdr_len = sizeof(seg_hdr);
- /* First segment should be hwrm_ver_get response */
+ /* First segment should be the hwrm_ver_get response.
+ * For the hwrm_ver_get response, Component id = 2 and Segment id = 0.
+ */
*dump_len = seg_hdr_len + ver_get_resp_len;
if (buf) {
bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
- 0, 0, 0);
+ 0, 0, 0, BNXT_VER_GET_COMP_ID, 0);
memcpy(buf + offset, &seg_hdr, seg_hdr_len);
offset += seg_hdr_len;
memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
offset += ver_get_resp_len;
}
+ if (dump_type == BNXT_DUMP_DRIVER) {
+ u32 drv_len, segs = 0;
+
+ drv_len = bnxt_get_ctx_coredump(bp, buf, offset, &segs);
+ *dump_len += drv_len;
+ offset += drv_len;
+ if (buf)
+ coredump.total_segs += segs;
+ goto err;
+ }
+
+ seg_record_len = sizeof(*seg_record);
rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
if (rc) {
netdev_err(bp->dev, "Failed to get coredump segment list\n");
@@ -346,7 +473,7 @@ next_seg:
end = jiffies;
duration = jiffies_to_msecs(end - start);
bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
- rc, duration, 0);
+ rc, duration, 0, 0, 0);
if (buf) {
/* Write segment header into the buffer */
@@ -372,20 +499,81 @@ err:
return rc;
}
+static u32 bnxt_copy_crash_data(struct bnxt_ring_mem_info *rmem, void *buf,
+ u32 dump_len)
+{
+ u32 data_copied = 0;
+ u32 data_len;
+ int i;
+
+ for (i = 0; i < rmem->nr_pages; i++) {
+ data_len = rmem->page_size;
+ if (data_copied + data_len > dump_len)
+ data_len = dump_len - data_copied;
+ memcpy(buf + data_copied, rmem->pg_arr[i], data_len);
+ data_copied += data_len;
+ if (data_copied >= dump_len)
+ break;
+ }
+ return data_copied;
+}
+
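+/* Crash dump memory is either single level (ring_mem's pg_arr holds
+ * the data pages directly) or two level (depth > 1, where each
+ * ctx_pg_tbl[] entry is a page table whose own ring_mem holds the data
+ * pages).  Walk whichever layout was configured and linearize the
+ * pages into buf.
+ */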
+static int bnxt_copy_crash_dump(struct bnxt *bp, void *buf, u32 dump_len)
+{
+ struct bnxt_ring_mem_info *rmem;
+ u32 offset = 0;
+
+ if (!bp->fw_crash_mem)
+ return -ENOENT;
+
+ rmem = &bp->fw_crash_mem->ring_mem;
+
+ if (rmem->depth > 1) {
+ int i;
+
+ for (i = 0; i < rmem->nr_pages; i++) {
+ struct bnxt_ctx_pg_info *pg_tbl;
+
+ pg_tbl = bp->fw_crash_mem->ctx_pg_tbl[i];
+ offset += bnxt_copy_crash_data(&pg_tbl->ring_mem,
+ buf + offset,
+ dump_len - offset);
+ if (offset >= dump_len)
+ break;
+ }
+ } else {
+ bnxt_copy_crash_data(rmem, buf, dump_len);
+ }
+
+ return 0;
+}
+
+static bool bnxt_crash_dump_avail(struct bnxt *bp)
+{
+ u32 sig = 0;
+
+ /* The first 4 bytes (signature) of the crash dump are always non-zero */
+ bnxt_copy_crash_dump(bp, &sig, sizeof(sig));
+ return !!sig;
+}
+
int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len)
{
if (dump_type == BNXT_DUMP_CRASH) {
+ if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)
+ return bnxt_copy_crash_dump(bp, buf, *dump_len);
#ifdef CONFIG_TEE_BNXT_FW
- return tee_bnxt_copy_coredump(buf, 0, *dump_len);
-#else
- return -EOPNOTSUPP;
+ else if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
+ return tee_bnxt_copy_coredump(buf, 0, *dump_len);
#endif
+ else
+ return -EOPNOTSUPP;
} else {
- return __bnxt_get_coredump(bp, buf, dump_len);
+ return __bnxt_get_coredump(bp, dump_type, buf, dump_len);
}
}
-static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
+int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
{
struct hwrm_dbg_qcfg_output *resp;
struct hwrm_dbg_qcfg_input *req;
@@ -395,7 +583,8 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
return -EOPNOTSUPP;
if (dump_type == BNXT_DUMP_CRASH &&
- !(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR))
+ !(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR ||
+ (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)))
return -EOPNOTSUPP;
rc = hwrm_req_init(bp, req, HWRM_DBG_QCFG);
@@ -403,8 +592,12 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
return rc;
req->fid = cpu_to_le16(0xffff);
- if (dump_type == BNXT_DUMP_CRASH)
- req->flags = cpu_to_le16(DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR);
+ if (dump_type == BNXT_DUMP_CRASH) {
+ if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
+ req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_SOC);
+ else
+ req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_HOST);
+ }
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
@@ -412,7 +605,10 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
goto get_dump_len_exit;
if (dump_type == BNXT_DUMP_CRASH) {
- *dump_len = le32_to_cpu(resp->crashdump_size);
+ if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
+ *dump_len = BNXT_CRASH_DUMP_LEN;
+ else
+ *dump_len = le32_to_cpu(resp->crashdump_size);
} else {
/* Driver adds coredump header and "HWRM_VER_GET response"
* segment additionally to coredump.
@@ -434,11 +630,21 @@ u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type)
{
u32 len = 0;
- if (bnxt_hwrm_get_dump_len(bp, dump_type, &len)) {
- if (dump_type == BNXT_DUMP_CRASH)
- len = BNXT_CRASH_DUMP_LEN;
- else
- __bnxt_get_coredump(bp, NULL, &len);
+ if (dump_type == BNXT_DUMP_CRASH &&
+ bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR &&
+ bp->fw_crash_mem) {
+ if (!bnxt_crash_dump_avail(bp))
+ return 0;
+
+ return bp->fw_crash_len;
+ }
+
+ if (dump_type != BNXT_DUMP_DRIVER) {
+ if (!bnxt_hwrm_get_dump_len(bp, dump_type, &len))
+ return len;
}
+ if (dump_type != BNXT_DUMP_CRASH)
+ __bnxt_get_coredump(bp, dump_type, NULL, &len);
+
return len;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
index b1a1b2fffb19..d1cd6387f3ab 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
@@ -68,11 +68,49 @@ struct bnxt_coredump_record {
__le16 rsvd3[313];
};
+struct bnxt_driver_segment_record {
+ __le32 max_entries;
+ __le32 entry_size;
+ __le32 offset;
+ __u8 wrapped:1;
+ __u8 unused[3];
+};
+
+#define BNXT_VER_GET_COMP_ID 2
+#define BNXT_DRV_COMP_ID 0xd
+
+#define BNXT_CTX_MEM_SEG_ID_START 0x200
+
+#define BNXT_CTX_MEM_SEG_QP (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_QP)
+#define BNXT_CTX_MEM_SEG_SRQ (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_SRQ)
+#define BNXT_CTX_MEM_SEG_CQ (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_CQ)
+#define BNXT_CTX_MEM_SEG_VNIC (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_VNIC)
+#define BNXT_CTX_MEM_SEG_STAT (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_STAT)
+#define BNXT_CTX_MEM_SEG_STQM (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_STQM)
+#define BNXT_CTX_MEM_SEG_FTQM (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_FTQM)
+#define BNXT_CTX_MEM_SEG_MRAV (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_MRAV)
+#define BNXT_CTX_MEM_SEG_TIM (BNXT_CTX_MEM_SEG_ID_START + BNXT_CTX_TIM)
+
+#define BNXT_CTX_MEM_SEG_SRT 0x1
+#define BNXT_CTX_MEM_SEG_SRT2 0x2
+#define BNXT_CTX_MEM_SEG_CRT 0x3
+#define BNXT_CTX_MEM_SEG_CRT2 0x4
+#define BNXT_CTX_MEM_SEG_RIGP0 0x5
+#define BNXT_CTX_MEM_SEG_L2HWRM 0x6
+#define BNXT_CTX_MEM_SEG_REHWRM 0x7
+#define BNXT_CTX_MEM_SEG_CA0 0x8
+#define BNXT_CTX_MEM_SEG_CA1 0x9
+#define BNXT_CTX_MEM_SEG_CA2 0xa
+#define BNXT_CTX_MEM_SEG_RIGP1 0xb
+
#define BNXT_CRASH_DUMP_LEN (8 << 20)
#define COREDUMP_LIST_BUF_LEN 2048
#define COREDUMP_RETRIEVE_BUF_LEN 4096
+#define BNXT_SEG_HDR_LEN sizeof(struct bnxt_coredump_segment_hdr)
+#define BNXT_SEG_RCD_LEN sizeof(struct bnxt_driver_segment_record)
+
struct bnxt_coredump {
void *data;
int data_size;
@@ -111,7 +149,20 @@ struct hwrm_dbg_cmn_output {
#define HWRM_DBG_CMN_FLAGS_MORE 1
};
+#define BNXT_DBG_FL_CR_DUMP_SIZE_SOC \
+ DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
+#define BNXT_DBG_FL_CR_DUMP_SIZE_HOST \
+ DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR
+#define BNXT_DBG_CR_DUMP_MDM_CFG_DDR \
+ DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR
+
+void bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
+ struct bnxt_coredump_segment_hdr *seg_hdr,
+ struct coredump_segment_record *seg_rec,
+ u32 seg_len, int status, u32 duration,
+ u32 instance, u32 comp_id, u32 seg_id);
int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len);
+int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len);
u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
index 156c2404854f..127b7015f676 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
@@ -64,9 +64,9 @@ static const struct file_operations debugfs_dim_fops = {
static void debugfs_dim_ring_init(struct dim *dim, int ring_idx,
struct dentry *dd)
{
- static char qname[16];
+ static char qname[12];
- snprintf(qname, 10, "%d", ring_idx);
+ snprintf(qname, sizeof(qname), "%d", ring_idx);
debugfs_create_file(qname, 0600, dd, dim, &debugfs_dim_fops);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index ae4529c043f0..ef8288fd68f4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -437,18 +437,20 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
+ bnxt_ulp_stop(bp);
rtnl_lock();
if (bnxt_sriov_cfg(bp)) {
NL_SET_ERR_MSG_MOD(extack,
"reload is unsupported while VFs are allocated or being configured");
rtnl_unlock();
+ bnxt_ulp_start(bp, 0);
return -EOPNOTSUPP;
}
if (bp->dev->reg_state == NETREG_UNREGISTERED) {
rtnl_unlock();
+ bnxt_ulp_start(bp, 0);
return -ENODEV;
}
- bnxt_ulp_stop(bp);
if (netif_running(bp->dev))
bnxt_close_nic(bp, true, true);
bnxt_vf_reps_free(bp);
@@ -461,7 +463,7 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
break;
}
bnxt_cancel_reservations(bp, false);
- bnxt_free_ctx_mem(bp);
+ bnxt_free_ctx_mem(bp, false);
break;
}
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: {
@@ -516,7 +518,6 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
bnxt_vf_reps_alloc(bp);
if (netif_running(bp->dev))
rc = bnxt_open_nic(bp, true, true);
- bnxt_ulp_start(bp, rc);
if (!rc) {
bnxt_reenable_sriov(bp);
bnxt_ptp_reapply_pps(bp);
@@ -570,6 +571,8 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
dev_close(bp->dev);
}
rtnl_unlock();
+ if (action == DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
+ bnxt_ulp_start(bp, rc);
return rc;
}
@@ -1096,7 +1099,8 @@ static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
}
static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
struct hwrm_nvm_set_variable_input *req;
@@ -1145,7 +1149,8 @@ static int bnxt_remote_dev_reset_get(struct devlink *dl, u32 id,
}
static int bnxt_remote_dev_reset_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
int rc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index dc4ca706b0e2..9c5820839514 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -24,6 +24,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/timecounter.h>
+#include <net/netdev_queues.h>
#include <net/netlink.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -631,13 +632,13 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
buf[j] = sw_stats[k];
skip_tpa_ring_stats:
- sw = (u64 *)&cpr->sw_stats.rx;
+ sw = (u64 *)&cpr->sw_stats->rx;
if (is_rx_ring(bp, i)) {
for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
buf[j] = sw[k];
}
- sw = (u64 *)&cpr->sw_stats.cmn;
+ sw = (u64 *)&cpr->sw_stats->cmn;
for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
buf[j] = sw[k];
}
@@ -705,112 +706,105 @@ skip_ring_stats:
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnxt *bp = netdev_priv(dev);
- static const char * const *str;
u32 i, j, num_str;
+ const char *str;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < bp->cp_nr_rings; i++) {
- if (is_rx_ring(bp, i)) {
- num_str = NUM_RING_RX_HW_STATS;
- for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i,
- bnxt_ring_rx_stats_str[j]);
- buf += ETH_GSTRING_LEN;
+ if (is_rx_ring(bp, i))
+ for (j = 0; j < NUM_RING_RX_HW_STATS; j++) {
+ str = bnxt_ring_rx_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i,
+ str);
}
- }
- if (is_tx_ring(bp, i)) {
- num_str = NUM_RING_TX_HW_STATS;
- for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i,
- bnxt_ring_tx_stats_str[j]);
- buf += ETH_GSTRING_LEN;
+ if (is_tx_ring(bp, i))
+ for (j = 0; j < NUM_RING_TX_HW_STATS; j++) {
+ str = bnxt_ring_tx_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i,
+ str);
}
- }
num_str = bnxt_get_num_tpa_ring_stats(bp);
if (!num_str || !is_rx_ring(bp, i))
goto skip_tpa_stats;
if (bp->max_tpa_v2)
- str = bnxt_ring_tpa2_stats_str;
+ for (j = 0; j < num_str; j++) {
+ str = bnxt_ring_tpa2_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i,
+ str);
+ }
else
- str = bnxt_ring_tpa_stats_str;
-
- for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i, str[j]);
- buf += ETH_GSTRING_LEN;
- }
-skip_tpa_stats:
- if (is_rx_ring(bp, i)) {
- num_str = NUM_RING_RX_SW_STATS;
for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i,
- bnxt_rx_sw_stats_str[j]);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_ring_tpa_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i,
+ str);
}
+skip_tpa_stats:
+ if (is_rx_ring(bp, i))
+ for (j = 0; j < NUM_RING_RX_SW_STATS; j++) {
+ str = bnxt_rx_sw_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i,
+ str);
+ }
+ for (j = 0; j < NUM_RING_CMN_SW_STATS; j++) {
+ str = bnxt_cmn_sw_stats_str[j];
+ ethtool_sprintf(&buf, "[%d]: %s", i, str);
}
- num_str = NUM_RING_CMN_SW_STATS;
- for (j = 0; j < num_str; j++) {
- sprintf(buf, "[%d]: %s", i,
- bnxt_cmn_sw_stats_str[j]);
- buf += ETH_GSTRING_LEN;
- }
- }
- for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++) {
- strscpy(buf, bnxt_ring_err_stats_arr[i], ETH_GSTRING_LEN);
- buf += ETH_GSTRING_LEN;
}
+ for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++)
+ ethtool_puts(&buf, bnxt_ring_err_stats_arr[i]);
- if (bp->flags & BNXT_FLAG_PORT_STATS) {
+ if (bp->flags & BNXT_FLAG_PORT_STATS)
for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
- strcpy(buf, bnxt_port_stats_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_port_stats_arr[i].string;
+ ethtool_puts(&buf, str);
}
- }
+
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
u32 len;
len = min_t(u32, bp->fw_rx_stats_ext_size,
ARRAY_SIZE(bnxt_port_stats_ext_arr));
for (i = 0; i < len; i++) {
- strcpy(buf, bnxt_port_stats_ext_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_port_stats_ext_arr[i].string;
+ ethtool_puts(&buf, str);
}
+
len = min_t(u32, bp->fw_tx_stats_ext_size,
ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
for (i = 0; i < len; i++) {
- strcpy(buf,
- bnxt_tx_port_stats_ext_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_tx_port_stats_ext_arr[i].string;
+ ethtool_puts(&buf, str);
}
+
if (bp->pri2cos_valid) {
for (i = 0; i < 8; i++) {
- strcpy(buf,
- bnxt_rx_bytes_pri_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_rx_bytes_pri_arr[i].string;
+ ethtool_puts(&buf, str);
}
+
for (i = 0; i < 8; i++) {
- strcpy(buf,
- bnxt_rx_pkts_pri_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_rx_pkts_pri_arr[i].string;
+ ethtool_puts(&buf, str);
}
+
for (i = 0; i < 8; i++) {
- strcpy(buf,
- bnxt_tx_bytes_pri_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_tx_bytes_pri_arr[i].string;
+ ethtool_puts(&buf, str);
}
+
for (i = 0; i < 8; i++) {
- strcpy(buf,
- bnxt_tx_pkts_pri_arr[i].string);
- buf += ETH_GSTRING_LEN;
+ str = bnxt_tx_pkts_pri_arr[i].string;
+ ethtool_puts(&buf, str);
}
}
}
break;
case ETH_SS_TEST:
if (bp->num_tests)
- memcpy(buf, bp->test_info->string,
- bp->num_tests * ETH_GSTRING_LEN);
+ for (i = 0; i < bp->num_tests; i++)
+ ethtool_puts(&buf, bp->test_info->string[i]);
break;
default:
netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
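
The conversion above switches to the ethtool string helpers: ethtool_sprintf() and ethtool_puts() each fill one ETH_GSTRING_LEN slot and advance the cursor, replacing the open-coded sprintf()/strscpy() plus manual pointer arithmetic. A minimal sketch with hypothetical stat names:

	#include <linux/ethtool.h>

	static void foo_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
	{
		int i;

		if (stringset != ETH_SS_STATS)
			return;
		for (i = 0; i < 4; i++)
			ethtool_sprintf(&buf, "[%d]: rx_packets", i);	/* formatted slot */
		ethtool_puts(&buf, "rx_crc_errors");			/* literal slot */
	}
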
@@ -840,6 +834,8 @@ static void bnxt_get_ringparam(struct net_device *dev,
ering->rx_pending = bp->rx_ring_size;
ering->rx_jumbo_pending = bp->rx_agg_ring_size;
ering->tx_pending = bp->tx_ring_size;
+
+ kernel_ering->hds_thresh_max = BNXT_HDS_THRESHOLD_MAX;
}
static int bnxt_set_ringparam(struct net_device *dev,
@@ -847,16 +843,35 @@ static int bnxt_set_ringparam(struct net_device *dev,
struct kernel_ethtool_ringparam *kernel_ering,
struct netlink_ext_ack *extack)
{
+ u8 tcp_data_split = kernel_ering->tcp_data_split;
struct bnxt *bp = netdev_priv(dev);
+ u8 hds_config_mod;
if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
(ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
(ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
return -EINVAL;
+ hds_config_mod = tcp_data_split != dev->cfg->hds_config;
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED && hds_config_mod)
+ return -EINVAL;
+
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
+ hds_config_mod && BNXT_RX_PAGE_MODE(bp)) {
+ NL_SET_ERR_MSG_MOD(extack, "tcp-data-split is disallowed when XDP is attached");
+ return -EINVAL;
+ }
+
if (netif_running(dev))
bnxt_close_nic(bp, false, false);
+ if (hds_config_mod) {
+ if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED)
+ bp->flags |= BNXT_FLAG_HDS;
+ else if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
+ bp->flags &= ~BNXT_FLAG_HDS;
+ }
+
bp->rx_ring_size = ering->rx_pending;
bp->tx_ring_size = ering->tx_pending;
bnxt_set_ring_params(bp);
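
The ringparam changes above wire up ethtool's tcp-data-split (header/data split, HDS) tri-state: ENABLED sets BNXT_FLAG_HDS, UNKNOWN reverts to device-managed behaviour, and an explicit DISABLED is refused, as is enabling the split while an XDP program holds the rings in page mode. A hedged sketch of the tri-state as a hypothetical driver would see it:

	#include <linux/ethtool.h>
	#include <linux/netlink.h>

	static int foo_set_ringparam(struct net_device *dev,
				     struct ethtool_ringparam *ering,
				     struct kernel_ethtool_ringparam *kring,
				     struct netlink_ext_ack *extack)
	{
		switch (kring->tcp_data_split) {
		case ETHTOOL_TCP_DATA_SPLIT_UNKNOWN:	/* device decides */
		case ETHTOOL_TCP_DATA_SPLIT_ENABLED:	/* force split on */
			return 0;
		case ETHTOOL_TCP_DATA_SPLIT_DISABLED:
		default:
			NL_SET_ERR_MSG_MOD(extack, "disabling HDS is not supported");
			return -EINVAL;
		}
	}
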
@@ -955,11 +970,6 @@ static int bnxt_set_channels(struct net_device *dev,
}
tx_xdp = req_rx_rings;
}
- rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
- if (rc) {
- netdev_warn(dev, "Unable to allocate the requested rings\n");
- return rc;
- }
if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
@@ -968,6 +978,12 @@ static int bnxt_set_channels(struct net_device *dev,
return -EINVAL;
}
+ rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
+ if (rc) {
+ netdev_warn(dev, "Unable to allocate the requested rings\n");
+ return rc;
+ }
+
if (netif_running(dev)) {
if (BNXT_PF(bp)) {
/* TODO CHIMP_FW: Send message to all VF's
@@ -1058,11 +1074,17 @@ static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp,
static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
+ u32 count;
+
cmd->data = bp->ntp_fltr_count;
rcu_read_lock();
+ count = bnxt_get_all_fltr_ids_rcu(bp, bp->l2_fltr_hash_tbl,
+ BNXT_L2_FLTR_HASH_SIZE, rule_locs, 0,
+ cmd->rule_cnt);
cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl,
BNXT_NTP_FLTR_HASH_SIZE,
- rule_locs, 0, cmd->rule_cnt);
+ rule_locs, count,
+ cmd->rule_cnt);
rcu_read_unlock();
return 0;
@@ -1074,13 +1096,44 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
(struct ethtool_rx_flow_spec *)&cmd->fs;
struct bnxt_filter_base *fltr_base;
struct bnxt_ntuple_filter *fltr;
+ struct bnxt_flow_masks *fmasks;
struct flow_keys *fkeys;
int rc = -EINVAL;
- if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
+ if (fs->location >= bp->max_fltr)
return rc;
rcu_read_lock();
+ fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
+ BNXT_L2_FLTR_HASH_SIZE,
+ fs->location);
+ if (fltr_base) {
+ struct ethhdr *h_ether = &fs->h_u.ether_spec;
+ struct ethhdr *m_ether = &fs->m_u.ether_spec;
+ struct bnxt_l2_filter *l2_fltr;
+ struct bnxt_l2_key *l2_key;
+
+ l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
+ l2_key = &l2_fltr->l2_key;
+ fs->flow_type = ETHER_FLOW;
+ ether_addr_copy(h_ether->h_dest, l2_key->dst_mac_addr);
+ eth_broadcast_addr(m_ether->h_dest);
+ if (l2_key->vlan) {
+ struct ethtool_flow_ext *m_ext = &fs->m_ext;
+ struct ethtool_flow_ext *h_ext = &fs->h_ext;
+
+ fs->flow_type |= FLOW_EXT;
+ m_ext->vlan_tci = htons(0xfff);
+ h_ext->vlan_tci = htons(l2_key->vlan);
+ }
+ if (fltr_base->flags & BNXT_ACT_RING_DST)
+ fs->ring_cookie = fltr_base->rxq;
+ if (fltr_base->flags & BNXT_ACT_FUNC_DST)
+ fs->ring_cookie = (u64)(fltr_base->vf_idx + 1) <<
+ ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ rcu_read_unlock();
+ return 0;
+ }
fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
BNXT_NTP_FLTR_HASH_SIZE,
fs->location);
@@ -1091,59 +1144,79 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base);
fkeys = &fltr->fkeys;
+ fmasks = &fltr->fmasks;
if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
- if (fkeys->basic.ip_proto == IPPROTO_TCP)
+ if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) {
+ fs->flow_type = IP_USER_FLOW;
+ fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fs->h_u.usr_ip4_spec.proto = BNXT_IP_PROTO_WILDCARD;
+ fs->m_u.usr_ip4_spec.proto = 0;
+ } else if (fkeys->basic.ip_proto == IPPROTO_ICMP) {
+ fs->flow_type = IP_USER_FLOW;
+ fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP;
+ fs->m_u.usr_ip4_spec.proto = BNXT_IP_PROTO_FULL_MASK;
+ } else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
fs->flow_type = TCP_V4_FLOW;
- else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+ } else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
fs->flow_type = UDP_V4_FLOW;
- else
+ } else {
goto fltr_err;
-
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
- fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
- fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
}
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
- fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
- fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
+
+ fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
+ fs->m_u.tcp_ip4_spec.ip4src = fmasks->addrs.v4addrs.src;
+ fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
+ fs->m_u.tcp_ip4_spec.ip4dst = fmasks->addrs.v4addrs.dst;
+ if (fs->flow_type == TCP_V4_FLOW ||
+ fs->flow_type == UDP_V4_FLOW) {
fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
- fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
+ fs->m_u.tcp_ip4_spec.psrc = fmasks->ports.src;
fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
- fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+ fs->m_u.tcp_ip4_spec.pdst = fmasks->ports.dst;
}
} else {
- if (fkeys->basic.ip_proto == IPPROTO_TCP)
+ if (fkeys->basic.ip_proto == BNXT_IP_PROTO_WILDCARD) {
+ fs->flow_type = IPV6_USER_FLOW;
+ fs->h_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_WILDCARD;
+ fs->m_u.usr_ip6_spec.l4_proto = 0;
+ } else if (fkeys->basic.ip_proto == IPPROTO_ICMPV6) {
+ fs->flow_type = IPV6_USER_FLOW;
+ fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6;
+ fs->m_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_FULL_MASK;
+ } else if (fkeys->basic.ip_proto == IPPROTO_TCP) {
fs->flow_type = TCP_V6_FLOW;
- else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+ } else if (fkeys->basic.ip_proto == IPPROTO_UDP) {
fs->flow_type = UDP_V6_FLOW;
- else
+ } else {
goto fltr_err;
-
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) {
- *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
- fkeys->addrs.v6addrs.src;
- bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6src);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) {
- *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
- fkeys->addrs.v6addrs.dst;
- bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6dst);
}
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) {
+
+ *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
+ fkeys->addrs.v6addrs.src;
+ *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6src[0] =
+ fmasks->addrs.v6addrs.src;
+ *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
+ fkeys->addrs.v6addrs.dst;
+ *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6dst[0] =
+ fmasks->addrs.v6addrs.dst;
+ if (fs->flow_type == TCP_V6_FLOW ||
+ fs->flow_type == UDP_V6_FLOW) {
fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
- fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
- }
- if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) {
+ fs->m_u.tcp_ip6_spec.psrc = fmasks->ports.src;
fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
- fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
+ fs->m_u.tcp_ip6_spec.pdst = fmasks->ports.dst;
}
}
- fs->ring_cookie = fltr->base.rxq;
+ if (fltr->base.flags & BNXT_ACT_DROP) {
+ fs->ring_cookie = RX_CLS_FLOW_DISC;
+ } else if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
+ fs->flow_type |= FLOW_RSS;
+ cmd->rss_context = fltr->base.fw_vnic_id;
+ } else {
+ fs->ring_cookie = fltr->base.rxq;
+ }
rc = 0;
fltr_err:
@@ -1152,48 +1225,170 @@ fltr_err:
return rc;
}
-#define IPV4_ALL_MASK ((__force __be32)~0)
-#define L4_PORT_ALL_MASK ((__force __be16)~0)
+static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp,
+ u32 index)
+{
+ struct ethtool_rxfh_context *ctx;
+
+ ctx = xa_load(&bp->dev->ethtool->rss_ctx, index);
+ if (!ctx)
+ return NULL;
+ return ethtool_rxfh_context_priv(ctx);
+}
+
+static int bnxt_alloc_vnic_rss_table(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
+{
+ int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
+
+ vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
+ vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev,
+ vnic->rss_table_size,
+ &vnic->rss_table_dma_addr,
+ GFP_KERNEL);
+ if (!vnic->rss_table)
+ return -ENOMEM;
+
+ vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
+ vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
+ return 0;
+}
+
+static int bnxt_add_l2_cls_rule(struct bnxt *bp,
+ struct ethtool_rx_flow_spec *fs)
+{
+ u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ struct ethhdr *h_ether = &fs->h_u.ether_spec;
+ struct ethhdr *m_ether = &fs->m_u.ether_spec;
+ struct bnxt_l2_filter *fltr;
+ struct bnxt_l2_key key;
+ u16 vnic_id;
+ u8 flags;
+ int rc;
+
+ if (BNXT_CHIP_P5_PLUS(bp))
+ return -EOPNOTSUPP;
+
+ if (!is_broadcast_ether_addr(m_ether->h_dest))
+ return -EINVAL;
+ ether_addr_copy(key.dst_mac_addr, h_ether->h_dest);
+ key.vlan = 0;
+ if (fs->flow_type & FLOW_EXT) {
+ struct ethtool_flow_ext *m_ext = &fs->m_ext;
+ struct ethtool_flow_ext *h_ext = &fs->h_ext;
+
+ if (m_ext->vlan_tci != htons(0xfff) || !h_ext->vlan_tci)
+ return -EINVAL;
+ key.vlan = ntohs(h_ext->vlan_tci);
+ }
+
+ if (vf) {
+ flags = BNXT_ACT_FUNC_DST;
+ vnic_id = 0xffff;
+ vf--;
+ } else {
+ flags = BNXT_ACT_RING_DST;
+ vnic_id = bp->vnic_info[ring + 1].fw_vnic_id;
+ }
+ fltr = bnxt_alloc_new_l2_filter(bp, &key, flags);
+ if (IS_ERR(fltr))
+ return PTR_ERR(fltr);
+
+ fltr->base.fw_vnic_id = vnic_id;
+ fltr->base.rxq = ring;
+ fltr->base.vf_idx = vf;
+ rc = bnxt_hwrm_l2_filter_alloc(bp, fltr);
+ if (rc)
+ bnxt_del_l2_filter(bp, fltr);
+ else
+ fs->location = fltr->base.sw_id;
+ return rc;
+}
-static bool ipv6_mask_is_full(__be32 mask[4])
+static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec,
+ struct ethtool_usrip4_spec *ip_mask)
{
- return (mask[0] & mask[1] & mask[2] & mask[3]) == IPV4_ALL_MASK;
+ u8 mproto = ip_mask->proto;
+ u8 sproto = ip_spec->proto;
+
+ if (ip_mask->l4_4_bytes || ip_mask->tos ||
+ ip_spec->ip_ver != ETH_RX_NFC_IP4 ||
+ (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMP)))
+ return false;
+ return true;
}
-static bool ipv6_mask_is_zero(__be32 mask[4])
+static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec,
+ struct ethtool_usrip6_spec *ip_mask)
{
- return !(mask[0] | mask[1] | mask[2] | mask[3]);
+ u8 mproto = ip_mask->l4_proto;
+ u8 sproto = ip_spec->l4_proto;
+
+ if (ip_mask->l4_4_bytes || ip_mask->tclass ||
+ (mproto && (mproto != BNXT_IP_PROTO_FULL_MASK || sproto != IPPROTO_ICMPV6)))
+ return false;
+ return true;
}
static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
- struct ethtool_rx_flow_spec *fs)
+ struct ethtool_rxnfc *cmd)
{
- u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
- u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
struct bnxt_ntuple_filter *new_fltr, *fltr;
+ u32 flow_type = fs->flow_type & 0xff;
struct bnxt_l2_filter *l2_fltr;
- u32 flow_type = fs->flow_type;
+ struct bnxt_flow_masks *fmasks;
struct flow_keys *fkeys;
- u32 idx;
+ u32 idx, ring;
int rc;
+ u8 vf;
if (!bp->vnic_info)
return -EAGAIN;
- if ((flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
+ vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ if ((fs->flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
return -EOPNOTSUPP;
+ if (flow_type == IP_USER_FLOW) {
+ if (!bnxt_verify_ntuple_ip4_flow(&fs->h_u.usr_ip4_spec,
+ &fs->m_u.usr_ip4_spec))
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_type == IPV6_USER_FLOW) {
+ if (!bnxt_verify_ntuple_ip6_flow(&fs->h_u.usr_ip6_spec,
+ &fs->m_u.usr_ip6_spec))
+ return -EOPNOTSUPP;
+ }
+
new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL);
if (!new_fltr)
return -ENOMEM;
- l2_fltr = bp->vnic_info[0].l2_filters[0];
+ l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0];
atomic_inc(&l2_fltr->refcnt);
new_fltr->l2_fltr = l2_fltr;
+ fmasks = &new_fltr->fmasks;
fkeys = &new_fltr->fkeys;
rc = -EOPNOTSUPP;
switch (flow_type) {
+ case IP_USER_FLOW: {
+ struct ethtool_usrip4_spec *ip_spec = &fs->h_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *ip_mask = &fs->m_u.usr_ip4_spec;
+
+ fkeys->basic.ip_proto = ip_mask->proto ? ip_spec->proto
+ : BNXT_IP_PROTO_WILDCARD;
+ fkeys->basic.n_proto = htons(ETH_P_IP);
+ fkeys->addrs.v4addrs.src = ip_spec->ip4src;
+ fmasks->addrs.v4addrs.src = ip_mask->ip4src;
+ fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
+ fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
+ break;
+ }
case TCP_V4_FLOW:
case UDP_V4_FLOW: {
struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec;
@@ -1203,32 +1398,27 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
if (flow_type == UDP_V4_FLOW)
fkeys->basic.ip_proto = IPPROTO_UDP;
fkeys->basic.n_proto = htons(ETH_P_IP);
+ fkeys->addrs.v4addrs.src = ip_spec->ip4src;
+ fmasks->addrs.v4addrs.src = ip_mask->ip4src;
+ fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
+ fmasks->addrs.v4addrs.dst = ip_mask->ip4dst;
+ fkeys->ports.src = ip_spec->psrc;
+ fmasks->ports.src = ip_mask->psrc;
+ fkeys->ports.dst = ip_spec->pdst;
+ fmasks->ports.dst = ip_mask->pdst;
+ break;
+ }
+ case IPV6_USER_FLOW: {
+ struct ethtool_usrip6_spec *ip_spec = &fs->h_u.usr_ip6_spec;
+ struct ethtool_usrip6_spec *ip_mask = &fs->m_u.usr_ip6_spec;
- if (ip_mask->ip4src == IPV4_ALL_MASK) {
- fkeys->addrs.v4addrs.src = ip_spec->ip4src;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
- } else if (ip_mask->ip4src) {
- goto ntuple_err;
- }
- if (ip_mask->ip4dst == IPV4_ALL_MASK) {
- fkeys->addrs.v4addrs.dst = ip_spec->ip4dst;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
- } else if (ip_mask->ip4dst) {
- goto ntuple_err;
- }
-
- if (ip_mask->psrc == L4_PORT_ALL_MASK) {
- fkeys->ports.src = ip_spec->psrc;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
- } else if (ip_mask->psrc) {
- goto ntuple_err;
- }
- if (ip_mask->pdst == L4_PORT_ALL_MASK) {
- fkeys->ports.dst = ip_spec->pdst;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
- } else if (ip_mask->pdst) {
- goto ntuple_err;
- }
+ fkeys->basic.ip_proto = ip_mask->l4_proto ? ip_spec->l4_proto
+ : BNXT_IP_PROTO_WILDCARD;
+ fkeys->basic.n_proto = htons(ETH_P_IPV6);
+ fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
+ fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
+ fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
+ fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
break;
}
case TCP_V6_FLOW:
@@ -1241,40 +1431,21 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
fkeys->basic.ip_proto = IPPROTO_UDP;
fkeys->basic.n_proto = htons(ETH_P_IPV6);
- if (ipv6_mask_is_full(ip_mask->ip6src)) {
- fkeys->addrs.v6addrs.src =
- *(struct in6_addr *)&ip_spec->ip6src;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP;
- } else if (!ipv6_mask_is_zero(ip_mask->ip6src)) {
- goto ntuple_err;
- }
- if (ipv6_mask_is_full(ip_mask->ip6dst)) {
- fkeys->addrs.v6addrs.dst =
- *(struct in6_addr *)&ip_spec->ip6dst;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP;
- } else if (!ipv6_mask_is_zero(ip_mask->ip6dst)) {
- goto ntuple_err;
- }
-
- if (ip_mask->psrc == L4_PORT_ALL_MASK) {
- fkeys->ports.src = ip_spec->psrc;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT;
- } else if (ip_mask->psrc) {
- goto ntuple_err;
- }
- if (ip_mask->pdst == L4_PORT_ALL_MASK) {
- fkeys->ports.dst = ip_spec->pdst;
- new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT;
- } else if (ip_mask->pdst) {
- goto ntuple_err;
- }
+ fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src;
+ fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src;
+ fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst;
+ fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst;
+ fkeys->ports.src = ip_spec->psrc;
+ fmasks->ports.src = ip_mask->psrc;
+ fkeys->ports.dst = ip_spec->pdst;
+ fmasks->ports.dst = ip_mask->pdst;
break;
}
default:
rc = -EOPNOTSUPP;
goto ntuple_err;
}
- if (!new_fltr->ntuple_flags)
+ if (!memcmp(&BNXT_FLOW_MASK_NONE, fmasks, sizeof(*fmasks)))
goto ntuple_err;
idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL);
@@ -1287,8 +1458,24 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
}
rcu_read_unlock();
- new_fltr->base.rxq = ring;
new_fltr->base.flags = BNXT_ACT_NO_AGING;
+ if (fs->flow_type & FLOW_RSS) {
+ struct bnxt_rss_ctx *rss_ctx;
+
+ new_fltr->base.fw_vnic_id = 0;
+ new_fltr->base.flags |= BNXT_ACT_RSS_CTX;
+ rss_ctx = bnxt_get_rss_ctx_from_index(bp, cmd->rss_context);
+ if (rss_ctx) {
+ new_fltr->base.fw_vnic_id = rss_ctx->index;
+ } else {
+ rc = -EINVAL;
+ goto ntuple_err;
+ }
+ }
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC)
+ new_fltr->base.flags |= BNXT_ACT_DROP;
+ else
+ new_fltr->base.rxq = ring;
__set_bit(BNXT_FLTR_VALID, &new_fltr->base.state);
rc = bnxt_insert_ntp_filter(bp, new_fltr, idx);
if (!rc) {
@@ -1321,6 +1508,18 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (fs->location != RX_CLS_LOC_ANY)
return -EINVAL;
+ flow_type = fs->flow_type;
+ if ((flow_type == IP_USER_FLOW ||
+ flow_type == IPV6_USER_FLOW) &&
+ !(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO))
+ return -EOPNOTSUPP;
+ if (flow_type & FLOW_MAC_EXT)
+ return -EINVAL;
+ flow_type &= ~FLOW_EXT;
+
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW)
+ return bnxt_add_ntuple_cls_rule(bp, cmd);
+
ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
if (BNXT_VF(bp) && vf)
@@ -1330,14 +1529,10 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (!vf && ring >= bp->rx_nr_rings)
return -EINVAL;
- flow_type = fs->flow_type;
- if (flow_type & (FLOW_MAC_EXT | FLOW_RSS))
- return -EINVAL;
- flow_type &= ~FLOW_EXT;
if (flow_type == ETHER_FLOW)
- rc = -EOPNOTSUPP;
+ rc = bnxt_add_l2_cls_rule(bp, fs);
else
- rc = bnxt_add_ntuple_cls_rule(bp, fs);
+ rc = bnxt_add_ntuple_cls_rule(bp, cmd);
return rc;
}
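
The classification paths above all decode the ring_cookie encoding shared by ethtool flow rules: RX_CLS_FLOW_DISC marks a drop rule, the low bits hold the destination ring, and the VF field stores vf_index + 1 so that zero can mean "no VF". A sketch using the stock helpers (hypothetical function name):

	#include <linux/ethtool.h>
	#include <linux/printk.h>

	static void foo_decode_ring_cookie(u64 ring_cookie)
	{
		u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);

		if (ring_cookie == RX_CLS_FLOW_DISC)
			pr_info("drop rule\n");
		else if (vf)
			pr_info("steer to VF %u\n", vf - 1);	/* stored as vf + 1 */
		else
			pr_info("steer to ring %u\n", ring);
	}
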
@@ -1346,11 +1541,22 @@ static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd)
struct ethtool_rx_flow_spec *fs = &cmd->fs;
struct bnxt_filter_base *fltr_base;
struct bnxt_ntuple_filter *fltr;
+ u32 id = fs->location;
rcu_read_lock();
+ fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
+ BNXT_L2_FLTR_HASH_SIZE, id);
+ if (fltr_base) {
+ struct bnxt_l2_filter *l2_fltr;
+
+ l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base);
+ rcu_read_unlock();
+ bnxt_hwrm_l2_filter_free(bp, l2_fltr);
+ bnxt_del_l2_filter(bp, l2_fltr);
+ return 0;
+ }
fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
- BNXT_NTP_FLTR_HASH_SIZE,
- fs->location);
+ BNXT_NTP_FLTR_HASH_SIZE, id);
if (!fltr_base) {
rcu_read_unlock();
return -ENOENT;
@@ -1396,8 +1602,14 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
cmd->data |= RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
fallthrough;
- case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
+ if (bp->rss_hash_cfg &
+ (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4))
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case SCTP_V4_FLOW:
case AH_V4_FLOW:
case ESP_V4_FLOW:
case IPV4_FLOW:
@@ -1415,8 +1627,14 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
cmd->data |= RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3;
fallthrough;
- case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
+ if (bp->rss_hash_cfg &
+ (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6))
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case SCTP_V6_FLOW:
case AH_V6_FLOW:
case ESP_V6_FLOW:
case IPV6_FLOW:
@@ -1463,6 +1681,24 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
if (tuple == 4)
rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
+ } else if (cmd->flow_type == AH_ESP_V4_FLOW) {
+ if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V4_RSS_CAP) ||
+ !(bp->rss_cap & BNXT_RSS_CAP_ESP_V4_RSS_CAP)))
+ return -EINVAL;
+ rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4);
+ if (tuple == 4)
+ rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4;
+ } else if (cmd->flow_type == AH_ESP_V6_FLOW) {
+ if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V6_RSS_CAP) ||
+ !(bp->rss_cap & BNXT_RSS_CAP_ESP_V6_RSS_CAP)))
+ return -EINVAL;
+ rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6);
+ if (tuple == 4)
+ rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6;
} else if (tuple == 4) {
return -EINVAL;
}
@@ -1521,7 +1757,7 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = bp->ntp_fltr_count;
- cmd->data = BNXT_NTP_FLTR_MAX_FLTR | RX_CLS_LOC_SPECIAL;
+ cmd->data = bp->max_fltr | RX_CLS_LOC_SPECIAL;
break;
case ETHTOOL_GRXCLSRLALL:
@@ -1587,7 +1823,9 @@ static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
static int bnxt_get_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
+ struct bnxt_rss_ctx *rss_ctx = NULL;
struct bnxt *bp = netdev_priv(dev);
+ u32 *indir_tbl = bp->rss_indir_tbl;
struct bnxt_vnic_info *vnic;
u32 i, tbl_size;
@@ -1596,11 +1834,22 @@ static int bnxt_get_rxfh(struct net_device *dev,
if (!bp->vnic_info)
return 0;
- vnic = &bp->vnic_info[0];
- if (rxfh->indir && bp->rss_indir_tbl) {
+ vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
+ if (rxfh->rss_context) {
+ struct ethtool_rxfh_context *ctx;
+
+ ctx = xa_load(&bp->dev->ethtool->rss_ctx, rxfh->rss_context);
+ if (!ctx)
+ return -EINVAL;
+ indir_tbl = ethtool_rxfh_context_indir(ctx);
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+ vnic = &rss_ctx->vnic;
+ }
+
+ if (rxfh->indir && indir_tbl) {
tbl_size = bnxt_get_rxfh_indir_size(dev);
for (i = 0; i < tbl_size; i++)
- rxfh->indir[i] = bp->rss_indir_tbl[i];
+ rxfh->indir[i] = indir_tbl[i];
}
if (rxfh->key && vnic->rss_hash_key)
@@ -1609,6 +1858,159 @@ static int bnxt_get_rxfh(struct net_device *dev,
return 0;
}
+static void bnxt_modify_rss(struct bnxt *bp, struct ethtool_rxfh_context *ctx,
+ struct bnxt_rss_ctx *rss_ctx,
+ const struct ethtool_rxfh_param *rxfh)
+{
+ if (rxfh->key) {
+ if (rss_ctx) {
+ memcpy(rss_ctx->vnic.rss_hash_key, rxfh->key,
+ HW_HASH_KEY_SIZE);
+ } else {
+ memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE);
+ bp->rss_hash_key_updated = true;
+ }
+ }
+ if (rxfh->indir) {
+ u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+ u32 *indir_tbl = bp->rss_indir_tbl;
+
+ if (rss_ctx)
+ indir_tbl = ethtool_rxfh_context_indir(ctx);
+ for (i = 0; i < tbl_size; i++)
+ indir_tbl[i] = rxfh->indir[i];
+ pad = bp->rss_indir_tbl_entries - tbl_size;
+ if (pad)
+ memset(&indir_tbl[i], 0, pad * sizeof(*indir_tbl));
+ }
+}
+
+static int bnxt_rxfh_context_check(struct bnxt *bp,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
+ NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) {
+ NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!netif_running(bp->dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to set RSS contexts when interface is down");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int bnxt_create_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rss_ctx *rss_ctx;
+ struct bnxt_vnic_info *vnic;
+ int rc;
+
+ rc = bnxt_rxfh_context_check(bp, rxfh, extack);
+ if (rc)
+ return rc;
+
+ if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u",
+ BNXT_MAX_ETH_RSS_CTX);
+ return -EINVAL;
+ }
+
+ if (!bnxt_rfs_capable(bp, true)) {
+ NL_SET_ERR_MSG_MOD(extack, "Out hardware resources");
+ return -ENOMEM;
+ }
+
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+
+ bp->num_rss_ctx++;
+
+ vnic = &rss_ctx->vnic;
+ vnic->rss_ctx = ctx;
+ vnic->flags |= BNXT_VNIC_RSSCTX_FLAG;
+ vnic->vnic_id = BNXT_VNIC_ID_INVALID;
+ rc = bnxt_alloc_vnic_rss_table(bp, vnic);
+ if (rc)
+ goto out;
+
+ /* Populate defaults in the context */
+ bnxt_set_dflt_rss_indir_tbl(bp, ctx);
+ ctx->hfunc = ETH_RSS_HASH_TOP;
+ memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE);
+ memcpy(ethtool_rxfh_context_key(ctx),
+ bp->rss_hash_key, HW_HASH_KEY_SIZE);
+
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VNIC");
+ goto out;
+ }
+
+ rc = bnxt_hwrm_vnic_set_tpa(bp, vnic, bp->flags & BNXT_FLAG_TPA);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA");
+ goto out;
+ }
+ bnxt_modify_rss(bp, ctx, rss_ctx, rxfh);
+
+ rc = __bnxt_setup_vnic_p5(bp, vnic);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA");
+ goto out;
+ }
+
+ rss_ctx->index = rxfh->rss_context;
+ return 0;
+out:
+ bnxt_del_one_rss_ctx(bp, rss_ctx, true);
+ return rc;
+}
+
+static int bnxt_modify_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rss_ctx *rss_ctx;
+ int rc;
+
+ rc = bnxt_rxfh_context_check(bp, rxfh, extack);
+ if (rc)
+ return rc;
+
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+
+ bnxt_modify_rss(bp, ctx, rss_ctx, rxfh);
+
+ return bnxt_hwrm_vnic_rss_cfg_p5(bp, &rss_ctx->vnic);
+}
+
+static int bnxt_remove_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rss_ctx *rss_ctx;
+
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+
+ bnxt_del_one_rss_ctx(bp, rss_ctx, true);
+ return 0;
+}
+
static int bnxt_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
@@ -1619,18 +2021,7 @@ static int bnxt_set_rxfh(struct net_device *dev,
if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (rxfh->key)
- return -EOPNOTSUPP;
-
- if (rxfh->indir) {
- u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
-
- for (i = 0; i < tbl_size; i++)
- bp->rss_indir_tbl[i] = rxfh->indir[i];
- pad = bp->rss_indir_tbl_entries - tbl_size;
- if (pad)
- memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
- }
+ bnxt_modify_rss(bp, NULL, NULL, rxfh);
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
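
The create/modify/remove ops above plug into the ethtool core's RSS-context model: the core allocates a struct ethtool_rxfh_context per additional context, reserves .rxfh_priv_size bytes of driver-private storage behind it (here struct bnxt_rss_ctx), and provides accessors for the private area, indirection table and hash key. A minimal sketch of the accessors with a hypothetical foo_rss type:

	#include <linux/ethtool.h>
	#include <linux/printk.h>

	struct foo_rss {		/* private area, sized by .rxfh_priv_size */
		u32 hw_ctx_id;
	};

	static void foo_dump_rxfh_ctx(struct ethtool_rxfh_context *ctx)
	{
		struct foo_rss *priv = ethtool_rxfh_context_priv(ctx);
		u32 *indir = ethtool_rxfh_context_indir(ctx);
		u8 *key = ethtool_rxfh_context_key(ctx);

		pr_info("hw ctx %u hfunc %u indir[0]=%u key[0]=%02x\n",
			priv->hw_ctx_id, ctx->hfunc, indir[0], key[0]);
	}
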
@@ -1681,7 +2072,8 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
int rc;
regs->version = 0;
- bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
+ if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED))
+ bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
return;
@@ -1751,31 +2143,21 @@ static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return 0;
}
-u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
+/* TODO: support 25GB, 40GB, 50GB with different cable type */
+void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds)
{
- u32 speed_mask = 0;
+ linkmode_zero(mode);
- /* TODO: support 25GB, 40GB, 50GB with different cable type */
- /* set the advertised speeds */
if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
- speed_mask |= ADVERTISED_100baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
- speed_mask |= ADVERTISED_1000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
- speed_mask |= ADVERTISED_2500baseX_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
- speed_mask |= ADVERTISED_10000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode);
if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
- speed_mask |= ADVERTISED_40000baseCR4_Full;
-
- if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
- speed_mask |= ADVERTISED_Pause;
- else if (fw_pause & BNXT_LINK_PAUSE_TX)
- speed_mask |= ADVERTISED_Asym_Pause;
- else if (fw_pause & BNXT_LINK_PAUSE_RX)
- speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
-
- return speed_mask;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode);
}
enum bnxt_media_type {
@@ -2482,19 +2864,24 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
}
base->port = PORT_NONE;
- if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+ if (media == BNXT_MEDIA_TP) {
base->port = PORT_TP;
linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
lk_ksettings->link_modes.supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
lk_ksettings->link_modes.advertising);
+ } else if (media == BNXT_MEDIA_KR) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
+ lk_ksettings->link_modes.supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT,
+ lk_ksettings->link_modes.advertising);
} else {
linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
lk_ksettings->link_modes.supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
lk_ksettings->link_modes.advertising);
- if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
+ if (media == BNXT_MEDIA_CR)
base->port = PORT_DA;
else
base->port = PORT_FIBRE;
@@ -2643,23 +3030,22 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
return 0;
}
-u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
+u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode)
{
u16 fw_speed_mask = 0;
- /* only support autoneg at speed 100, 1000, and 10000 */
- if (advertising & (ADVERTISED_100baseT_Full |
- ADVERTISED_100baseT_Half)) {
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
- }
- if (advertising & (ADVERTISED_1000baseT_Full |
- ADVERTISED_1000baseT_Half)) {
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
- }
- if (advertising & ADVERTISED_10000baseT_Full)
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
- if (advertising & ADVERTISED_40000baseCR4_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode))
fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
return fw_speed_mask;
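
The two conversions above retire the legacy 32-bit ADVERTISED_* masks in favour of linkmode bitmaps, which scale past 32 link modes. A short sketch of the bitmap helpers involved (the firmware speed bit is hypothetical):

	#include <linux/bits.h>
	#include <linux/linkmode.h>

	static bool foo_advertises_40g(u16 fw_speeds)
	{
		__ETHTOOL_DECLARE_LINK_MODE_MASK(mode);

		linkmode_zero(mode);
		if (fw_speeds & BIT(6))	/* hypothetical speed bit */
			linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
					 mode);
		return linkmode_test_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
					 mode);
	}
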
@@ -3798,12 +4184,12 @@ err:
static void bnxt_get_pkgver(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- char buf[FW_VER_STR_LEN];
+ char buf[FW_VER_STR_LEN - 5];
int len;
if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
len = strlen(bp->fw_ver_str);
- snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
+ snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len,
"/pkg %s", buf);
}
}
@@ -3884,12 +4270,13 @@ static int bnxt_set_eeprom(struct net_device *dev,
eeprom->len);
}
-static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnxt_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);
struct bnxt *bp = netdev_priv(dev);
- struct ethtool_eee *eee = &bp->eee;
+ struct ethtool_keee *eee = &bp->eee;
struct bnxt_link_info *link_info = &bp->link_info;
- u32 advertising;
int rc = 0;
if (!BNXT_PHY_CFG_ABLE(bp))
@@ -3899,7 +4286,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
mutex_lock(&bp->link_lock);
- advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+ _bnxt_fw_to_linkmode(advertising, link_info->advertising);
if (!edata->eee_enabled)
goto eee_ok;
@@ -3919,16 +4306,15 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
edata->tx_lpi_timer = eee->tx_lpi_timer;
}
}
- if (!edata->advertised) {
- edata->advertised = advertising & eee->supported;
- } else if (edata->advertised & ~advertising) {
- netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
- edata->advertised, advertising);
+ if (linkmode_empty(edata->advertised)) {
+ linkmode_and(edata->advertised, advertising, eee->supported);
+ } else if (linkmode_andnot(tmp, edata->advertised, advertising)) {
+ netdev_warn(dev, "EEE advertised must be a subset of autoneg advertised speeds\n");
rc = -EINVAL;
goto eee_exit;
}
- eee->advertised = edata->advertised;
+ linkmode_copy(eee->advertised, edata->advertised);
eee->tx_lpi_enabled = edata->tx_lpi_enabled;
eee->tx_lpi_timer = edata->tx_lpi_timer;
eee_ok:
@@ -3942,7 +4328,7 @@ eee_exit:
return rc;
}
-static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct bnxt *bp = netdev_priv(dev);
@@ -3954,12 +4340,51 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
/* Preserve tx_lpi_timer so that the last value will be used
* by default when it is re-enabled.
*/
- edata->advertised = 0;
+ linkmode_zero(edata->advertised);
edata->tx_lpi_enabled = 0;
}
if (!bp->eee.eee_active)
- edata->lp_advertised = 0;
+ linkmode_zero(edata->lp_advertised);
+
+ return 0;
+}
+
+static int bnxt_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u32 rx_copybreak;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ rx_copybreak = *(u32 *)data;
+ if (rx_copybreak > BNXT_MAX_RX_COPYBREAK)
+ return -ERANGE;
+ if (rx_copybreak != bp->rx_copybreak) {
+ if (netif_running(dev))
+ return -EBUSY;
+ bp->rx_copybreak = rx_copybreak;
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bnxt_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = bp->rx_copybreak;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
return 0;
}
@@ -4012,6 +4437,9 @@ static int bnxt_get_module_info(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
int rc;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
+ return -EPERM;
+
/* No point in going further if phy status indicates
* module is not inserted or if it is powered down or
* if it is of type 10GBase-T
@@ -4063,6 +4491,9 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
u16 start = eeprom->offset, length = eeprom->len;
int rc = 0;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
+ return -EPERM;
+
memset(data, 0, eeprom->len);
/* Read A0 portion of the EEPROM */
@@ -4117,6 +4548,12 @@ static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
int rc;
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Module read not permitted on untrusted VF");
+ return -EPERM;
+ }
+
rc = bnxt_get_module_status(bp, extack);
if (rc)
return rc;
@@ -4414,7 +4851,8 @@ static int bnxt_run_loopback(struct bnxt *bp)
cpr = &rxr->bnapi->cp_ring;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
cpr = rxr->rx_cpr;
- pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
+ pkt_size = min(bp->dev->mtu + ETH_HLEN, max(BNXT_DEFAULT_RX_COPYBREAK,
+ bp->rx_copybreak));
skb = netdev_alloc_skb(bp->dev, pkt_size);
if (!skb)
return -ENOMEM;
@@ -4483,6 +4921,14 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (!bp->num_tests || !BNXT_PF(bp))
return;
+
+ if (etest->flags & ETH_TEST_FL_OFFLINE &&
+ bnxt_ulp_registered(bp->edev)) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ netdev_warn(dev, "Offline tests cannot be run with RoCE driver loaded\n");
+ return;
+ }
+
memset(buf, 0, sizeof(u64) * bp->num_tests);
if (!netif_running(dev)) {
etest->flags |= ETH_TEST_FL_FAILED;
@@ -4513,45 +4959,51 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (!offline) {
bnxt_run_fw_tests(bp, test_mask, &test_results);
} else {
- bnxt_ulp_stop(bp);
bnxt_close_nic(bp, true, false);
bnxt_run_fw_tests(bp, test_mask, &test_results);
- buf[BNXT_MACLPBK_TEST_IDX] = 1;
- bnxt_hwrm_mac_loopback(bp, true);
- msleep(250);
rc = bnxt_half_open_nic(bp);
if (rc) {
- bnxt_hwrm_mac_loopback(bp, false);
etest->flags |= ETH_TEST_FL_FAILED;
- bnxt_ulp_start(bp, rc);
return;
}
+ buf[BNXT_MACLPBK_TEST_IDX] = 1;
+ if (bp->mac_flags & BNXT_MAC_FL_NO_MAC_LPBK)
+ goto skip_mac_loopback;
+
+ bnxt_hwrm_mac_loopback(bp, true);
+ msleep(250);
if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
else
buf[BNXT_MACLPBK_TEST_IDX] = 0;
bnxt_hwrm_mac_loopback(bp, false);
+skip_mac_loopback:
+ buf[BNXT_PHYLPBK_TEST_IDX] = 1;
+ if (bp->phy_flags & BNXT_PHY_FL_NO_PHY_LPBK)
+ goto skip_phy_loopback;
+
bnxt_hwrm_phy_loopback(bp, true, false);
msleep(1000);
- if (bnxt_run_loopback(bp)) {
- buf[BNXT_PHYLPBK_TEST_IDX] = 1;
+ if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
- }
+ else
+ buf[BNXT_PHYLPBK_TEST_IDX] = 0;
+skip_phy_loopback:
+ buf[BNXT_EXTLPBK_TEST_IDX] = 1;
if (do_ext_lpbk) {
etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
bnxt_hwrm_phy_loopback(bp, true, true);
msleep(1000);
- if (bnxt_run_loopback(bp)) {
- buf[BNXT_EXTLPBK_TEST_IDX] = 1;
+ if (bnxt_run_loopback(bp))
etest->flags |= ETH_TEST_FL_FAILED;
- }
+ else
+ buf[BNXT_EXTLPBK_TEST_IDX] = 0;
}
bnxt_hwrm_phy_loopback(bp, false, false);
bnxt_half_close_nic(bp);
rc = bnxt_open_nic(bp, true, true);
- bnxt_ulp_start(bp, rc);
}
if (rc || bnxt_test_irq(bp)) {
buf[BNXT_IRQ_TEST_IDX] = 1;
@@ -4625,14 +5077,21 @@ static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
{
struct bnxt *bp = netdev_priv(dev);
- if (dump->flag > BNXT_DUMP_CRASH) {
- netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n");
+ if (dump->flag > BNXT_DUMP_DRIVER) {
+ netdev_info(dev, "Supports only Live(0), Crash(1), Driver(2) dumps.\n");
return -EINVAL;
}
- if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) {
- netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
- return -EOPNOTSUPP;
+ if (dump->flag == BNXT_DUMP_CRASH) {
+ if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR &&
+ (!IS_ENABLED(CONFIG_TEE_BNXT_FW))) {
+ netdev_info(dev,
+ "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
+ return -EOPNOTSUPP;
+ } else if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) {
+ netdev_info(dev, "Crash dump collection from host memory is not supported on this interface.\n");
+ return -EOPNOTSUPP;
+ }
}
bp->dump_flag = dump->flag;
@@ -4671,17 +5130,14 @@ static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
}
static int bnxt_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ptp_cfg *ptp;
ptp = bp->ptp_cfg;
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
- info->phc_index = -1;
if (!ptp)
return 0;
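
The get_ts_info change above follows the core's move to struct kernel_ethtool_ts_info: judging by what this hunk deletes, the ethtool core now pre-initializes phc_index to -1 and reports the always-available software RX timestamping itself, while TX software timestamping still depends on the driver calling skb_tx_timestamp() and so remains declared here. A sketch for a hypothetical PTP-less driver, assuming that core behaviour:

	#include <linux/ethtool.h>
	#include <linux/net_tstamp.h>

	static int foo_get_ts_info(struct net_device *dev,
				   struct kernel_ethtool_ts_info *info)
	{
		/* phc_index already defaults to -1; the core adds the
		 * software RX flags, so only TX software stamping is
		 * declared by the driver.
		 */
		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
		return 0;
	}
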
@@ -4891,6 +5347,19 @@ static void bnxt_get_rmon_stats(struct net_device *dev,
*ranges = bnxt_rmon_ranges;
}
+static void bnxt_get_ptp_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (ptp) {
+ ts_stats->pkts = ptp->stats.ts_pkts;
+ ts_stats->lost = ptp->stats.ts_lost;
+ ts_stats->err = atomic64_read(&ptp->stats.ts_err);
+ }
+}
+
static void bnxt_get_link_ext_stats(struct net_device *dev,
struct ethtool_link_ext_stats *stats)
{
@@ -4913,6 +5382,10 @@ void bnxt_ethtool_free(struct bnxt *bp)
const struct ethtool_ops bnxt_ethtool_ops = {
.cap_link_lanes_supported = 1,
+ .rxfh_per_ctx_key = 1,
+ .rxfh_max_num_contexts = BNXT_MAX_ETH_RSS_CTX + 1,
+ .rxfh_indir_space = BNXT_MAX_RSS_TABLE_ENTRIES_P5,
+ .rxfh_priv_size = sizeof(struct bnxt_rss_ctx),
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USECS_IRQ |
@@ -4920,6 +5393,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
ETHTOOL_COALESCE_STATS_BLOCK_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
ETHTOOL_COALESCE_USE_CQE,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
+ ETHTOOL_RING_USE_HDS_THRS,
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
.get_fec_stats = bnxt_get_fec_stats,
@@ -4950,6 +5425,9 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_rxfh_key_size = bnxt_get_rxfh_key_size,
.get_rxfh = bnxt_get_rxfh,
.set_rxfh = bnxt_set_rxfh,
+ .create_rxfh_context = bnxt_create_rxfh_context,
+ .modify_rxfh_context = bnxt_modify_rxfh_context,
+ .remove_rxfh_context = bnxt_remove_rxfh_context,
.flash_device = bnxt_flash_device,
.get_eeprom_len = bnxt_get_eeprom_len,
.get_eeprom = bnxt_get_eeprom,
@@ -4958,6 +5436,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_link_ext_stats = bnxt_get_link_ext_stats,
.get_eee = bnxt_get_eee,
.set_eee = bnxt_set_eee,
+ .get_tunable = bnxt_get_tunable,
+ .set_tunable = bnxt_set_tunable,
.get_module_info = bnxt_get_module_info,
.get_module_eeprom = bnxt_get_module_eeprom,
.get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
@@ -4973,4 +5453,5 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_eth_mac_stats = bnxt_get_eth_mac_stats,
.get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats,
.get_rmon_stats = bnxt_get_rmon_stats,
+ .get_ts_stats = bnxt_get_ptp_stats,
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index a8ecef8ab82c..33b86ede1ce5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -43,12 +43,15 @@ struct bnxt_led_cfg {
#define BNXT_PXP_REG_LEN 0x3110
+#define BNXT_IP_PROTO_FULL_MASK 0xFF
+#define BNXT_IP_PROTO_WILDCARD 0x0
+
extern const struct ethtool_ops bnxt_ethtool_ops;
u32 bnxt_get_rxfh_indir_size(struct net_device *dev);
-u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
+void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds);
u32 bnxt_fw_to_ethtool_speed(u16);
-u16 bnxt_get_fw_auto_link_speeds(u32);
+u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode);
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
struct hwrm_nvm_get_dev_info_output *nvm_dev_info);
int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index e957abd704db..5f8de1634378 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2023 Broadcom Inc.
+ * Copyright (c) 2018-2024 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -42,6 +42,10 @@ struct hwrm_resp_hdr {
#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
#define TLV_TYPE_QUERY_ROCE_CC_GEN2 0x6UL
#define TLV_TYPE_MODIFY_ROCE_CC_GEN2 0x7UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN1_EXT 0x8UL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN1_EXT 0x9UL
+#define TLV_TYPE_QUERY_ROCE_CC_GEN2_EXT 0xaUL
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN2_EXT 0xbUL
#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL
#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
@@ -403,6 +407,9 @@ struct cmd_nums {
#define HWRM_FUNC_LAG_UPDATE 0x1b1UL
#define HWRM_FUNC_LAG_FREE 0x1b2UL
#define HWRM_FUNC_LAG_QCFG 0x1b3UL
+ #define HWRM_FUNC_TIMEDTX_PACING_RATE_ADD 0x1c2UL
+ #define HWRM_FUNC_TIMEDTX_PACING_RATE_DELETE 0x1c3UL
+ #define HWRM_FUNC_TIMEDTX_PACING_RATE_QUERY 0x1c4UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -430,6 +437,9 @@ struct cmd_nums {
#define HWRM_STAT_GENERIC_QSTATS 0x218UL
#define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL
#define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL
+ #define HWRM_MFG_TESTS 0x21bUL
+ #define HWRM_PORT_POE_CFG 0x230UL
+ #define HWRM_PORT_POE_QCFG 0x231UL
#define HWRM_UDCC_QCAPS 0x258UL
#define HWRM_UDCC_CFG 0x259UL
#define HWRM_UDCC_QCFG 0x25aUL
@@ -439,6 +449,9 @@ struct cmd_nums {
#define HWRM_UDCC_COMP_CFG 0x25eUL
#define HWRM_UDCC_COMP_QCFG 0x25fUL
#define HWRM_UDCC_COMP_QUERY 0x260UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x261UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x262UL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x263UL
#define HWRM_TF 0x2bcUL
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
@@ -468,6 +481,10 @@ struct cmd_nums {
#define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL
#define HWRM_TF_IF_TBL_SET 0x2feUL
#define HWRM_TF_IF_TBL_GET 0x2ffUL
+ #define HWRM_TF_RESC_USAGE_SET 0x300UL
+ #define HWRM_TF_RESC_USAGE_QUERY 0x301UL
+ #define HWRM_TF_TBL_TYPE_ALLOC 0x302UL
+ #define HWRM_TF_TBL_TYPE_FREE 0x303UL
#define HWRM_TFC_TBL_SCOPE_QCAPS 0x380UL
#define HWRM_TFC_TBL_SCOPE_ID_ALLOC 0x381UL
#define HWRM_TFC_TBL_SCOPE_CONFIG 0x382UL
@@ -495,7 +512,11 @@ struct cmd_nums {
#define HWRM_TFC_IF_TBL_SET 0x398UL
#define HWRM_TFC_IF_TBL_GET 0x399UL
#define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
+ #define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
+ #define HWRM_TFC_GLOBAL_ID_FREE 0x39cUL
#define HWRM_SV 0x400UL
+ #define HWRM_DBG_SERDES_TEST 0xff0eUL
+ #define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
#define HWRM_DBG_WRITE_DIRECT 0xff12UL
@@ -524,6 +545,9 @@ struct cmd_nums {
#define HWRM_DBG_USEQ_RUN 0xff29UL
#define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL
#define HWRM_DBG_USEQ_RESP_HDR 0xff2bUL
+ #define HWRM_DBG_COREDUMP_CAPTURE 0xff2cUL
+ #define HWRM_DBG_PTRACE 0xff2dUL
+ #define HWRM_DBG_SIM_CABLE_STATE 0xff2eUL
#define HWRM_NVM_GET_VPD_FIELD_INFO 0xffeaUL
#define HWRM_NVM_SET_VPD_FIELD_INFO 0xffebUL
#define HWRM_NVM_DEFRAG 0xffecUL
@@ -573,6 +597,7 @@ struct ret_codes {
#define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL
#define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL
#define HWRM_ERR_CODE_ENTITY_NOT_PRESENT 0x13UL
+ #define HWRM_ERR_CODE_SECURE_SOC_ERROR 0x14UL
#define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
#define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
@@ -604,8 +629,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 3
-#define HWRM_VERSION_RSVD 15
-#define HWRM_VERSION_STR "1.10.3.15"
+#define HWRM_VERSION_RSVD 85
+#define HWRM_VERSION_STR "1.10.3.85"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -659,6 +684,7 @@ struct hwrm_ver_get_output {
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
#define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_SOC_CAPABLE 0x10000UL
u8 roce_fw_maj_8b;
u8 roce_fw_min_8b;
u8 roce_fw_bld_8b;
@@ -838,7 +864,12 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL
#define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL
#define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_REPRESENTOR_PAIR_CHANGE 0x4eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_STAT_CHANGE 0x4fUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HOST_COREDUMP 0x50UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x51UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1276,6 +1307,43 @@ struct hwrm_async_event_cmpl_error_report {
#define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_SFT 0
};
+/* hwrm_async_event_cmpl_dbg_buf_producer (size:128b/16B) */
+struct hwrm_async_event_cmpl_dbg_buf_producer {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_MASK 0xffffffffUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURR_OFF_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT_TRACE 0x0UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT2_TRACE 0x1UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT_TRACE 0x2UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT2_TRACE 0x3UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP0_TRACE 0x4UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_L2_HWRM_TRACE 0x5UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ROCE_HWRM_TRACE 0x6UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA0_TRACE 0x7UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA1_TRACE 0x8UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA2_TRACE 0x9UL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP1_TRACE 0xaUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_AFM_KONG_HWRM_TRACE 0xbUL
+ #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_AFM_KONG_HWRM_TRACE
+};
+
/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
struct hwrm_async_event_cmpl_hwrm_error {
__le16 type;
@@ -1321,15 +1389,16 @@ struct hwrm_async_event_cmpl_error_report_base {
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
};
/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
@@ -1478,6 +1547,30 @@ struct hwrm_async_event_cmpl_error_report_thermal {
#define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING
};
+/* hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
+};
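
	/*
	 * Illustrative sketch (not part of this patch): a driver-side async
	 * event handler would mask the error type out of event_data1 with the
	 * base-struct mask/shift and recognize the new dual-data-rate report.
	 * The function name is hypothetical; the warning text mirrors what a
	 * driver would plausibly log.
	 */
	static void bnxt_sketch_handle_error_report(struct bnxt *bp, u32 data1)
	{
		u32 err_type;

		err_type = (data1 & ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK) >>
			   ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT;
		if (err_type ==
		    ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED)
			netdev_warn(bp->dev,
				    "Speed change not supported with dual rate transceivers on this board\n");
	}
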
+
/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
__le16 req_type;
@@ -1654,7 +1747,7 @@ struct hwrm_func_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_func_qcaps_output (size:1088b/136B) */
+/* hwrm_func_qcaps_output (size:1152b/144B) */
struct hwrm_func_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -1781,6 +1874,15 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED 0x100000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_UDCC_SUPPORTED 0x200000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_SO_TXTIME_SUPPORTED 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_LPBK_STATS_SUPPORTED 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_PEER_MMAP_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_PACING_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_VF_STAT_EJECTION_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_HOST_COREDUMP_SUPPORTED 0x80000000UL
__le16 tunnel_disable_flag;
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
@@ -1791,20 +1893,23 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL
__le16 xid_partition_cap;
- #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_KTLS_TKC 0x1UL
- #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_KTLS_RKC 0x2UL
- #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_QUIC_TKC 0x4UL
- #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_QUIC_RKC 0x8UL
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_TX_CK 0x1UL
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_RX_CK 0x2UL
u8 device_serial_number[8];
__le16 ctxs_per_partition;
- u8 unused_2[2];
+ __le16 max_tso_segs;
__le32 roce_vf_max_av;
__le32 roce_vf_max_cq;
__le32 roce_vf_max_mrw;
__le32 roce_vf_max_qp;
__le32 roce_vf_max_srq;
__le32 roce_vf_max_gid;
- u8 unused_3[3];
+ __le32 flags_ext3;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_REQUIRE_L2_FILTER 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT3_MAX_ROCE_VFS_SUPPORTED 0x4UL
+ __le16 max_roce_vfs;
+ u8 unused_3[5];
u8 valid;
};
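
	/*
	 * Illustrative sketch (not part of this patch): after HWRM_FUNC_QCAPS
	 * completes, a caller can test the new flags_ext2 bits, e.g. to decide
	 * whether host-backed coredump collection is available.  The helper
	 * name is hypothetical.
	 */
	static bool bnxt_sketch_host_coredump_capable(struct hwrm_func_qcaps_output *resp)
	{
		return le32_to_cpu(resp->flags_ext2) &
		       FUNC_QCAPS_RESP_FLAGS_EXT2_HOST_COREDUMP_SUPPORTED;
	}
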
@@ -1844,6 +1949,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL
#define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL
#define FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV 0x4000UL
+ #define FUNC_QCFG_RESP_FLAGS_ROCE_VNIC_ID_VALID 0x8000UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1955,7 +2061,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_DB_PAGE_SIZE_2MB 0x9UL
#define FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB 0xaUL
#define FUNC_QCFG_RESP_DB_PAGE_SIZE_LAST FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB
- u8 unused_2[2];
+ __le16 roce_vnic_id;
__le32 partition_min_bw;
#define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT 0
@@ -1979,7 +2085,8 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
__le16 host_mtu;
- u8 unused_3[2];
+ __le16 flags2;
+ #define FUNC_QCFG_RESP_FLAGS2_SRIOV_DSCP_INSERT_ENABLED 0x1UL
u8 unused_4[2];
u8 port_kdnet_mode;
#define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
@@ -2003,6 +2110,8 @@ struct hwrm_func_qcfg_output {
__le32 roce_max_srq_per_vf;
__le32 roce_max_gid_per_vf;
__le16 xid_partition_cfg;
+ #define FUNC_QCFG_RESP_XID_PARTITION_CFG_TX_CK 0x1UL
+ #define FUNC_QCFG_RESP_XID_PARTITION_CFG_RX_CK 0x2UL
u8 unused_7;
u8 valid;
};
@@ -2189,17 +2298,18 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS2_KTLS_KEY_CTX_ASSETS_TEST 0x1UL
#define FUNC_CFG_REQ_FLAGS2_QUIC_KEY_CTX_ASSETS_TEST 0x2UL
__le32 enables2;
- #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
- #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
- #define FUNC_CFG_REQ_ENABLES2_QUIC_TX_KEY_CTXS 0x4UL
- #define FUNC_CFG_REQ_ENABLES2_QUIC_RX_KEY_CTXS 0x8UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF 0x10UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF 0x20UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF 0x40UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF 0x80UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF 0x100UL
- #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF 0x200UL
- #define FUNC_CFG_REQ_ENABLES2_XID_PARTITION_CFG 0x400UL
+ #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
+ #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
+ #define FUNC_CFG_REQ_ENABLES2_QUIC_TX_KEY_CTXS 0x4UL
+ #define FUNC_CFG_REQ_ENABLES2_QUIC_RX_KEY_CTXS 0x8UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF 0x10UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF 0x20UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF 0x40UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF 0x80UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF 0x100UL
+ #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF 0x200UL
+ #define FUNC_CFG_REQ_ENABLES2_XID_PARTITION_CFG 0x400UL
+ #define FUNC_CFG_REQ_ENABLES2_PHYSICAL_SLOT_NUMBER 0x800UL
u8 port_kdnet_mode;
#define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL
#define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL
@@ -2217,7 +2327,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_DB_PAGE_SIZE_2MB 0x9UL
#define FUNC_CFG_REQ_DB_PAGE_SIZE_4MB 0xaUL
#define FUNC_CFG_REQ_DB_PAGE_SIZE_LAST FUNC_CFG_REQ_DB_PAGE_SIZE_4MB
- u8 unused_1[2];
+ __le16 physical_slot_number;
__le32 num_ktls_tx_key_ctxs;
__le32 num_ktls_rx_key_ctxs;
__le32 num_quic_tx_key_ctxs;
@@ -2229,10 +2339,8 @@ struct hwrm_func_cfg_input {
__le32 roce_max_srq_per_vf;
__le32 roce_max_gid_per_vf;
__le16 xid_partition_cfg;
- #define FUNC_CFG_REQ_XID_PARTITION_CFG_KTLS_TKC 0x1UL
- #define FUNC_CFG_REQ_XID_PARTITION_CFG_KTLS_RKC 0x2UL
- #define FUNC_CFG_REQ_XID_PARTITION_CFG_QUIC_TKC 0x4UL
- #define FUNC_CFG_REQ_XID_PARTITION_CFG_QUIC_RKC 0x8UL
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_TX_CK 0x1UL
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_RX_CK 0x2UL
__le16 unused_2;
};
@@ -2416,6 +2524,8 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL
#define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL
#define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_TF_INGRESS_NIC_FLOW_MODE 0x800UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_TF_EGRESS_NIC_FLOW_MODE 0x1000UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -3619,7 +3729,7 @@ struct hwrm_func_ptp_ext_qcfg_output {
u8 valid;
};
-/* hwrm_func_backing_store_cfg_v2_input (size:448b/56B) */
+/* hwrm_func_backing_store_cfg_v2_input (size:512b/64B) */
struct hwrm_func_backing_store_cfg_v2_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3627,28 +3737,39 @@ struct hwrm_func_backing_store_cfg_v2_input {
__le16 target_id;
__le64 resp_addr;
__le16 type;
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RKC 0x14UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
__le16 instance;
__le32 flags;
#define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
@@ -3678,6 +3799,9 @@ struct hwrm_func_backing_store_cfg_v2_input {
__le32 split_entry_1;
__le32 split_entry_2;
__le32 split_entry_3;
+ __le32 enables;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET 0x1UL
+ __le32 next_bs_offset;
};
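
	/*
	 * Illustrative sketch (not part of this patch, error handling
	 * trimmed): configuring one backing-store type and passing the new
	 * next_bs_offset field, gated by its enables bit, via the driver's
	 * usual hwrm request API.  The function name is hypothetical.
	 */
	static int bnxt_sketch_cfg_bs_v2(struct bnxt *bp, u16 type, u32 next_off)
	{
		struct hwrm_func_backing_store_cfg_v2_input *req;
		int rc;

		rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2);
		if (rc)
			return rc;
		req->type = cpu_to_le16(type);
		req->enables = cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_ENABLES_NEXT_BS_OFFSET);
		req->next_bs_offset = cpu_to_le32(next_off);
		return hwrm_req_send(bp, req);
	}
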
/* hwrm_func_backing_store_cfg_v2_output (size:128b/16B) */
@@ -3707,17 +3831,28 @@ struct hwrm_func_backing_store_qcfg_v2_input {
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RX_CK 0x14UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION_TABLE 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
__le16 instance;
@@ -3731,24 +3866,34 @@ struct hwrm_func_backing_store_qcfg_v2_output {
__le16 seq_id;
__le16 resp_len;
__le16 type;
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RKC 0x14UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_RKC 0x1bUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
__le16 instance;
__le32 flags;
__le64 page_dir;
@@ -3824,6 +3969,13 @@ struct ts_split_entries {
__le32 rsvd2[2];
};
+/* ck_split_entries (size:128b/16B) */
+struct ck_split_entries {
+ __le32 num_quic_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */
struct hwrm_func_backing_store_qcaps_v2_input {
__le16 req_type;
@@ -3832,28 +3984,39 @@ struct hwrm_func_backing_store_qcaps_v2_input {
__le16 target_id;
__le64 resp_addr;
__le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_KTLS_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_KTLS_RKC 0x14UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC 0x1bUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
u8 rsvd[6];
};
@@ -3864,34 +4027,48 @@ struct hwrm_func_backing_store_qcaps_v2_output {
__le16 seq_id;
__le16 resp_len;
__le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_KTLS_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_KTLS_RKC 0x14UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_RKC 0x1bUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TTX_PACING_TQM_RING 0x25UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA0_TRACE 0x26UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA1_TRACE 0x27UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CA2_TRACE 0x28UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP1_TRACE 0x29UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_AFM_KONG_HWRM_TRACE 0x2aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
__le16 entry_size;
__le32 flags;
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ROCE_QP_PSEUDO_STATIC_ALLOC 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_DBG_TRACE 0x10UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_FW_BIN_DBG_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_NEXT_BS_OFFSET 0x40UL
__le32 instance_bit_map;
u8 ctx_init_value;
u8 ctx_init_offset;
@@ -3912,7 +4089,8 @@ struct hwrm_func_backing_store_qcaps_v2_output {
__le32 split_entry_1;
__le32 split_entry_2;
__le32 split_entry_3;
- u8 rsvd3[3];
+ __le16 max_instance_count;
+ u8 rsvd3;
u8 valid;
};
@@ -3990,6 +4168,7 @@ struct hwrm_func_drv_if_change_output {
__le32 flags;
#define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
#define FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE 0x2UL
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE 0x4UL
u8 unused_0[3];
u8 valid;
};
@@ -4151,7 +4330,8 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112
__le16 auto_link_speeds2_mask;
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL
@@ -4166,6 +4346,7 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_800GB_PAM4_112 0x2000UL
u8 unused_2[6];
};
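
	/*
	 * Illustrative sketch (not part of this patch): these link-speed codes
	 * are in units of 100 Mbps, so the new 800GB code (0x1f42/0x1f40 =
	 * 8000-ish) converts the same way as the existing speeds.  The special
	 * 10MB code (0xffff) needs its own case, omitted here.
	 */
	static u32 bnxt_sketch_fw_speed_to_mbps(u16 fw_speed)
	{
		return (u32)fw_speed * 100;	/* e.g. 0x1f40 -> 800000 Mbps */
	}
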
@@ -4241,6 +4422,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_800GB 0x1f40UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
u8 duplex_cfg;
@@ -4337,6 +4519,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_OVERHEATED 0x6UL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
#define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
__le32 preemphasis;
@@ -4400,13 +4583,20 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASECR8 0x38UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASESR8 0x39UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASELR8 0x3aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEER8 0x3bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEFR8 0x3cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 0x3dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8
u8 media_type;
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_BACKPLANE 0x4UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_BACKPLANE
u8 xcvr_pkg_type;
#define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
#define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
@@ -4472,7 +4662,11 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPDD (0x18UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP112 (0x1eUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFPDD (0x1fUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP (0x20UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP
__le16 fec_cfg;
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
@@ -4517,7 +4711,8 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
u8 link_down_reason;
- #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_OTP_SPEED_VIOLATION 0x2UL
__le16 support_speeds2;
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB 0x1UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB 0x2UL
@@ -4858,7 +5053,9 @@ struct hwrm_port_qstats_output {
__le16 resp_len;
__le16 tx_stat_size;
__le16 rx_stat_size;
- u8 unused_0[3];
+ u8 flags;
+ #define PORT_QSTATS_RESP_FLAGS_CLEARED 0x1UL
+ u8 unused_0[2];
u8 valid;
};
@@ -4991,36 +5188,47 @@ struct hwrm_port_qstats_ext_output {
__le16 total_active_cos_queues;
u8 flags;
#define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL
+ #define PORT_QSTATS_EXT_RESP_FLAGS_CLEARED 0x2UL
u8 valid;
};
-/* hwrm_port_lpbk_qstats_input (size:128b/16B) */
+/* hwrm_port_lpbk_qstats_input (size:256b/32B) */
struct hwrm_port_lpbk_qstats_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
+ __le16 lpbk_stat_size;
+ u8 flags;
+ #define PORT_LPBK_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 lpbk_stat_host_addr;
};
-/* hwrm_port_lpbk_qstats_output (size:768b/96B) */
+/* hwrm_port_lpbk_qstats_output (size:128b/16B) */
struct hwrm_port_lpbk_qstats_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
+ __le16 lpbk_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* port_lpbk_stats (size:640b/80B) */
+struct port_lpbk_stats {
__le64 lpbk_ucast_frames;
__le64 lpbk_mcast_frames;
__le64 lpbk_bcast_frames;
__le64 lpbk_ucast_bytes;
__le64 lpbk_mcast_bytes;
__le64 lpbk_bcast_bytes;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
- __le64 rx_stat_discard;
- __le64 rx_stat_error;
- u8 unused_0[7];
- u8 valid;
+ __le64 lpbk_tx_discards;
+ __le64 lpbk_tx_errors;
+ __le64 lpbk_rx_discards;
+ __le64 lpbk_rx_errors;
};
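
	/*
	 * Illustrative sketch (not part of this patch): with the reworked
	 * request, the caller supplies a DMA buffer and the firmware writes a
	 * struct port_lpbk_stats into it instead of returning the counters
	 * inline in the response.  Assumes the driver's hwrm_req_dma_slice()
	 * helper; the function name is hypothetical and error paths are kept
	 * minimal.
	 */
	static int bnxt_sketch_lpbk_qstats(struct bnxt *bp,
					   struct port_lpbk_stats *stats_out)
	{
		struct hwrm_port_lpbk_qstats_output *resp;
		struct hwrm_port_lpbk_qstats_input *req;
		struct port_lpbk_stats *stats;
		dma_addr_t mapping;
		int rc;

		rc = hwrm_req_init(bp, req, HWRM_PORT_LPBK_QSTATS);
		if (rc)
			return rc;
		stats = hwrm_req_dma_slice(bp, req, sizeof(*stats), &mapping);
		if (!stats) {
			hwrm_req_drop(bp, req);
			return -ENOMEM;
		}
		req->lpbk_stat_size = cpu_to_le16(sizeof(*stats));
		req->lpbk_stat_host_addr = cpu_to_le64(mapping);
		resp = hwrm_req_hold(bp, req);
		rc = hwrm_req_send(bp, req);
		if (!rc && le16_to_cpu(resp->lpbk_stat_size) >= sizeof(*stats))
			*stats_out = *stats;
		hwrm_req_drop(bp, req);
		return rc;
	}
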
/* hwrm_port_ecn_qstats_input (size:256b/32B) */
@@ -5085,13 +5293,15 @@ struct hwrm_port_clr_stats_output {
u8 valid;
};
-/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */
+/* hwrm_port_lpbk_clr_stats_input (size:192b/24B) */
struct hwrm_port_lpbk_clr_stats_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
@@ -5232,10 +5442,11 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
__le16 flags2;
- #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_REMOTE_LPBK_UNSUPPORTED 0x10UL
u8 internal_port_cnt;
u8 unused_0;
__le16 supported_speeds2_force_mode;
@@ -6414,6 +6625,43 @@ struct hwrm_vnic_alloc_output {
u8 valid;
};
+/* hwrm_vnic_update_input (size:256b/32B) */
+struct hwrm_vnic_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 vnic_id;
+ __le32 enables;
+ #define VNIC_UPDATE_REQ_ENABLES_VNIC_STATE_VALID 0x1UL
+ #define VNIC_UPDATE_REQ_ENABLES_MRU_VALID 0x2UL
+ #define VNIC_UPDATE_REQ_ENABLES_METADATA_FORMAT_TYPE_VALID 0x4UL
+ u8 vnic_state;
+ #define VNIC_UPDATE_REQ_VNIC_STATE_NORMAL 0x0UL
+ #define VNIC_UPDATE_REQ_VNIC_STATE_DROP 0x1UL
+ #define VNIC_UPDATE_REQ_VNIC_STATE_LAST VNIC_UPDATE_REQ_VNIC_STATE_DROP
+ u8 metadata_format_type;
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_0 0x0UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_1 0x1UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_2 0x2UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_3 0x3UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4 0x4UL
+ #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_LAST VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4
+ __le16 mru;
+ u8 unused_1[4];
+};
+
+/* hwrm_vnic_update_output (size:128b/16B) */
+struct hwrm_vnic_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
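	/*
	 * Illustrative sketch (not part of this patch): updating a VNIC's MRU
	 * with the new message.  Only fields whose enables bits are set are
	 * applied by the firmware.  The function name is hypothetical.
	 */
	static int bnxt_sketch_vnic_update_mru(struct bnxt *bp, u32 vnic_id, u16 mru)
	{
		struct hwrm_vnic_update_input *req;
		int rc;

		rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE);
		if (rc)
			return rc;
		req->vnic_id = cpu_to_le32(vnic_id);
		req->enables = cpu_to_le32(VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
		req->mru = cpu_to_le16(mru);
		return hwrm_req_send(bp, req);
	}
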
/* hwrm_vnic_free_input (size:192b/24B) */
struct hwrm_vnic_free_input {
__le16 req_type;
@@ -6544,6 +6792,7 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED 0x8000000UL
#define VNIC_QCAPS_RESP_FLAGS_VNIC_RSS_HASH_MODE_CAP 0x10000000UL
#define VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP 0x20000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RE_FLUSH_CAP 0x40000000UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
@@ -7380,7 +7629,7 @@ struct hwrm_cfa_l2_filter_free_output {
u8 valid;
};
-/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */
+/* hwrm_cfa_l2_filter_cfg_input (size:384b/48B) */
struct hwrm_cfa_l2_filter_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -7388,23 +7637,34 @@ struct hwrm_cfa_l2_filter_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP (0x3UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_RESTORE_FW_OP
__le32 enables;
#define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
#define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_PROF_FUNC 0x4UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_L2_CONTEXT_ID 0x8UL
__le64 l2_filter_id;
__le32 dst_id;
__le32 new_mirror_vnic_id;
+ __le32 prof_func;
+ __le32 l2_context_id;
};
/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
@@ -8455,18 +8715,26 @@ struct hwrm_tunnel_dst_port_query_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
u8 tunnel_next_proto;
u8 unused_0[6];
};
@@ -8503,18 +8771,26 @@ struct hwrm_tunnel_dst_port_alloc_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
u8 tunnel_next_proto;
__be16 tunnel_dst_port_val;
u8 unused_0[4];
@@ -8554,18 +8830,26 @@ struct hwrm_tunnel_dst_port_free_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
u8 tunnel_next_proto;
__le16 tunnel_dst_port_id;
u8 unused_0[4];
@@ -8636,7 +8920,7 @@ struct ctx_hw_stats_ext {
__le64 rx_tpa_events;
};
-/* hwrm_stat_ctx_alloc_input (size:320b/40B) */
+/* hwrm_stat_ctx_alloc_input (size:384b/48B) */
struct hwrm_stat_ctx_alloc_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -8646,13 +8930,16 @@ struct hwrm_stat_ctx_alloc_input {
__le64 stats_dma_addr;
__le32 update_period_ms;
u8 stat_ctx_flags;
- #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
+ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_DUP_HOST_BUF 0x2UL
u8 unused_0;
__le16 stats_dma_length;
__le16 flags;
#define STAT_CTX_ALLOC_REQ_FLAGS_STEERING_TAG_VALID 0x1UL
__le16 steering_tag;
- __le32 unused_1;
+ __le32 stat_ctx_id;
+ __le16 alloc_seq_id;
+ u8 unused_1[6];
};
/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
@@ -8860,7 +9147,7 @@ struct hwrm_stat_generic_qstats_output {
u8 valid;
};
-/* generic_sw_hw_stats (size:1408b/176B) */
+/* generic_sw_hw_stats (size:1472b/184B) */
struct generic_sw_hw_stats {
__le64 pcie_statistics_tx_tlp;
__le64 pcie_statistics_rx_tlp;
@@ -8884,6 +9171,7 @@ struct generic_sw_hw_stats {
__le64 hw_db_recov_dbs_dropped;
__le64 hw_db_recov_drops_serviced;
__le64 hw_db_recov_dbs_recovered;
+ __le64 hw_db_recov_oo_drop_count;
};
/* hwrm_fw_reset_input (size:192b/24B) */
@@ -9011,19 +9299,22 @@ struct hwrm_fw_set_time_output {
/* hwrm_struct_hdr (size:128b/16B) */
struct hwrm_struct_hdr {
__le16 struct_id;
- #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
- #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
- #define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
- #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
- #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
- #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
- #define STRUCT_HDR_STRUCT_ID_MSIX_PER_VF 0xc8UL
- #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_MSIX_PER_VF
+ #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
+ #define STRUCT_HDR_STRUCT_ID_PEER_MMAP 0x429UL
+ #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
+ #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
+ #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
+ #define STRUCT_HDR_STRUCT_ID_MSIX_PER_VF 0xc8UL
+ #define STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_COUNT 0x12cUL
+ #define STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND 0x12dUL
+ #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND
__le16 len;
u8 version;
u8 count;
@@ -9518,10 +9809,14 @@ struct hwrm_dbg_qcaps_output {
__le32 coredump_component_disable_caps;
#define DBG_QCAPS_RESP_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM 0x1UL
__le32 flags;
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
- #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
- #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
+ #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL
+ #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_DDR 0x10UL
+ #define DBG_QCAPS_RESP_FLAGS_COREDUMP_HOST_CAPTURE 0x20UL
+ #define DBG_QCAPS_RESP_FLAGS_PTRACE 0x40UL
+ #define DBG_QCAPS_RESP_FLAGS_REG_ACCESS_RESTRICTED 0x80UL
u8 unused_1[3];
u8 valid;
};
@@ -9668,6 +9963,9 @@ struct hwrm_dbg_coredump_initiate_input {
__le16 instance;
__le16 unused_0;
u8 seg_flags;
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_LIVE_DATA 0x1UL
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_CRASH_DATA 0x2UL
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE 0x4UL
u8 unused_1[7];
};
@@ -9759,6 +10057,43 @@ struct hwrm_dbg_ring_info_get_output {
u8 valid;
};
+/* hwrm_dbg_log_buffer_flush_input (size:192b/24B) */
+struct hwrm_dbg_log_buffer_flush_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE 0x0UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE 0x1UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE 0x2UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE 0x3UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE 0x4UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE 0x5UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE 0x6UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA0_TRACE 0x7UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA1_TRACE 0x8UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CA2_TRACE 0x9UL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP1_TRACE 0xaUL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE 0xbUL
+ #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_LAST DBG_LOG_BUFFER_FLUSH_REQ_TYPE_AFM_KONG_HWRM_TRACE
+ u8 unused_1[2];
+ __le32 flags;
+ #define DBG_LOG_BUFFER_FLUSH_REQ_FLAGS_FLUSH_ALL_BUFFERS 0x1UL
+};
+
+/* hwrm_dbg_log_buffer_flush_output (size:128b/16B) */
+struct hwrm_dbg_log_buffer_flush_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 current_buffer_offset;
+ u8 unused_1[3];
+ u8 valid;
+};
+
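	/*
	 * Illustrative sketch (not part of this patch): flushing one firmware
	 * trace buffer before collecting it into a coredump.  The response's
	 * current_buffer_offset tells the driver how far into the host buffer
	 * the firmware has written.  The function name is hypothetical.
	 */
	static int bnxt_sketch_flush_trace(struct bnxt *bp, u16 type, u32 *offset)
	{
		struct hwrm_dbg_log_buffer_flush_output *resp;
		struct hwrm_dbg_log_buffer_flush_input *req;
		int rc;

		rc = hwrm_req_init(bp, req, HWRM_DBG_LOG_BUFFER_FLUSH);
		if (rc)
			return rc;
		req->type = cpu_to_le16(type);
		resp = hwrm_req_hold(bp, req);
		rc = hwrm_req_send(bp, req);
		if (!rc)
			*offset = le32_to_cpu(resp->current_buffer_offset);
		hwrm_req_drop(bp, req);
		return rc;
	}
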
/* hwrm_nvm_read_input (size:320b/40B) */
struct hwrm_nvm_read_input {
__le16 req_type;
@@ -9843,6 +10178,7 @@ struct hwrm_nvm_write_input {
#define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL
#define NVM_WRITE_REQ_FLAGS_BATCH_MODE 0x2UL
#define NVM_WRITE_REQ_FLAGS_BATCH_LAST 0x4UL
+ #define NVM_WRITE_REQ_FLAGS_SKIP_CRID_CHECK 0x8UL
__le32 dir_item_length;
__le32 offset;
__le32 len;
@@ -9957,16 +10293,19 @@ struct hwrm_nvm_erase_dir_entry_output {
u8 valid;
};
-/* hwrm_nvm_get_dev_info_input (size:128b/16B) */
+/* hwrm_nvm_get_dev_info_input (size:192b/24B) */
struct hwrm_nvm_get_dev_info_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
+ u8 flags;
+ #define NVM_GET_DEV_INFO_REQ_FLAGS_SECURITY_SOC_NVM 0x1UL
+ u8 unused_0[7];
};
-/* hwrm_nvm_get_dev_info_output (size:704b/88B) */
+/* hwrm_nvm_get_dev_info_output (size:768b/96B) */
struct hwrm_nvm_get_dev_info_output {
__le16 error_code;
__le16 req_type;
@@ -10000,6 +10339,10 @@ struct hwrm_nvm_get_dev_info_output {
__le16 netctrl_fw_minor;
__le16 netctrl_fw_build;
__le16 netctrl_fw_patch;
+ __le16 srt2_fw_major;
+ __le16 srt2_fw_minor;
+ __le16 srt2_fw_build;
+ __le16 srt2_fw_patch;
u8 unused_0[7];
u8 valid;
};
@@ -10343,13 +10686,13 @@ struct hwrm_selftest_irq_output {
/* dbc_dbc (size:64b/8B) */
struct dbc_dbc {
- u32 index;
+ __le32 index;
#define DBC_DBC_INDEX_MASK 0xffffffUL
#define DBC_DBC_INDEX_SFT 0
#define DBC_DBC_EPOCH 0x1000000UL
#define DBC_DBC_TOGGLE_MASK 0x6000000UL
#define DBC_DBC_TOGGLE_SFT 25
- u32 type_path_xid;
+ __le32 type_path_xid;
#define DBC_DBC_XID_MASK 0xfffffUL
#define DBC_DBC_XID_SFT 0
#define DBC_DBC_PATH_MASK 0x3000000UL
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index 1df3d56cc4b5..d2fd2d04ed47 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -680,7 +680,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
req_type);
else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
- req_type, token->seq_id, rc);
+ req_type, le16_to_cpu(ctx->req->seq_id), rc);
rc = __hwrm_to_stderr(rc);
exit:
if (token)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index cc07660330f5..2d4e19b96ee7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -62,19 +62,20 @@ static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info,
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
u64 ns = timespec64_to_ns(ts);
+ unsigned long flags;
if (BNXT_PTP_USE_RTC(ptp->bp))
return bnxt_ptp_cfg_settime(ptp->bp, ns);
- spin_lock_bh(&ptp->ptp_lock);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_init(&ptp->tc, &ptp->cc, ns);
- spin_unlock_bh(&ptp->ptp_lock);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
return 0;
}
/* Caller holds ptp_lock */
-static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
- u64 *ns)
+static int __bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
+ u64 *ns)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
u32 high_before, high_now, low;
@@ -97,19 +98,54 @@ static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
return 0;
}
+static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
+ u64 *ns)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ unsigned long flags;
+ int rc;
+
+ /* We have to serialize reg access and FW reset */
+ read_seqlock_excl_irqsave(&ptp->ptp_lock, flags);
+ rc = __bnxt_refclk_read(bp, sts, ns);
+ read_sequnlock_excl_irqrestore(&ptp->ptp_lock, flags);
+ return rc;
+}
+
+static int bnxt_refclk_read_low(struct bnxt *bp, struct ptp_system_timestamp *sts,
+ u32 *low)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ unsigned long flags;
+
+ /* We have to serialize reg access and FW reset */
+ read_seqlock_excl_irqsave(&ptp->ptp_lock, flags);
+
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ read_sequnlock_excl_irqrestore(&ptp->ptp_lock, flags);
+ return -EIO;
+ }
+
+ ptp_read_system_prets(sts);
+ *low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+ ptp_read_system_postts(sts);
+
+ read_sequnlock_excl_irqrestore(&ptp->ptp_lock, flags);
+ return 0;
+}
+
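	/*
	 * Illustrative sketch (not part of this patch): with ptp_lock
	 * converted from a spinlock to a seqlock, hot-path cycle-to-ns
	 * conversion can use a lockless read/retry loop instead of taking the
	 * lock; the bnxt_timecounter_cyc2time() helper used below is
	 * presumably defined along these lines.
	 */
	static u64 bnxt_sketch_cyc2time(struct bnxt_ptp_cfg *ptp, u64 cycles)
	{
		unsigned int seq;
		u64 ns;

		do {
			seq = read_seqbegin(&ptp->ptp_lock);
			ns = timecounter_cyc2time(&ptp->tc, cycles);
		} while (read_seqretry(&ptp->ptp_lock, seq));

		return ns;
	}
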
static void bnxt_ptp_get_current_time(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
if (!ptp)
return;
- spin_lock_bh(&ptp->ptp_lock);
- WRITE_ONCE(ptp->old_time, ptp->current_time);
+ WRITE_ONCE(ptp->old_time, ptp->current_time >> BNXT_HI_TIMER_SHIFT);
bnxt_refclk_read(bp, NULL, &ptp->current_time);
- spin_unlock_bh(&ptp->ptp_lock);
}
-static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
+static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts,
+ u32 txts_tmo, int slot)
{
struct hwrm_port_ts_query_output *resp;
struct hwrm_port_ts_query_input *req;
@@ -122,10 +158,16 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
req->flags = cpu_to_le32(flags);
if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) ==
PORT_TS_QUERY_REQ_FLAGS_PATH_TX) {
+ struct bnxt_ptp_tx_req *txts_req = &bp->ptp_cfg->txts_req[slot];
+ u32 tmo_us = txts_tmo * 1000;
+
req->enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
- req->ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
- req->ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off);
- req->ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT);
+ req->ptp_seq_id = cpu_to_le32(txts_req->tx_seqid);
+ req->ptp_hdr_offset = cpu_to_le16(txts_req->tx_hdr_off);
+ if (!tmo_us)
+ tmo_us = BNXT_PTP_QTS_TIMEOUT;
+ tmo_us = min(tmo_us, BNXT_PTP_QTS_MAX_TMO_US);
+ req->ts_req_timeout = cpu_to_le16(tmo_us);
}
resp = hwrm_req_hold(bp, req);
@@ -143,28 +185,26 @@ static int bnxt_ptp_gettimex(struct ptp_clock_info *ptp_info,
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
u64 ns, cycles;
+ u32 low;
int rc;
- spin_lock_bh(&ptp->ptp_lock);
- rc = bnxt_refclk_read(ptp->bp, sts, &cycles);
- if (rc) {
- spin_unlock_bh(&ptp->ptp_lock);
+ rc = bnxt_refclk_read_low(ptp->bp, sts, &low);
+ if (rc)
return rc;
- }
- ns = timecounter_cyc2time(&ptp->tc, cycles);
- spin_unlock_bh(&ptp->ptp_lock);
+
+ cycles = bnxt_extend_cycles_32b_to_48b(ptp, low);
+ ns = bnxt_timecounter_cyc2time(ptp, cycles);
*ts = ns_to_timespec64(ns);
return 0;
}
-/* Caller holds ptp_lock */
void bnxt_ptp_update_current_time(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
bnxt_refclk_read(ptp->bp, NULL, &ptp->current_time);
- WRITE_ONCE(ptp->old_time, ptp->current_time);
+ WRITE_ONCE(ptp->old_time, ptp->current_time >> BNXT_HI_TIMER_SHIFT);
}
static int bnxt_ptp_adjphc(struct bnxt_ptp_cfg *ptp, s64 delta)
@@ -183,9 +223,7 @@ static int bnxt_ptp_adjphc(struct bnxt_ptp_cfg *ptp, s64 delta)
if (rc) {
netdev_err(ptp->bp->dev, "ptp adjphc failed. rc = %x\n", rc);
} else {
- spin_lock_bh(&ptp->ptp_lock);
bnxt_ptp_update_current_time(ptp->bp);
- spin_unlock_bh(&ptp->ptp_lock);
}
return rc;
@@ -195,13 +233,14 @@ static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
+ unsigned long flags;
if (BNXT_PTP_USE_RTC(ptp->bp))
return bnxt_ptp_adjphc(ptp, delta);
- spin_lock_bh(&ptp->ptp_lock);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_adjtime(&ptp->tc, delta);
- spin_unlock_bh(&ptp->ptp_lock);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
return 0;
}
@@ -229,14 +268,15 @@ static int bnxt_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
struct bnxt *bp = ptp->bp;
+ unsigned long flags;
if (!BNXT_MH(bp))
return bnxt_ptp_adjfine_rtc(bp, scaled_ppm);
- spin_lock_bh(&ptp->ptp_lock);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_read(&ptp->tc);
ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm);
- spin_unlock_bh(&ptp->ptp_lock);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
return 0;
}
@@ -247,9 +287,7 @@ void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2)
u64 ns, pps_ts;
pps_ts = EVENT_PPS_TS(data2, data1);
- spin_lock_bh(&ptp->ptp_lock);
- ns = timecounter_cyc2time(&ptp->tc, pps_ts);
- spin_unlock_bh(&ptp->ptp_lock);
+ ns = bnxt_timecounter_cyc2time(ptp, pps_ts);
switch (EVENT_DATA2_PPS_EVENT_TYPE(data2)) {
case ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL:
@@ -388,14 +426,11 @@ static int bnxt_get_target_cycles(struct bnxt_ptp_cfg *ptp, u64 target_ns,
u64 nsec_now, nsec_delta;
int rc;
- spin_lock_bh(&ptp->ptp_lock);
rc = bnxt_refclk_read(ptp->bp, NULL, &cycles_now);
- if (rc) {
- spin_unlock_bh(&ptp->ptp_lock);
+ if (rc)
return rc;
- }
- nsec_now = timecounter_cyc2time(&ptp->tc, cycles_now);
- spin_unlock_bh(&ptp->ptp_lock);
+
+ nsec_now = bnxt_timecounter_cyc2time(ptp, cycles_now);
nsec_delta = target_ns - nsec_now;
*cycles_delta = div64_u64(nsec_delta << ptp->cc.shift, ptp->cc.mult);
@@ -650,6 +685,14 @@ static int bnxt_map_ptp_regs(struct bnxt *bp)
(ptp->refclk_regs[i] & BNXT_GRC_OFFSET_MASK);
return 0;
}
+ if (bp->flags & BNXT_FLAG_CHIP_P7) {
+ for (i = 0; i < 2; i++) {
+ if (reg_arr[i] & BNXT_GRC_BASE_MASK)
+ return -EINVAL;
+ ptp->refclk_mapped_regs[i] = reg_arr[i];
+ }
+ return 0;
+ }
return -ENODEV;
}
@@ -664,33 +707,46 @@ static u64 bnxt_cc_read(const struct cyclecounter *cc)
struct bnxt_ptp_cfg *ptp = container_of(cc, struct bnxt_ptp_cfg, cc);
u64 ns = 0;
- bnxt_refclk_read(ptp->bp, NULL, &ns);
+ __bnxt_refclk_read(ptp->bp, NULL, &ns);
return ns;
}
-static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
+static int bnxt_stamp_tx_skb(struct bnxt *bp, int slot)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct skb_shared_hwtstamps timestamp;
+ struct bnxt_ptp_tx_req *txts_req;
+ unsigned long now = jiffies;
u64 ts = 0, ns = 0;
+ u32 tmo = 0;
int rc;
- rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts);
+ txts_req = &ptp->txts_req[slot];
+ /* make sure bnxt_get_tx_ts_p5() has updated abs_txts_tmo */
+ smp_rmb();
+ if (!time_after_eq(now, txts_req->abs_txts_tmo))
+ tmo = jiffies_to_msecs(txts_req->abs_txts_tmo - now);
+ rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts,
+ tmo, slot);
if (!rc) {
memset(&timestamp, 0, sizeof(timestamp));
- spin_lock_bh(&ptp->ptp_lock);
- ns = timecounter_cyc2time(&ptp->tc, ts);
- spin_unlock_bh(&ptp->ptp_lock);
+ ns = bnxt_timecounter_cyc2time(ptp, ts);
timestamp.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(ptp->tx_skb, &timestamp);
+ skb_tstamp_tx(txts_req->tx_skb, &timestamp);
+ ptp->stats.ts_pkts++;
} else {
+ if (!time_after_eq(jiffies, txts_req->abs_txts_tmo))
+ return -EAGAIN;
+
+ ptp->stats.ts_lost++;
netdev_warn_once(bp->dev,
"TS query for TX timer failed rc = %x\n", rc);
}
- dev_kfree_skb_any(ptp->tx_skb);
- ptp->tx_skb = NULL;
- atomic_inc(&ptp->tx_avail);
+ dev_kfree_skb_any(txts_req->tx_skb);
+ txts_req->tx_skb = NULL;
+
+ return 0;
}
static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
@@ -699,53 +755,115 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
ptp_info);
unsigned long now = jiffies;
struct bnxt *bp = ptp->bp;
+ u16 cons = ptp->txts_cons;
+ unsigned long flags;
+ u32 num_requests;
+ int rc = 0;
+
+ num_requests = BNXT_MAX_TX_TS - READ_ONCE(ptp->tx_avail);
+ while (num_requests--) {
+ if (IS_ERR(ptp->txts_req[cons].tx_skb))
+ goto next_slot;
+ if (!ptp->txts_req[cons].tx_skb)
+ break;
+ rc = bnxt_stamp_tx_skb(bp, cons);
+ if (rc == -EAGAIN)
+ break;
+next_slot:
+ BNXT_PTP_INC_TX_AVAIL(ptp);
+ cons = NEXT_TXTS(cons);
+ }
+ ptp->txts_cons = cons;
- if (ptp->tx_skb)
- bnxt_stamp_tx_skb(bp, ptp->tx_skb);
-
- if (!time_after_eq(now, ptp->next_period))
+ if (!time_after_eq(now, ptp->next_period)) {
+ if (rc == -EAGAIN)
+ return 0;
return ptp->next_period - now;
+ }
bnxt_ptp_get_current_time(bp);
ptp->next_period = now + HZ;
if (time_after_eq(now, ptp->next_overflow_check)) {
- spin_lock_bh(&ptp->ptp_lock);
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_read(&ptp->tc);
- spin_unlock_bh(&ptp->ptp_lock);
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD;
}
+ if (rc == -EAGAIN)
+ return 0;
return HZ;
}
-int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb)
+int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod)
+{
+ spin_lock_bh(&ptp->ptp_tx_lock);
+ if (ptp->tx_avail) {
+ *prod = ptp->txts_prod;
+ ptp->txts_prod = NEXT_TXTS(*prod);
+ ptp->tx_avail--;
+ spin_unlock_bh(&ptp->ptp_tx_lock);
+ return 0;
+ }
+ spin_unlock_bh(&ptp->ptp_tx_lock);
+ atomic64_inc(&ptp->stats.ts_err);
+ return -ENOSPC;
+}
+
+void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct bnxt_ptp_tx_req *txts_req;
- if (ptp->tx_skb) {
- netdev_err(bp->dev, "deferring skb:one SKB is still outstanding\n");
- return -EBUSY;
- }
- ptp->tx_skb = skb;
+ txts_req = &ptp->txts_req[prod];
+ txts_req->abs_txts_tmo = jiffies + msecs_to_jiffies(ptp->txts_tmo);
+ /* make sure abs_txts_tmo is written first */
+ smp_wmb();
+ txts_req->tx_skb = skb;
ptp_schedule_worker(ptp->ptp_clock, 0);
- return 0;
}
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- u64 time;
if (!ptp)
return -ENODEV;
- BNXT_READ_TIME64(ptp, time, ptp->old_time);
- *ts = (time & BNXT_HI_TIMER_MASK) | pkt_ts;
- if (pkt_ts < (time & BNXT_LO_TIMER_MASK))
- *ts += BNXT_LO_TIMER_MASK + 1;
+ *ts = bnxt_extend_cycles_32b_to_48b(ptp, pkt_ts);
return 0;
}
+void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct tx_ts_cmp *tscmp)
+{
+ struct skb_shared_hwtstamps timestamp = {};
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u32 opaque = tscmp->tx_ts_cmp_opaque;
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_sw_tx_bd *tx_buf;
+ u64 ts, ns;
+ u16 cons;
+
+ txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
+ ts = BNXT_GET_TX_TS_48B_NS(tscmp);
+ cons = TX_OPAQUE_IDX(opaque);
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
+ if (tx_buf->is_ts_pkt) {
+ if (BNXT_TX_TS_ERR(tscmp)) {
+ netdev_err(bp->dev,
+ "timestamp completion error 0x%x 0x%x\n",
+ le32_to_cpu(tscmp->tx_ts_cmp_flags_type),
+ le32_to_cpu(tscmp->tx_ts_cmp_errors_v));
+ } else {
+ ns = bnxt_timecounter_cyc2time(ptp, ts);
+ timestamp.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(tx_buf->skb, &timestamp);
+ }
+ tx_buf->is_ts_pkt = 0;
+ }
+}
+
static const struct ptp_clock_info bnxt_ptp_caps = {
.owner = THIS_MODULE,
.name = "bnxt clock",
@@ -847,6 +965,7 @@ static bool bnxt_pps_config_ok(struct bnxt *bp)
static void bnxt_ptp_timecounter_init(struct bnxt *bp, bool init_tc)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ unsigned long flags;
if (!ptp->ptp_clock) {
memset(&ptp->cc, 0, sizeof(ptp->cc));
@@ -863,8 +982,11 @@ static void bnxt_ptp_timecounter_init(struct bnxt *bp, bool init_tc)
}
ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
}
- if (init_tc)
+ if (init_tc) {
+ write_seqlock_irqsave(&ptp->ptp_lock, flags);
timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+ write_sequnlock_irqrestore(&ptp->ptp_lock, flags);
+ }
}
/* Caller holds ptp_lock */
@@ -878,6 +1000,7 @@ void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns)
int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
{
struct timespec64 tsp;
+ unsigned long flags;
u64 ns;
int rc;
@@ -891,13 +1014,14 @@ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
if (rc)
return rc;
} else {
- rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME, &ns);
+ rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME,
+ &ns, 0, 0);
if (rc)
return rc;
}
- spin_lock_bh(&bp->ptp_cfg->ptp_lock);
+ write_seqlock_irqsave(&bp->ptp_cfg->ptp_lock, flags);
bnxt_ptp_rtc_timecounter_init(bp->ptp_cfg, ns);
- spin_unlock_bh(&bp->ptp_cfg->ptp_lock);
+ write_sequnlock_irqrestore(&bp->ptp_cfg->ptp_lock, flags);
return 0;
}
@@ -914,7 +1038,7 @@ static void bnxt_ptp_free(struct bnxt *bp)
}
}
-int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
+int bnxt_ptp_init(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
int rc;
@@ -931,12 +1055,13 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
bnxt_ptp_free(bp);
- atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
- spin_lock_init(&ptp->ptp_lock);
+ WRITE_ONCE(ptp->tx_avail, BNXT_MAX_TX_TS);
+ seqlock_init(&ptp->ptp_lock);
+ spin_lock_init(&ptp->ptp_tx_lock);
if (BNXT_PTP_USE_RTC(bp)) {
bnxt_ptp_timecounter_init(bp, false);
- rc = bnxt_ptp_init_rtc(bp, phc_cfg);
+ rc = bnxt_ptp_init_rtc(bp, ptp->rtc_configured);
if (rc)
goto out;
} else {
@@ -958,13 +1083,17 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
rc = err;
goto out;
}
- if (BNXT_CHIP_P5(bp)) {
- spin_lock_bh(&ptp->ptp_lock);
+
+ ptp->stats.ts_pkts = 0;
+ ptp->stats.ts_lost = 0;
+ atomic64_set(&ptp->stats.ts_err, 0);
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
bnxt_refclk_read(bp, NULL, &ptp->current_time);
- WRITE_ONCE(ptp->old_time, ptp->current_time);
- spin_unlock_bh(&ptp->ptp_lock);
+ WRITE_ONCE(ptp->old_time, ptp->current_time >> BNXT_HI_TIMER_SHIFT);
ptp_schedule_worker(ptp->ptp_clock, 0);
}
+ ptp->txts_tmo = BNXT_PTP_DFLT_TX_TMO;
return 0;
out:
@@ -976,6 +1105,7 @@ out:
void bnxt_ptp_clear(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ int i;
if (!ptp)
return;
@@ -987,9 +1117,12 @@ void bnxt_ptp_clear(struct bnxt *bp)
kfree(ptp->ptp_info.pin_config);
ptp->ptp_info.pin_config = NULL;
- if (ptp->tx_skb) {
- dev_kfree_skb_any(ptp->tx_skb);
- ptp->tx_skb = NULL;
+ for (i = 0; i < BNXT_MAX_TX_TS; i++) {
+ if (ptp->txts_req[i].tx_skb) {
+ dev_kfree_skb_any(ptp->txts_req[i].tx_skb);
+ ptp->txts_req[i].tx_skb = NULL;
+ }
}
+
bnxt_unmap_ptp_regs(bp);
}
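
The conversion above replaces the timecounter's spin_lock_bh with a seqlock: readers on the packet-timestamping hot path no longer contend with the PHC writer, while the exclusive read variant (read_seqlock_excl_irqsave) still serializes register access against firmware reset. A minimal standalone sketch of the reader/writer pattern, with invented names around the real seqlock and timecounter APIs:

    #include <linux/seqlock.h>
    #include <linux/timecounter.h>

    struct demo_clock {
            seqlock_t lock;         /* protects tc */
            struct timecounter tc;
    };

    /* Writer: mutates the timecounter with IRQs off, so a reader in
     * hard-IRQ context cannot spin against a preempted writer.
     */
    static void demo_adjtime(struct demo_clock *dc, s64 delta)
    {
            unsigned long flags;

            write_seqlock_irqsave(&dc->lock, flags);
            timecounter_adjtime(&dc->tc, delta);
            write_sequnlock_irqrestore(&dc->lock, flags);
    }

    /* Reader: lockless retry loop; redoes the conversion if a writer
     * raced in between read_seqbegin() and read_seqretry().
     */
    static u64 demo_cyc2time(struct demo_clock *dc, u64 cycles)
    {
            unsigned int seq;
            u64 ns;

            do {
                    seq = read_seqbegin(&dc->lock);
                    ns = timecounter_cyc2time(&dc->tc, cycles);
            } while (read_seqretry(&dc->lock, seq));

            return ns;
    }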
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index fce8dc39a7d0..a95f05e9c579 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -21,8 +21,11 @@
#define BNXT_DEVCLK_FREQ 1000000
#define BNXT_LO_TIMER_MASK 0x0000ffffffffUL
#define BNXT_HI_TIMER_MASK 0xffff00000000UL
+#define BNXT_HI_TIMER_SHIFT 24
+#define BNXT_PTP_DFLT_TX_TMO 1000 /* ms */
#define BNXT_PTP_QTS_TIMEOUT 1000
+#define BNXT_PTP_QTS_MAX_TMO_US 65535U
#define BNXT_PTP_QTS_TX_ENABLES (PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID | \
PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT | \
PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET)
@@ -77,6 +80,22 @@ struct bnxt_pps {
struct pps_pin pins[BNXT_MAX_TSIO_PINS];
};
+struct bnxt_ptp_stats {
+ u64 ts_pkts;
+ u64 ts_lost;
+ atomic64_t ts_err;
+};
+
+#define BNXT_MAX_TX_TS 4
+#define NEXT_TXTS(idx) (((idx) + 1) & (BNXT_MAX_TX_TS - 1))
+
+struct bnxt_ptp_tx_req {
+ struct sk_buff *tx_skb;
+ u16 tx_seqid;
+ u16 tx_hdr_off;
+ unsigned long abs_txts_tmo;
+};
+
struct bnxt_ptp_cfg {
struct ptp_clock_info ptp_info;
struct ptp_clock *ptp_clock;
@@ -84,21 +103,22 @@ struct bnxt_ptp_cfg {
struct timecounter tc;
struct bnxt_pps pps_info;
/* serialize timecounter access */
- spinlock_t ptp_lock;
- struct sk_buff *tx_skb;
+ seqlock_t ptp_lock;
+ /* serialize ts tx request queuing */
+ spinlock_t ptp_tx_lock;
u64 current_time;
- u64 old_time;
unsigned long next_period;
unsigned long next_overflow_check;
u32 cmult;
+ /* cache of the upper 24 bits of the cyclecounter; 8 bits are used to check for roll-over */
+ u32 old_time;
/* a 23b shift cyclecounter will overflow in ~36 mins. Check overflow every 18 mins. */
#define BNXT_PHC_OVERFLOW_PERIOD (18 * 60 * HZ)
- u16 tx_seqid;
- u16 tx_hdr_off;
+ struct bnxt_ptp_tx_req txts_req[BNXT_MAX_TX_TS];
+
struct bnxt *bp;
- atomic_t tx_avail;
-#define BNXT_MAX_TX_TS 1
+ u32 tx_avail;
u16 rxctl;
#define BNXT_PTP_MSG_SYNC (1 << 0)
#define BNXT_PTP_MSG_DELAY_REQ (1 << 1)
@@ -115,24 +135,25 @@ struct bnxt_ptp_cfg {
BNXT_PTP_MSG_PDELAY_REQ | \
BNXT_PTP_MSG_PDELAY_RESP)
u8 tx_tstamp_en:1;
+ u8 rtc_configured:1;
int rx_filter;
u32 tstamp_filters;
u32 refclk_regs[2];
u32 refclk_mapped_regs[2];
+ u32 txts_tmo;
+ u16 txts_prod;
+ u16 txts_cons;
+
+ struct bnxt_ptp_stats stats;
};
-#if BITS_PER_LONG == 32
-#define BNXT_READ_TIME64(ptp, dst, src) \
+#define BNXT_PTP_INC_TX_AVAIL(ptp) \
do { \
- spin_lock_bh(&(ptp)->ptp_lock); \
- (dst) = (src); \
- spin_unlock_bh(&(ptp)->ptp_lock); \
+ spin_lock_bh(&(ptp)->ptp_tx_lock); \
+ (ptp)->tx_avail++; \
+ spin_unlock_bh(&(ptp)->ptp_tx_lock); \
} while (0)
-#else
-#define BNXT_READ_TIME64(ptp, dst, src) \
- ((dst) = READ_ONCE(src))
-#endif
int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
void bnxt_ptp_update_current_time(struct bnxt *bp);
@@ -141,10 +162,36 @@ int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
void bnxt_ptp_reapply_pps(struct bnxt *bp);
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
-int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
+int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod);
+void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod);
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
+void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct tx_ts_cmp *tscmp);
void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns);
int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg);
-int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg);
+int bnxt_ptp_init(struct bnxt *bp);
void bnxt_ptp_clear(struct bnxt *bp);
+static inline u64 bnxt_timecounter_cyc2time(struct bnxt_ptp_cfg *ptp, u64 ts)
+{
+ unsigned int seq;
+ u64 ns;
+
+ do {
+ seq = read_seqbegin(&ptp->ptp_lock);
+ ns = timecounter_cyc2time(&ptp->tc, ts);
+ } while (read_seqretry(&ptp->ptp_lock, seq));
+
+ return ns;
+}
+
+static inline u64 bnxt_extend_cycles_32b_to_48b(struct bnxt_ptp_cfg *ptp, u32 ts)
+{
+ u64 time, cycles;
+
+ time = (u64)READ_ONCE(ptp->old_time) << BNXT_HI_TIMER_SHIFT;
+ cycles = (time & BNXT_HI_TIMER_MASK) | ts;
+ if (ts < (time & BNXT_LO_TIMER_MASK))
+ cycles += BNXT_LO_TIMER_MASK + 1;
+ return cycles;
+}
#endif
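
bnxt_extend_cycles_32b_to_48b() above widens a 32-bit hardware sample back to the full 48-bit cycle count by borrowing the high bits from the cached old_time and adding one low-word period when the sample has wrapped. A hedged, userspace-style illustration of the same arithmetic (masks copied from this header, sample values invented):

    #include <stdint.h>
    #include <stdio.h>

    #define LO_MASK  0x0000ffffffffULL  /* low 32 bits of the 48-bit counter */
    #define HI_MASK  0xffff00000000ULL  /* high 16 bits of the 48-bit counter */
    #define HI_SHIFT 24                 /* old_time caches bits 47..24 */

    static uint64_t extend_32b_to_48b(uint32_t cached_hi24, uint32_t ts)
    {
            uint64_t time = (uint64_t)cached_hi24 << HI_SHIFT;
            uint64_t cycles = (time & HI_MASK) | ts;

            /* A sample behind the cached low word means the counter
             * wrapped after old_time was snapshotted: carry one period.
             */
            if (ts < (time & LO_MASK))
                    cycles += LO_MASK + 1;
            return cycles;
    }

    int main(void)
    {
            /* counter cached near a wrap at 0x1234fffffff0, sample 0x10 */
            uint32_t hi24 = (uint32_t)(0x1234fffffff0ULL >> HI_SHIFT);

            printf("%llx\n",
                   (unsigned long long)extend_32b_to_48b(hi24, 0x10));
            /* prints 123500000010: the high word carried forward */
            return 0;
    }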
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 175192ebaa77..12b6ed51fd88 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -15,6 +15,7 @@
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
+#include <net/dcbnl.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
@@ -196,11 +197,8 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
ivi->max_tx_rate = vf->max_tx_rate;
ivi->min_tx_rate = vf->min_tx_rate;
- ivi->vlan = vf->vlan;
- if (vf->flags & BNXT_VF_QOS)
- ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
- else
- ivi->qos = 0;
+ ivi->vlan = vf->vlan & VLAN_VID_MASK;
+ ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
ivi->trusted = bnxt_is_trusted_vf(bp, vf);
if (!(vf->flags & BNXT_VF_LINK_FORCED))
@@ -256,21 +254,21 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
if (bp->hwrm_spec_code < 0x10201)
return -ENOTSUPP;
- if (vlan_proto != htons(ETH_P_8021Q))
+ if (vlan_proto != htons(ETH_P_8021Q) &&
+ (vlan_proto != htons(ETH_P_8021AD) ||
+ !(bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP)))
return -EPROTONOSUPPORT;
rc = bnxt_vf_ndo_prep(bp, vf_id);
if (rc)
return rc;
- /* TODO: needed to implement proper handling of user priority,
- * currently fail the command if there is valid priority
- */
- if (vlan_id > 4095 || qos)
+ if (vlan_id >= VLAN_N_VID || qos >= IEEE_8021Q_MAX_PRIORITIES ||
+ (!vlan_id && qos))
return -EINVAL;
vf = &bp->pf.vf[vf_id];
- vlan_tag = vlan_id;
+ vlan_tag = vlan_id | (u16)qos << VLAN_PRIO_SHIFT;
if (vlan_tag == vf->vlan)
return 0;
@@ -279,6 +277,10 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
req->fid = cpu_to_le16(vf->fw_fid);
req->dflt_vlan = cpu_to_le16(vlan_tag);
req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+ if (bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP) {
+ req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_TPID);
+ req->tpid = vlan_proto;
+ }
rc = hwrm_req_send(bp, req);
if (!rc)
vf->vlan = vlan_tag;
@@ -518,6 +520,56 @@ static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
return hwrm_req_send(bp, req);
}
+static void bnxt_hwrm_roce_sriov_cfg(struct bnxt *bp, int num_vfs)
+{
+ struct hwrm_func_qcaps_output *resp;
+ struct hwrm_func_cfg_input *cfg_req;
+ struct hwrm_func_qcaps_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
+ if (rc)
+ return;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ goto err;
+
+ rc = hwrm_req_init(bp, cfg_req, HWRM_FUNC_CFG);
+ if (rc)
+ goto err;
+
+ cfg_req->fid = cpu_to_le16(0xffff);
+ cfg_req->enables2 =
+ cpu_to_le32(FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF |
+ FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF |
+ FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF |
+ FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF |
+ FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF |
+ FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF);
+ cfg_req->roce_max_av_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_av) / num_vfs);
+ cfg_req->roce_max_cq_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_cq) / num_vfs);
+ cfg_req->roce_max_mrw_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_mrw) / num_vfs);
+ cfg_req->roce_max_qp_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_qp) / num_vfs);
+ cfg_req->roce_max_srq_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_srq) / num_vfs);
+ cfg_req->roce_max_gid_per_vf =
+ cpu_to_le32(le32_to_cpu(resp->roce_vf_max_gid) / num_vfs);
+
+ rc = hwrm_req_send(bp, cfg_req);
+
+err:
+ hwrm_req_drop(bp, req);
+ if (rc)
+ netdev_err(bp->dev, "RoCE sriov configuration failed\n");
+}
+
/* Only called by PF to reserve resources for VFs, returns actual number of
* VFs configured, or < 0 on error.
*/
@@ -757,6 +809,9 @@ int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
*num_vfs = rc;
}
+ if (BNXT_RDMA_SRIOV_EN(bp) && BNXT_ROCE_VF_RESC_CAP(bp))
+ bnxt_hwrm_roce_sriov_cfg(bp, *num_vfs);
+
return 0;
}
@@ -900,11 +955,6 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
struct net_device *dev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(dev);
- if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
- netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
- return 0;
- }
-
rtnl_lock();
if (!netif_running(dev)) {
netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
@@ -950,8 +1000,11 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
struct hwrm_fwd_resp_input *req;
int rc;
- if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
+ if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) {
+ netdev_warn_once(bp->dev, "HWRM fwd response too big (%d bytes)\n",
+ msg_size);
return -EINVAL;
+ }
rc = hwrm_req_init(bp, req, HWRM_FWD_RESP);
if (!rc) {
@@ -1085,7 +1138,7 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
rc = bnxt_hwrm_exec_fwd_resp(
bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
} else {
- struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0};
+ struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {};
struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
phy_qcfg_req =
@@ -1096,6 +1149,11 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
mutex_unlock(&bp->link_lock);
phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
+ /* New SPEEDS2 fields are beyond the legacy structure, so
+ * clear the SPEEDS2_SUPPORTED flag.
+ */
+ phy_qcfg_resp.option_flags &=
+ ~PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED;
phy_qcfg_resp.valid = 1;
if (vf->flags & BNXT_VF_LINK_UP) {
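
The VF VLAN change above stops rejecting a non-zero 802.1p priority and instead folds it into the default VLAN tag, optionally with an 802.1ad TPID when the firmware advertises BNXT_FW_CAP_DFLT_VLAN_TPID_PCP. A small sketch of the validation and tag layout (constants mirror <linux/if_vlan.h>; the helper name is invented):

    #define DEMO_VLAN_N_VID      4096
    #define DEMO_VLAN_PRIO_SHIFT 13
    #define DEMO_MAX_PRIORITIES  8   /* IEEE 802.1Q priorities 0..7 */

    /* Tag layout: PCP in bits 15..13, VID in bits 11..0. As in the
     * driver, VID and QoS must be in range, and a priority with a
     * zero VID (priority tagging) is refused.
     */
    static int demo_build_vlan_tag(unsigned int vid, unsigned int qos,
                                   unsigned short *tag)
    {
            if (vid >= DEMO_VLAN_N_VID || qos >= DEMO_MAX_PRIORITIES ||
                (!vid && qos))
                    return -1;

            *tag = (unsigned short)(vid | qos << DEMO_VLAN_PRIO_SHIFT);
            return 0;
    }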
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 273c9ba48f09..d2ca90407cce 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -370,6 +370,7 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
struct bnxt_tc_flow *flow)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(tc_flow_cmd);
+ struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
struct flow_dissector *dissector = rule->match.dissector;
/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
@@ -380,6 +381,9 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
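
The new guard in bnxt_tc_parse_flow() uses flow_rule_match_has_control_flags(), which reports unsupported FLOW_DISSECTOR_KEY_CONTROL flags through the extack. The usual shape of that guard at the top of a flower parser, sketched with an invented function name:

    static int demo_parse_flow(struct flow_cls_offload *f)
    {
            struct flow_rule *rule = flow_cls_offload_flow_rule(f);

            /* Reject rules carrying control flags (e.g. fragment bits)
             * that this offload path does not evaluate; the helper sets
             * an extack message for the user.
             */
            if (flow_rule_match_has_control_flags(rule, f->common.extack))
                    return -EOPNOTSUPP;

            /* ... dissect the remaining match keys ... */
            return 0;
    }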
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 93f9bd55020f..e4a7f37036ed 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -31,21 +31,74 @@ static DEFINE_IDA(bnxt_aux_dev_ids);
static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
{
struct bnxt_en_dev *edev = bp->edev;
- int num_msix, idx, i;
+ int num_msix, i;
if (!edev->ulp_tbl->msix_requested) {
netdev_warn(bp->dev, "Requested MSI-X vectors insufficient\n");
return;
}
num_msix = edev->ulp_tbl->msix_requested;
- idx = edev->ulp_tbl->msix_base;
for (i = 0; i < num_msix; i++) {
- ent[i].vector = bp->irq_tbl[idx + i].vector;
- ent[i].ring_idx = idx + i;
+ ent[i].vector = bp->irq_tbl[i].vector;
+ ent[i].ring_idx = i;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
ent[i].db_offset = bp->db_offset;
else
- ent[i].db_offset = (idx + i) * 0x80;
+ ent[i].db_offset = i * 0x80;
+ }
+}
+
+int bnxt_get_ulp_msix_num(struct bnxt *bp)
+{
+ if (bp->edev)
+ return bp->edev->ulp_num_msix_vec;
+ return 0;
+}
+
+void bnxt_set_ulp_msix_num(struct bnxt *bp, int num)
+{
+ if (bp->edev)
+ bp->edev->ulp_num_msix_vec = num;
+}
+
+int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp)
+{
+ if (bnxt_ulp_registered(bp->edev))
+ return bp->edev->ulp_num_msix_vec;
+ return 0;
+}
+
+int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
+{
+ if (bp->edev)
+ return bp->edev->ulp_num_ctxs;
+ return 0;
+}
+
+void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ulp_ctx)
+{
+ if (bp->edev)
+ bp->edev->ulp_num_ctxs = num_ulp_ctx;
+}
+
+int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp)
+{
+ if (bnxt_ulp_registered(bp->edev))
+ return bp->edev->ulp_num_ctxs;
+ return 0;
+}
+
+void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp)
+{
+ if (bp->edev) {
+ bp->edev->ulp_num_ctxs = BNXT_MIN_ROCE_STAT_CTXS;
+ /* Reserve one additional stat_ctx for PF0 (except
+ * on 1-port NICs) as it also creates one stat_ctx
+ * for PF1 in case of RoCE bonding.
+ */
+ if (BNXT_PF(bp) && !bp->pf.port_id &&
+ bp->port_count > 1)
+ bp->edev->ulp_num_ctxs++;
}
}
@@ -57,25 +110,36 @@ int bnxt_register_dev(struct bnxt_en_dev *edev,
struct bnxt *bp = netdev_priv(dev);
unsigned int max_stat_ctxs;
struct bnxt_ulp *ulp;
+ int rc = 0;
+ rtnl_lock();
+ mutex_lock(&edev->en_dev_lock);
+ if (!bp->irq_tbl) {
+ rc = -ENODEV;
+ goto exit;
+ }
max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
- bp->cp_nr_rings == max_stat_ctxs)
- return -ENOMEM;
+ bp->cp_nr_rings == max_stat_ctxs) {
+ rc = -ENOMEM;
+ goto exit;
+ }
ulp = edev->ulp_tbl;
- if (!ulp)
- return -ENOMEM;
-
ulp->handle = handle;
rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
if (test_bit(BNXT_STATE_OPEN, &bp->state))
- bnxt_hwrm_vnic_cfg(bp, 0);
+ bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[BNXT_VNIC_DEFAULT]);
+
+ edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
- return 0;
+exit:
+ mutex_unlock(&edev->en_dev_lock);
+ rtnl_unlock();
+ return rc;
}
EXPORT_SYMBOL(bnxt_register_dev);
@@ -87,8 +151,11 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
int i = 0;
ulp = edev->ulp_tbl;
+ rtnl_lock();
+ mutex_lock(&edev->en_dev_lock);
if (ulp->msix_requested)
edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
+ edev->ulp_tbl->msix_requested = 0;
if (ulp->max_async_event_id)
bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
@@ -101,40 +168,25 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
msleep(100);
i++;
}
+ mutex_unlock(&edev->en_dev_lock);
+ rtnl_unlock();
return;
}
EXPORT_SYMBOL(bnxt_unregister_dev);
-int bnxt_get_ulp_msix_num(struct bnxt *bp)
-{
- u32 roce_msix = BNXT_VF(bp) ?
- BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX;
-
- return ((bp->flags & BNXT_FLAG_ROCE_CAP) ?
- min_t(u32, roce_msix, num_online_cpus()) : 0);
-}
-
-int bnxt_get_ulp_msix_base(struct bnxt *bp)
+static int bnxt_set_dflt_ulp_msix(struct bnxt *bp)
{
- if (bnxt_ulp_registered(bp->edev)) {
- struct bnxt_en_dev *edev = bp->edev;
+ int roce_msix = BNXT_MAX_ROCE_MSIX;
- if (edev->ulp_tbl->msix_requested)
- return edev->ulp_tbl->msix_base;
- }
- return 0;
-}
-
-int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
-{
- if (bnxt_ulp_registered(bp->edev)) {
- struct bnxt_en_dev *edev = bp->edev;
+ if (BNXT_VF(bp))
+ roce_msix = BNXT_MAX_ROCE_MSIX_VF;
+ else if (bp->port_partition_type)
+ roce_msix = BNXT_MAX_ROCE_MSIX_NPAR_PF;
- if (edev->ulp_tbl->msix_requested)
- return BNXT_MIN_ROCE_STAT_CTXS;
- }
-
- return 0;
+ /* NQ MSIX vectors should match the number of CPUs plus 1 more for
+ * the CREQ MSIX, up to the default.
+ */
+ return min_t(int, roce_msix, num_online_cpus() + 1);
}
int bnxt_send_msg(struct bnxt_en_dev *edev,
@@ -156,7 +208,7 @@ int bnxt_send_msg(struct bnxt_en_dev *edev,
rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
if (rc)
- return rc;
+ goto drop_req;
hwrm_req_timeout(bp, req, fw_msg->timeout);
resp = hwrm_req_hold(bp, req);
@@ -168,6 +220,7 @@ int bnxt_send_msg(struct bnxt_en_dev *edev,
memcpy(fw_msg->resp, resp, resp_len);
}
+drop_req:
hwrm_req_drop(bp, req);
return rc;
}
@@ -181,13 +234,19 @@ void bnxt_ulp_stop(struct bnxt *bp)
if (!edev)
return;
+ mutex_lock(&edev->en_dev_lock);
+ if (!bnxt_ulp_registered(edev)) {
+ mutex_unlock(&edev->en_dev_lock);
+ return;
+ }
+
edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
if (aux_priv) {
struct auxiliary_device *adev;
adev = &aux_priv->aux_dev;
if (adev->dev.driver) {
- struct auxiliary_driver *adrv;
+ const struct auxiliary_driver *adrv;
pm_message_t pm = {};
adrv = to_auxiliary_drv(adev->dev.driver);
@@ -195,6 +254,7 @@ void bnxt_ulp_stop(struct bnxt *bp)
adrv->suspend(adev, pm);
}
}
+ mutex_unlock(&edev->en_dev_lock);
}
void bnxt_ulp_start(struct bnxt *bp, int err)
@@ -210,25 +270,35 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
if (err)
return;
+ mutex_lock(&edev->en_dev_lock);
+ if (!bnxt_ulp_registered(edev)) {
+ mutex_unlock(&edev->en_dev_lock);
+ return;
+ }
+
+ if (edev->ulp_tbl->msix_requested)
+ bnxt_fill_msix_vecs(bp, edev->msix_entries);
+
if (aux_priv) {
struct auxiliary_device *adev;
adev = &aux_priv->aux_dev;
if (adev->dev.driver) {
- struct auxiliary_driver *adrv;
+ const struct auxiliary_driver *adrv;
adrv = to_auxiliary_drv(adev->dev.driver);
edev->en_state = bp->state;
adrv->resume(adev);
}
}
-
+ mutex_unlock(&edev->en_dev_lock);
}
void bnxt_ulp_irq_stop(struct bnxt *bp)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
+ bool reset = false;
if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return;
@@ -242,7 +312,9 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
ops = rtnl_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_irq_stop)
return;
- ops->ulp_irq_stop(ulp->handle);
+ if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
+ reset = true;
+ ops->ulp_irq_stop(ulp->handle, reset);
}
}
@@ -277,9 +349,36 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
}
}
-int bnxt_register_async_events(struct bnxt_en_dev *edev,
- unsigned long *events_bmap,
- u16 max_id)
+void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
+{
+ u16 event_id = le16_to_cpu(cmpl->event_id);
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ struct bnxt_ulp *ulp;
+
+ if (!bnxt_ulp_registered(edev))
+ return;
+ ulp = edev->ulp_tbl;
+
+ rcu_read_lock();
+
+ ops = rcu_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_async_notifier)
+ goto exit_unlock_rcu;
+ if (!ulp->async_events_bmap || event_id > ulp->max_async_event_id)
+ goto exit_unlock_rcu;
+
+ /* Read max_async_event_id first before testing the bitmap. */
+ smp_rmb();
+
+ if (test_bit(event_id, ulp->async_events_bmap))
+ ops->ulp_async_notifier(ulp->handle, cmpl);
+exit_unlock_rcu:
+ rcu_read_unlock();
+}
+
+void bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap, u16 max_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
@@ -291,7 +390,6 @@ int bnxt_register_async_events(struct bnxt_en_dev *edev,
smp_wmb();
ulp->max_async_event_id = max_id;
bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
- return 0;
}
EXPORT_SYMBOL(bnxt_register_async_events);
@@ -306,7 +404,6 @@ void bnxt_rdma_aux_device_uninit(struct bnxt *bp)
aux_priv = bp->aux_priv;
adev = &aux_priv->aux_dev;
- auxiliary_device_delete(adev);
auxiliary_device_uninit(adev);
}
@@ -324,6 +421,14 @@ static void bnxt_aux_dev_release(struct device *dev)
bp->aux_priv = NULL;
}
+void bnxt_rdma_aux_device_del(struct bnxt *bp)
+{
+ if (!bp->edev)
+ return;
+
+ auxiliary_device_delete(&bp->aux_priv->aux_dev);
+}
+
static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
{
edev->net = bp->dev;
@@ -331,6 +436,7 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->l2_db_size = bp->db_size;
edev->l2_db_size_nc = bp->db_size;
edev->l2_db_offset = bp->db_offset;
+ mutex_init(&edev->en_dev_lock);
if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
@@ -338,13 +444,33 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
if (bp->flags & BNXT_FLAG_VF)
edev->flags |= BNXT_EN_FLAG_VF;
+ if (BNXT_ROCE_VF_RESC_CAP(bp))
+ edev->flags |= BNXT_EN_FLAG_ROCE_VF_RES_MGMT;
+ if (BNXT_SW_RES_LMT(bp))
+ edev->flags |= BNXT_EN_FLAG_SW_RES_LMT;
edev->chip_num = bp->chip_num;
edev->hw_ring_stats_size = bp->hw_ring_stats_size;
edev->pf_port_id = bp->pf.port_id;
edev->en_state = bp->state;
edev->bar0 = bp->bar0;
- edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
+}
+
+void bnxt_rdma_aux_device_add(struct bnxt *bp)
+{
+ struct auxiliary_device *aux_dev;
+ int rc;
+
+ if (!bp->edev)
+ return;
+
+ aux_dev = &bp->aux_priv->aux_dev;
+ rc = auxiliary_device_add(aux_dev);
+ if (rc) {
+ netdev_warn(bp->dev, "Failed to add auxiliary device for ROCE\n");
+ auxiliary_device_uninit(aux_dev);
+ bp->flags &= ~BNXT_FLAG_ROCE_CAP;
+ }
}
void bnxt_rdma_aux_device_init(struct bnxt *bp)
@@ -392,21 +518,16 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp)
if (!edev)
goto aux_dev_uninit;
+ aux_priv->edev = edev;
+
ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
if (!ulp)
goto aux_dev_uninit;
edev->ulp_tbl = ulp;
- aux_priv->edev = edev;
bp->edev = edev;
bnxt_set_edev_info(edev, bp);
-
- rc = auxiliary_device_add(aux_dev);
- if (rc) {
- netdev_warn(bp->dev,
- "Failed to add auxiliary device for ROCE\n");
- goto aux_dev_uninit;
- }
+ bp->ulp_num_msix_want = bnxt_set_dflt_ulp_msix(bp);
return;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index b9e73de14b57..7fa3b8d1ebd2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -15,8 +15,10 @@
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
-#define BNXT_MAX_ROCE_MSIX 9
-#define BNXT_MAX_VF_ROCE_MSIX 2
+
+#define BNXT_MAX_ROCE_MSIX_VF 2
+#define BNXT_MAX_ROCE_MSIX_NPAR_PF 5
+#define BNXT_MAX_ROCE_MSIX 64
struct hwrm_async_event_cmpl;
struct bnxt;
@@ -28,7 +30,9 @@ struct bnxt_msix_entry {
};
struct bnxt_ulp_ops {
- void (*ulp_irq_stop)(void *);
+ /* async_notifier() cannot sleep (in BH context) */
+ void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
+ void (*ulp_irq_stop)(void *, bool);
void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
};
@@ -46,7 +50,6 @@ struct bnxt_ulp {
unsigned long *async_events_bmap;
u16 max_async_event_id;
u16 msix_requested;
- u16 msix_base;
atomic_t ref_count;
};
@@ -63,6 +66,9 @@ struct bnxt_en_dev {
#define BNXT_EN_FLAG_ULP_STOPPED 0x8
#define BNXT_EN_FLAG_VF 0x10
#define BNXT_EN_VF(edev) ((edev)->flags & BNXT_EN_FLAG_VF)
+ #define BNXT_EN_FLAG_ROCE_VF_RES_MGMT 0x20
+ #define BNXT_EN_FLAG_SW_RES_LMT 0x40
+#define BNXT_EN_SW_RES_LMT(edev) ((edev)->flags & BNXT_EN_FLAG_SW_RES_LMT)
struct bnxt_ulp *ulp_tbl;
int l2_db_size; /* Doorbell BAR size in
@@ -86,18 +92,28 @@ struct bnxt_en_dev {
* updated in resume.
*/
void __iomem *bar0;
+
+ u16 ulp_num_msix_vec;
+ u16 ulp_num_ctxs;
+
+ /* serialize ulp operations */
+ struct mutex en_dev_lock;
};
static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev)
{
- if (edev && edev->ulp_tbl)
+ if (edev && rcu_access_pointer(edev->ulp_tbl->ulp_ops))
return true;
return false;
}
int bnxt_get_ulp_msix_num(struct bnxt *bp);
-int bnxt_get_ulp_msix_base(struct bnxt *bp);
+int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp);
+void bnxt_set_ulp_msix_num(struct bnxt *bp, int num);
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp);
+void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ctxs);
+int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp);
+void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp);
void bnxt_ulp_stop(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp, int err);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
@@ -105,11 +121,13 @@ void bnxt_ulp_irq_stop(struct bnxt *bp);
void bnxt_ulp_irq_restart(struct bnxt *bp, int err);
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
void bnxt_rdma_aux_device_uninit(struct bnxt *bp);
+void bnxt_rdma_aux_device_del(struct bnxt *bp);
+void bnxt_rdma_aux_device_add(struct bnxt *bp);
void bnxt_rdma_aux_device_init(struct bnxt *bp);
int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops,
void *handle);
void bnxt_unregister_dev(struct bnxt_en_dev *edev);
int bnxt_send_msg(struct bnxt_en_dev *edev, struct bnxt_fw_msg *fw_msg);
-int bnxt_register_async_events(struct bnxt_en_dev *edev,
- unsigned long *events_bmap, u16 max_id);
+void bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap, u16 max_id);
#endif
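
The header changes above give bnxt_ulp_ops an async notifier and a reset flag on ulp_irq_stop(), and bnxt_register_dev() can now fail. A hypothetical RoCE-side consumer of the updated interface might look like this (all demo_* names are invented):

    /* Runs under rcu_read_lock() in BH context: must not sleep. */
    static void demo_async_notifier(void *handle,
                                    struct hwrm_async_event_cmpl *cmpl)
    {
            /* defer any sleeping work to a workqueue */
    }

    /* The bool reports whether the stop is part of a firmware reset. */
    static void demo_irq_stop(void *handle, bool fw_reset)
    {
            /* quiesce queues; keep state around if fw_reset is true */
    }

    static struct bnxt_ulp_ops demo_ops = {
            .ulp_async_notifier = demo_async_notifier,
            .ulp_irq_stop       = demo_irq_stop,
            /* .ulp_irq_restart omitted from this sketch */
    };

    static int demo_probe(struct bnxt_en_dev *edev, void *handle)
    {
            /* may now return -ENODEV if the L2 device has no IRQ table */
            return bnxt_register_dev(edev, &demo_ops, handle);
    }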
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 4079538bc310..299822cacca4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -197,7 +197,7 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir);
xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
- xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false);
+ xdp_prepare_buff(xdp, data_ptr - offset, offset, len, true);
}
void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
@@ -222,7 +222,7 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
* false - packet should be passed to the stack.
*/
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct xdp_buff xdp, struct page *page, u8 **data_ptr,
+ struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
unsigned int *len, u8 *event)
{
struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
@@ -244,9 +244,9 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
txr = rxr->bnapi->tx_ring[0];
/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
- orig_data = xdp.data;
+ orig_data = xdp->data;
- act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
tx_avail = bnxt_tx_avail(bp, txr);
/* If the tx ring is not full, we must not update the rx producer yet
@@ -255,10 +255,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
if (tx_avail != bp->tx_ring_size)
*event &= ~BNXT_RX_EVENT;
- *len = xdp.data_end - xdp.data;
- if (orig_data != xdp.data) {
- offset = xdp.data - xdp.data_hard_start;
- *data_ptr = xdp.data_hard_start + offset;
+ *len = xdp->data_end - xdp->data;
+ if (orig_data != xdp->data) {
+ offset = xdp->data - xdp->data_hard_start;
+ *data_ptr = xdp->data_hard_start + offset;
}
switch (act) {
@@ -270,8 +270,8 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
mapping = rx_buf->mapping - bp->rx_dma_offset;
*event &= BNXT_TX_CMP_EVENT;
- if (unlikely(xdp_buff_has_frags(&xdp))) {
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
tx_needed += sinfo->nr_frags;
*event = BNXT_AGG_EVENT;
@@ -279,7 +279,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
if (tx_avail < tx_needed) {
trace_xdp_exception(bp->dev, xdp_prog, act);
- bnxt_xdp_buff_frags_free(rxr, &xdp);
+ bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
@@ -289,7 +289,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
*event |= BNXT_TX_EVENT;
__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
- NEXT_RX(rxr->rx_prod), &xdp);
+ NEXT_RX(rxr->rx_prod), xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
case XDP_REDIRECT:
@@ -297,21 +297,16 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
* redirect is coming from a frame received by the
* bnxt_en driver.
*/
- rx_buf = &rxr->rx_buf_ring[cons];
- mapping = rx_buf->mapping - bp->rx_dma_offset;
- dma_unmap_page_attrs(&pdev->dev, mapping,
- BNXT_RX_PAGE_SIZE, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
/* if we are unable to allocate a new buffer, abort and reuse */
if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
- bnxt_xdp_buff_frags_free(rxr, &xdp);
+ bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
- if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
+ if (xdp_do_redirect(bp->dev, xdp, xdp_prog)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
page_pool_recycle_direct(rxr->page_pool, page);
return true;
@@ -326,7 +321,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
trace_xdp_exception(bp->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
- bnxt_xdp_buff_frags_free(rxr, &xdp);
+ bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
break;
}
@@ -400,6 +395,10 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
return -EOPNOTSUPP;
}
+ if (prog && bp->flags & BNXT_FLAG_HDS) {
+ netdev_warn(dev, "XDP is disallowed when HDS is enabled.\n");
+ return -EOPNOTSUPP;
+ }
if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
return -EOPNOTSUPP;
@@ -427,15 +426,8 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
bnxt_set_rx_skb_mode(bp, true);
xdp_features_set_redirect_target(dev, true);
} else {
- int rx, tx;
-
xdp_features_clear_redirect_target(dev);
bnxt_set_rx_skb_mode(bp, false);
- bnxt_get_max_rings(bp, &rx, &tx, true);
- if (rx > 1) {
- bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
- bp->dev->hw_features |= NETIF_F_LRO;
- }
}
bp->tx_nr_rings_xdp = tx_xdp;
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
@@ -468,23 +460,16 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
struct sk_buff *
bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
- struct page_pool *pool, struct xdp_buff *xdp,
- struct rx_cmp_ext *rxcmp1)
+ struct page_pool *pool, struct xdp_buff *xdp)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
if (!skb)
return NULL;
- skb_checksum_none_assert(skb);
- if (RX_CMP_L4_CS_OK(rxcmp1)) {
- if (bp->dev->features & NETIF_F_RXCSUM) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = RX_CMP_ENCAP(rxcmp1);
- }
- }
+
xdp_update_skb_shared_info(skb, num_frags,
sinfo->xdp_frags_size,
- BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
+ BNXT_RX_PAGE_SIZE * num_frags,
xdp_buff_is_frag_pfmemalloc(xdp));
return skb;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 5e412c5655ba..220285e190fc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -18,7 +18,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct xdp_buff *xdp);
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget);
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct xdp_buff xdp, struct page *page, u8 **data_ptr,
+ struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
unsigned int *len, u8 *event);
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
@@ -33,6 +33,5 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
struct xdp_buff *xdp);
struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
u8 num_frags, struct page_pool *pool,
- struct xdp_buff *xdp,
- struct rx_cmp_ext *rxcmp1);
+ struct xdp_buff *xdp);
#endif
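
Passing the xdp_buff by pointer matters because the BPF program may move xdp->data (for example via bpf_xdp_adjust_head()); with the old by-value parameter those adjustments were applied to a copy. A sketch of the caller pattern under the new signature, assuming the driver's bnxt_xdp_buff_init() helper keeps its usual arguments:

    static bool demo_rx_try_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                                u16 cons, struct page *page, u8 **data_ptr,
                                unsigned int *len, u8 *event)
    {
            struct xdp_buff xdp;

            bnxt_xdp_buff_init(bp, rxr, cons, *data_ptr, *len, &xdp);

            /* Pass &xdp so head/tail moves made by the BPF program stay
             * visible here; *data_ptr and *len are updated to match.
             */
            return bnxt_rx_xdp(bp, rxr, cons, &xdp, page, data_ptr, len,
                               event);
    }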
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 7926aaef8f0c..a9040c42d2ff 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -31,6 +31,7 @@
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
+#include <linux/workqueue.h>
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define BCM_VLAN 1
#endif
@@ -1107,10 +1108,11 @@ static int cnic_init_uio(struct cnic_dev *dev)
TX_MAX_TSS_RINGS + 1);
uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
CNIC_PAGE_MASK;
+ uinfo->mem[1].dma_addr = cp->status_blk_map;
if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
- uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
+ uinfo->mem[1].size = PAGE_ALIGN(BNX2_SBLK_MSIX_ALIGN_SIZE * 9);
else
- uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
+ uinfo->mem[1].size = PAGE_ALIGN(BNX2_SBLK_MSIX_ALIGN_SIZE);
uinfo->name = "bnx2_cnic";
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
@@ -1118,20 +1120,26 @@ static int cnic_init_uio(struct cnic_dev *dev)
uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
CNIC_PAGE_MASK;
- uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
+ uinfo->mem[1].dma_addr = cp->status_blk_map;
+ uinfo->mem[1].size = PAGE_ALIGN(sizeof(*cp->bnx2x_def_status_blk));
uinfo->name = "bnx2x_cnic";
}
- uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
+ uinfo->mem[1].dma_device = &dev->pcidev->dev;
+ uinfo->mem[1].memtype = UIO_MEM_DMA_COHERENT;
uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
- uinfo->mem[2].size = udev->l2_ring_size;
- uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
+ uinfo->mem[2].dma_addr = udev->l2_ring_map;
+ uinfo->mem[2].size = PAGE_ALIGN(udev->l2_ring_size);
+ uinfo->mem[2].dma_device = &dev->pcidev->dev;
+ uinfo->mem[2].memtype = UIO_MEM_DMA_COHERENT;
uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
- uinfo->mem[3].size = udev->l2_buf_size;
- uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
+ uinfo->mem[3].dma_addr = udev->l2_buf_map;
+ uinfo->mem[3].size = PAGE_ALIGN(udev->l2_buf_size);
+ uinfo->mem[3].dma_device = &dev->pcidev->dev;
+ uinfo->mem[3].memtype = UIO_MEM_DMA_COHERENT;
uinfo->version = CNIC_MODULE_VERSION;
uinfo->irq = UIO_IRQ_CUSTOM;
@@ -1313,6 +1321,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
return 0;
cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
+ cp->status_blk_map = cp->ethdev->irq_arr[1].status_blk_map;
cp->l2_rx_ring_size = 15;
@@ -3007,9 +3016,9 @@ static int cnic_service_bnx2(void *data, void *status_blk)
return cnic_service_bnx2_queues(dev);
}
-static void cnic_service_bnx2_msix(struct tasklet_struct *t)
+static void cnic_service_bnx2_msix(struct work_struct *work)
{
- struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
+ struct cnic_local *cp = from_work(cp, work, cnic_irq_bh_work);
struct cnic_dev *dev = cp->dev;
cp->last_status_idx = cnic_service_bnx2_queues(dev);
@@ -3028,7 +3037,7 @@ static void cnic_doirq(struct cnic_dev *dev)
prefetch(cp->status_blk.gen);
prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
- tasklet_schedule(&cp->cnic_irq_task);
+ queue_work(system_bh_wq, &cp->cnic_irq_bh_work);
}
}
@@ -3132,9 +3141,9 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
return last_status;
}
-static void cnic_service_bnx2x_bh(struct tasklet_struct *t)
+static void cnic_service_bnx2x_bh_work(struct work_struct *work)
{
- struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
+ struct cnic_local *cp = from_work(cp, work, cnic_irq_bh_work);
struct cnic_dev *dev = cp->dev;
struct bnx2x *bp = netdev_priv(dev->netdev);
u32 status_idx, new_status_idx;
@@ -3674,7 +3683,8 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
#if defined(CONFIG_INET)
struct rtable *rt;
- rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
+ rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0,
+ RT_SCOPE_UNIVERSE);
if (!IS_ERR(rt)) {
*dst = &rt->dst;
return 0;
@@ -4419,7 +4429,7 @@ static void cnic_free_irq(struct cnic_dev *dev)
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
cp->disable_int_sync(dev);
- tasklet_kill(&cp->cnic_irq_task);
+ cancel_work_sync(&cp->cnic_irq_bh_work);
free_irq(ethdev->irq_arr[0].vector, dev);
}
}
@@ -4432,7 +4442,7 @@ static int cnic_request_irq(struct cnic_dev *dev)
err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
if (err)
- tasklet_disable(&cp->cnic_irq_task);
+ disable_work_sync(&cp->cnic_irq_bh_work);
return err;
}
@@ -4455,7 +4465,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
cp->last_status_idx = cp->status_blk.bnx2->status_idx;
- tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2_msix);
+ INIT_WORK(&cp->cnic_irq_bh_work, cnic_service_bnx2_msix);
err = cnic_request_irq(dev);
if (err)
return err;
@@ -4864,7 +4874,7 @@ static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
struct cnic_eth_dev *ethdev = cp->ethdev;
int err = 0;
- tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2x_bh);
+ INIT_WORK(&cp->cnic_irq_bh_work, cnic_service_bnx2x_bh_work);
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
err = cnic_request_irq(dev);
@@ -5323,6 +5333,7 @@ static int cnic_start_hw(struct cnic_dev *dev)
pci_dev_get(dev->pcidev);
cp->func = PCI_FUNC(dev->pcidev->devfn);
cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
+ cp->status_blk_map = ethdev->irq_arr[0].status_blk_map;
cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
err = cp->alloc_resc(dev);
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 4baea81bae7a..1a314a75d2d2 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -260,6 +260,7 @@ struct cnic_local {
#define SM_RX_ID 0
#define SM_TX_ID 1
} status_blk;
+ dma_addr_t status_blk_map;
struct host_sp_status_block *bnx2x_def_status_blk;
@@ -267,7 +268,7 @@ struct cnic_local {
u32 bnx2x_igu_sb_id;
u32 int_num;
u32 last_status_idx;
- struct tasklet_struct cnic_irq_task;
+ struct work_struct cnic_irq_bh_work;
struct kcqe *completed_kcq[MAX_COMPLETED_KCQE];
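
The cnic conversion above is the generic tasklet-to-BH-workqueue migration: work queued on system_bh_wq still executes in softirq context, so the handler body is unchanged. The overall shape, reduced to an invented driver context:

    struct demo_ctx {
            struct work_struct bh_work;
    };

    static void demo_bh_work(struct work_struct *work)
    {
            struct demo_ctx *ctx = from_work(ctx, work, bh_work);
            /* former tasklet body runs here, still in BH context */
    }

    static void demo_init(struct demo_ctx *ctx)
    {
            INIT_WORK(&ctx->bh_work, demo_bh_work); /* was tasklet_setup() */
    }

    static void demo_kick(struct demo_ctx *ctx)
    {
            queue_work(system_bh_wq, &ctx->bh_work); /* was tasklet_schedule() */
    }

    static void demo_teardown(struct demo_ctx *ctx)
    {
            cancel_work_sync(&ctx->bh_work); /* was tasklet_kill() */
    }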
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 789e5c7e9311..49a11ec80b36 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -190,6 +190,7 @@ struct cnic_ops {
struct cnic_irq {
unsigned int vector;
void *status_blk;
+ dma_addr_t status_blk_map;
u32 status_blk_num;
u32 status_blk_num2;
u32 irq_flags;
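
Exporting the status blocks and L2 rings as UIO_MEM_DMA_COHERENT (instead of UIO_MEM_LOGICAL) requires handing the UIO core the DMA handle and the owning device so it can mmap the coherent allocation correctly. A sketch of filling one such mapping, assuming the uio_mem fields shown in the diff:

    static void demo_fill_uio_mem(struct uio_info *uinfo, int idx,
                                  struct device *dma_dev, void *cpu_addr,
                                  dma_addr_t dma_addr, size_t size)
    {
            uinfo->mem[idx].addr = (unsigned long)cpu_addr;
            uinfo->mem[idx].dma_addr = dma_addr;
            uinfo->mem[idx].size = PAGE_ALIGN(size); /* mmap works on pages */
            uinfo->mem[idx].dma_device = dma_dev;
            uinfo->mem[idx].memtype = UIO_MEM_DMA_COHERENT;
    }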
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index fef1d2f53489..3e93f957430b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet: " fmt
@@ -37,7 +37,7 @@
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include "bcmgenet.h"
@@ -1144,14 +1144,14 @@ static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
u8 *data)
{
+ const char *str;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < BCMGENET_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- bcmgenet_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ str = bcmgenet_gstrings_stats[i].stat_string;
+ ethtool_puts(&data, str);
}
break;
}
@@ -1316,10 +1316,10 @@ void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
priv->eee.tx_lpi_enabled = tx_lpi_enabled;
}
-static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
+static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct ethtool_eee *p = &priv->eee;
+ struct ethtool_keee *p = &priv->eee;
if (GENET_IS_V1(priv))
return -EOPNOTSUPP;
@@ -1333,10 +1333,10 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
return phy_ethtool_get_eee(dev->phydev, e);
}
-static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
+static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_keee *e)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct ethtool_eee *p = &priv->eee;
+ struct ethtool_keee *p = &priv->eee;
bool active;
if (GENET_IS_V1(priv))
@@ -2405,7 +2405,7 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
if (ring->dim.use_dim) {
dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
ring->dim.bytes, &dim_sample);
- net_dim(&ring->dim.dim, dim_sample);
+ net_dim(&ring->dim.dim, &dim_sample);
}
return work_done;
@@ -2467,14 +2467,18 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
u32 reg;
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- if (reg & CMD_SW_RESET)
+ if (reg & CMD_SW_RESET) {
+ spin_unlock_bh(&priv->reg_lock);
return;
+ }
if (enable)
reg |= mask;
else
reg &= ~mask;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
/* UniMAC stops on a packet boundary, wait for a full-size packet
* to be processed
@@ -2490,8 +2494,10 @@ static void reset_umac(struct bcmgenet_priv *priv)
udelay(10);
/* issue soft reset and disable MAC while updating its registers */
+ spin_lock_bh(&priv->reg_lock);
bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
udelay(2);
+ spin_unlock_bh(&priv->reg_lock);
}
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
@@ -3280,7 +3286,7 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
}
/* Returns a reusable dma control register value */
-static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
+static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv, bool flush_rx)
{
unsigned int i;
u32 reg;
@@ -3305,6 +3311,14 @@ static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
udelay(10);
bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
+ if (flush_rx) {
+ reg = bcmgenet_rbuf_ctrl_get(priv);
+ bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
+ udelay(10);
+ bcmgenet_rbuf_ctrl_set(priv, reg);
+ udelay(10);
+ }
+
return dma_ctrl;
}
@@ -3326,7 +3340,9 @@ static void bcmgenet_netif_start(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
/* Start the network engine */
+ netif_addr_lock_bh(dev);
bcmgenet_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
bcmgenet_enable_rx_napi(priv);
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
@@ -3368,8 +3384,8 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- /* Disable RX/TX DMA and flush TX queues */
- dma_ctrl = bcmgenet_dma_disable(priv);
+ /* Disable RX/TX DMA and flush TX and RX queues */
+ dma_ctrl = bcmgenet_dma_disable(priv, true);
/* Reinitialize TDMA and RDMA and SW housekeeping */
ret = bcmgenet_init_dma(priv);
@@ -3587,16 +3603,19 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
* 3. The number of filters needed exceeds the number filters
* supported by the hardware.
*/
+ spin_lock(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
(nfilter > MAX_MDF_FILTER)) {
reg |= CMD_PROMISC;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock(&priv->reg_lock);
bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
return;
} else {
reg &= ~CMD_PROMISC;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock(&priv->reg_lock);
}
/* update MDF filter */
@@ -3995,6 +4014,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
goto err;
}
+ spin_lock_init(&priv->reg_lock);
spin_lock_init(&priv->lock);
/* Set default pause parameters */
@@ -4235,7 +4255,7 @@ static int bcmgenet_resume(struct device *d)
bcmgenet_hfb_create_rxnfc_filter(priv, rule);
/* Disable RX/TX DMA and flush TX queues */
- dma_ctrl = bcmgenet_dma_disable(priv);
+ dma_ctrl = bcmgenet_dma_disable(priv, false);
/* Reinitialize TDMA and RDMA and SW housekeeping */
ret = bcmgenet_init_dma(priv);
@@ -4330,7 +4350,7 @@ MODULE_DEVICE_TABLE(acpi, genet_acpi_match);
static struct platform_driver bcmgenet_driver = {
.probe = bcmgenet_probe,
- .remove_new = bcmgenet_remove,
+ .remove = bcmgenet_remove,
.shutdown = bcmgenet_shutdown,
.driver = {
.name = "bcmgenet",
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 1985c0ec4da2..43b923c48b14 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#ifndef __BCMGENET_H__
@@ -573,6 +573,8 @@ struct bcmgenet_rxnfc_rule {
/* device context */
struct bcmgenet_priv {
void __iomem *base;
+ /* reg_lock: lock to serialize access to shared registers */
+ spinlock_t reg_lock;
enum bcmgenet_version version;
struct net_device *dev;
@@ -645,7 +647,7 @@ struct bcmgenet_priv {
struct bcmgenet_mib_counters mib;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
};
#define GENET_IO_MACRO(name, offset) \
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 7a41cad5788f..3b082114f2e5 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet_wol: " fmt
@@ -41,23 +41,27 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
+ u32 phy_wolopts = 0;
if (dev->phydev) {
phy_ethtool_get_wol(dev->phydev, wol);
- if (wol->supported)
- return;
+ phy_wolopts = wol->wolopts;
}
- if (!device_can_wakeup(kdev)) {
- wol->supported = 0;
- wol->wolopts = 0;
+ /* MAC is not wake-up capable, return what the PHY does */
+ if (!device_can_wakeup(kdev))
return;
- }
- wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
- wol->wolopts = priv->wolopts;
- memset(wol->sopass, 0, sizeof(wol->sopass));
+	/* Overlay MAC capabilities with those of the PHY queried above */
+ wol->supported |= WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
+ wol->wolopts |= priv->wolopts;
+	/* Return the PHY-configured magic password */
+ if (phy_wolopts & WAKE_MAGICSECURE)
+ return;
+
+ /* Otherwise the MAC one */
+ memset(wol->sopass, 0, sizeof(wol->sopass));
if (wol->wolopts & WAKE_MAGICSECURE)
memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}
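
With the early return gone, the reported WoL state is the union of PHY and MAC capabilities, and the SecureOn password comes from whichever layer has WAKE_MAGICSECURE armed, with the PHY taking precedence. A condensed sketch of the merge, assuming the fields shown above and that phy_ethtool_get_wol() has already filled wol (the helper is hypothetical):

/* Sketch: merge PHY- and MAC-level WoL into one report. A PHY-owned
 * SecureOn password is kept; otherwise the MAC's copy is reported.
 */
static void wol_merge_sketch(struct bcmgenet_priv *priv,
			     struct ethtool_wolinfo *wol, u32 phy_wolopts)
{
	wol->supported |= WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
	wol->wolopts |= priv->wolopts;

	if (phy_wolopts & WAKE_MAGICSECURE)
		return;

	memset(wol->sopass, 0, sizeof(wol->sopass));
	if (wol->wolopts & WAKE_MAGICSECURE)
		memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}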
@@ -74,7 +78,7 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
/* Try Wake-on-LAN from the PHY first */
if (dev->phydev) {
ret = phy_ethtool_set_wol(dev->phydev, wol);
- if (ret != -EOPNOTSUPP)
+ if (ret != -EOPNOTSUPP && wol->wolopts)
return ret;
}
@@ -151,6 +155,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
}
/* Can't suspend with WoL if MAC is still in reset */
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
if (reg & CMD_SW_RESET)
reg &= ~CMD_SW_RESET;
@@ -158,6 +163,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
/* disable RX */
reg &= ~CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
mdelay(10);
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
@@ -203,6 +209,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
}
/* Enable CRC forward */
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
priv->crc_fwd_en = 1;
reg |= CMD_CRC_FWD;
@@ -210,6 +217,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
/* Receiver must be enabled for WOL MP detection */
reg |= CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
reg = UMAC_IRQ_MPD_R;
if (hfb_enable)
@@ -256,7 +264,9 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
}
/* Disable CRC Forward */
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~CMD_CRC_FWD;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index cbbe004621bc..c4a3698cef66 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET MDIO routines
*
- * Copyright (c) 2014-2017 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#include <linux/acpi.h>
@@ -76,6 +76,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
reg |= RGMII_LINK;
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
CMD_HD_EN |
@@ -88,6 +89,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
reg |= CMD_TX_EN | CMD_RX_EN;
}
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
active = phy_init_eee(phydev, 0) >= 0;
bcmgenet_eee_enable_set(dev,
@@ -275,6 +277,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
* block for the interface to work, unconditionally clear the
* Out-of-band disable since we do not need it.
*/
+ mutex_lock(&phydev->lock);
reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
reg &= ~OOB_DISABLE;
if (priv->ext_phy) {
@@ -286,6 +289,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
reg |= RGMII_MODE_EN;
}
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ mutex_unlock(&phydev->lock);
if (init)
dev_info(kdev, "configuring instance for %s\n", phy_name);
@@ -476,6 +480,10 @@ static int bcmgenet_mii_register(struct bcmgenet_priv *priv)
ppd.wait_func = bcmgenet_mii_wait;
ppd.wait_func_data = priv;
ppd.bus_name = "bcmgenet MII bus";
+ /* Pass a reference to our "main" clock which is used for MDIO
+ * transfers
+ */
+ ppd.clk = priv->clk;
/* Unimac MDIO bus controller starts at UniMAC offset + MDIO_CMD
* and is 2 * 32-bits word long, 8 bytes total.
@@ -674,7 +682,5 @@ void bcmgenet_mii_exit(struct net_device *dev)
if (of_phy_is_fixed_link(dn))
of_phy_deregister_fixed_link(dn);
of_node_put(priv->phy_dn);
- clk_prepare_enable(priv->clk);
platform_device_unregister(priv->mii_pdev);
- clk_disable_unprepare(priv->clk);
}
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index fcf8485f3446..30865fe03eeb 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2608,7 +2608,7 @@ static void sbmac_remove(struct platform_device *pldev)
static struct platform_driver sbmac_driver = {
.probe = sbmac_probe,
- .remove_new = sbmac_remove,
+ .remove = sbmac_remove,
.driver = {
.name = sbmac_string,
},
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 04964bbe08cf..d9d675f1ebfe 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -55,6 +55,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>
+#include <linux/dmi.h>
#include <net/checksum.h>
#include <net/gso.h>
@@ -221,7 +222,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
-MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
+MODULE_AUTHOR("David S. Miller <davem@redhat.com> and Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
@@ -2338,10 +2339,10 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
tg3_phy_toggle_auxctl_smdsp(tp, false);
}
-static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
+static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_keee *eee)
{
u32 val;
- struct ethtool_eee *dest = &tp->eee;
+ struct ethtool_keee *dest = &tp->eee;
if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
return;
@@ -2362,13 +2363,13 @@ static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
/* Pull lp advertised settings */
if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
return;
- dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(dest->lp_advertised, val);
/* Pull advertised and eee_enabled settings */
if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
return;
dest->eee_enabled = !!val;
- dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+ mii_eee_cap1_mod_linkmode_t(dest->advertised, val);
/* Pull tx_lpi_enabled */
val = tr32(TG3_CPMU_EEE_MODE);
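
struct ethtool_keee carries link-mode bitmaps instead of the legacy u32 advertisement masks, so MDIO register values are folded in with the mii linkmode helpers rather than mmd_eee_adv_to_ethtool_adv_t(). A minimal sketch of both directions, assuming the helpers from <linux/mdio.h> (the wrapper itself is hypothetical):

/* Sketch: translate an EEE advertisement register (IEEE 802.3
 * clause 45, 7.60/7.61 bit layout) to a keee linkmode bitmap and back.
 */
static u32 eee_adv_round_trip_sketch(struct ethtool_keee *eee, u32 val)
{
	/* register -> bitmap: modifies only the EEE-related bits */
	mii_eee_cap1_mod_linkmode_t(eee->advertised, val);

	/* bitmap -> register, e.g. before writing MDIO_AN_EEE_ADV */
	return linkmode_to_mii_eee_cap1_t(eee->advertised);
}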
@@ -3737,7 +3738,7 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
}
do {
- u32 *fw_data = (u32 *)(fw_hdr + 1);
+ __be32 *fw_data = (__be32 *)(fw_hdr + 1);
for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
write_op(tp, cpu_scratch_base +
(be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
@@ -4019,7 +4020,7 @@ static int tg3_power_up(struct tg3 *tp)
static int tg3_setup_phy(struct tg3 *, bool);
-static int tg3_power_down_prepare(struct tg3 *tp)
+static void tg3_power_down_prepare(struct tg3 *tp)
{
u32 misc_host_ctrl;
bool device_should_wake, do_low_power;
@@ -4263,7 +4264,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
- return 0;
+ return;
}
static void tg3_power_down(struct tg3 *tp)
@@ -4354,23 +4355,12 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
if (!err) {
u32 err2;
- val = 0;
- /* Advertise 100-BaseTX EEE ability */
- if (advertise & ADVERTISED_100baseT_Full)
- val |= MDIO_AN_EEE_ADV_100TX;
- /* Advertise 1000-BaseT EEE ability */
- if (advertise & ADVERTISED_1000baseT_Full)
- val |= MDIO_AN_EEE_ADV_1000T;
-
- if (!tp->eee.eee_enabled) {
+ if (!tp->eee.eee_enabled)
val = 0;
- tp->eee.advertised = 0;
- } else {
- tp->eee.advertised = advertise &
- (ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Full);
- }
+ else
+ val = ethtool_adv_to_mmd_eee_adv_t(advertise);
+ mii_eee_cap1_mod_linkmode_t(tp->eee.advertised, val);
err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
if (err)
val = 0;
@@ -4618,7 +4608,7 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
static bool tg3_phy_eee_config_ok(struct tg3 *tp)
{
- struct ethtool_eee eee;
+ struct ethtool_keee eee = {};
if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
return true;
@@ -4626,13 +4616,13 @@ static bool tg3_phy_eee_config_ok(struct tg3 *tp)
tg3_eee_pull_config(tp, &eee);
if (tp->eee.eee_enabled) {
- if (tp->eee.advertised != eee.advertised ||
+ if (!linkmode_equal(tp->eee.advertised, eee.advertised) ||
tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
return false;
} else {
/* EEE is disabled but we're advertising */
- if (eee.advertised)
+ if (!linkmode_empty(eee.advertised))
return false;
}
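
Because the advertisement is now a bitmap, equality and emptiness checks go through the linkmode helpers instead of integer compares. The config check above condenses to roughly the following (hypothetical helper, same logic):

/* Sketch: does the hardware EEE state b match the requested state a? */
static bool eee_config_matches_sketch(const struct ethtool_keee *a,
				      const struct ethtool_keee *b)
{
	if (a->eee_enabled)
		return linkmode_equal(a->advertised, b->advertised) &&
		       a->tx_lpi_timer == b->tx_lpi_timer &&
		       a->tx_lpi_enabled == b->tx_lpi_enabled;

	/* EEE disabled: nothing may still be advertised */
	return linkmode_empty(b->advertised);
}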
@@ -6152,13 +6142,11 @@ static void tg3_refclk_write(struct tg3 *tp, u64 newval)
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);
-static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
{
struct tg3 *tp = netdev_priv(dev);
- info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
if (tg3_flag(tp, PTP_CAPABLE)) {
info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
@@ -6168,8 +6156,6 @@ static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
if (tp->ptp_clock)
info->phc_index = ptp_clock_index(tp->ptp_clock);
- else
- info->phc_index = -1;
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
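
The switch to struct kernel_ethtool_ts_info lets the ethtool core pre-initialize the reply, which is why the software RX/reporting flags and the phc_index = -1 fallback can be dropped here (assuming the core-side defaults that motivated this conversion). The callback reduces to roughly this sketch:

/* Sketch: the driver declares TX software timestamping plus any
 * hardware capabilities; core-supplied defaults cover the rest.
 */
static int get_ts_info_sketch(struct net_device *dev,
			      struct kernel_ethtool_ts_info *info)
{
	struct tg3 *tp = netdev_priv(dev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
	if (tg3_flag(tp, PTP_CAPABLE))
		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
					 SOF_TIMESTAMPING_RX_HARDWARE |
					 SOF_TIMESTAMPING_RAW_HARDWARE;
	if (tp->ptp_clock)
		info->phc_index = ptp_clock_index(tp->ptp_clock);
	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
	return 0;
}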
@@ -7410,27 +7396,61 @@ tx_recovery:
static void tg3_napi_disable(struct tg3 *tp)
{
+ int txq_idx = tp->txq_cnt - 1;
+ int rxq_idx = tp->rxq_cnt - 1;
+ struct tg3_napi *tnapi;
int i;
- for (i = tp->irq_cnt - 1; i >= 0; i--)
- napi_disable(&tp->napi[i].napi);
+ for (i = tp->irq_cnt - 1; i >= 0; i--) {
+ tnapi = &tp->napi[i];
+ if (tnapi->tx_buffers) {
+ netif_queue_set_napi(tp->dev, txq_idx,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ txq_idx--;
+ }
+ if (tnapi->rx_rcb) {
+ netif_queue_set_napi(tp->dev, rxq_idx,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ rxq_idx--;
+ }
+ napi_disable(&tnapi->napi);
+ }
}
static void tg3_napi_enable(struct tg3 *tp)
{
+ int txq_idx = 0, rxq_idx = 0;
+ struct tg3_napi *tnapi;
int i;
- for (i = 0; i < tp->irq_cnt; i++)
- napi_enable(&tp->napi[i].napi);
+ for (i = 0; i < tp->irq_cnt; i++) {
+ tnapi = &tp->napi[i];
+ napi_enable_locked(&tnapi->napi);
+ if (tnapi->tx_buffers) {
+ netif_queue_set_napi(tp->dev, txq_idx,
+ NETDEV_QUEUE_TYPE_TX,
+ &tnapi->napi);
+ txq_idx++;
+ }
+ if (tnapi->rx_rcb) {
+ netif_queue_set_napi(tp->dev, rxq_idx,
+ NETDEV_QUEUE_TYPE_RX,
+ &tnapi->napi);
+ rxq_idx++;
+ }
+ }
}
static void tg3_napi_init(struct tg3 *tp)
{
int i;
- netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
- for (i = 1; i < tp->irq_cnt; i++)
- netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
+ for (i = 0; i < tp->irq_cnt; i++) {
+ netif_napi_add_locked(tp->dev, &tp->napi[i].napi,
+ i ? tg3_poll_msix : tg3_poll);
+ netif_napi_set_irq_locked(&tp->napi[i].napi,
+ tp->napi[i].irq_vec);
+ }
}
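
netif_queue_set_napi() publishes the queue-to-NAPI mapping through the netdev netlink API and must run under the netdev instance lock, which is why the enable path pairs it with napi_enable_locked(). A condensed sketch for one combined queue, assuming the caller already holds netdev_lock(dev) (helper hypothetical):

/* Sketch: enable a NAPI context and advertise it as the handler for
 * TX and RX queue 0. Caller holds netdev_lock(dev).
 */
static void queue_napi_attach_sketch(struct net_device *dev,
				     struct napi_struct *napi)
{
	napi_enable_locked(napi);
	netif_queue_set_napi(dev, 0, NETDEV_QUEUE_TYPE_TX, napi);
	netif_queue_set_napi(dev, 0, NETDEV_QUEUE_TYPE_RX, napi);
}

On teardown the mapping is cleared (set to NULL) before napi_disable(), as tg3_napi_disable() above does.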
static void tg3_napi_fini(struct tg3 *tp)
@@ -11241,6 +11261,8 @@ static void tg3_timer_stop(struct tg3 *tp)
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
__releases(tp->lock)
__acquires(tp->lock)
+ __releases(tp->dev->lock)
+ __acquires(tp->dev->lock)
{
int err;
@@ -11253,7 +11275,9 @@ static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
tg3_timer_stop(tp);
tp->irq_sync = 0;
tg3_napi_enable(tp);
+ netdev_unlock(tp->dev);
dev_close(tp->dev);
+ netdev_lock(tp->dev);
tg3_full_lock(tp, 0);
}
return err;
@@ -11281,6 +11305,7 @@ static void tg3_reset_task(struct work_struct *work)
tg3_netif_stop(tp);
+ netdev_lock(tp->dev);
tg3_full_lock(tp, 1);
if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
@@ -11300,12 +11325,14 @@ static void tg3_reset_task(struct work_struct *work)
* call cancel_work_sync() and wait forever.
*/
tg3_flag_clear(tp, RESET_TASK_PENDING);
+ netdev_unlock(tp->dev);
dev_close(tp->dev);
goto out;
}
tg3_netif_start(tp);
tg3_full_unlock(tp);
+ netdev_unlock(tp->dev);
tg3_phy_start(tp);
tg3_flag_clear(tp, RESET_TASK_PENDING);
out:
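
The netdev_lock()/netdev_unlock() additions follow one ordering rule: the instance lock is taken before tg3_full_lock() and released in reverse order, and it is dropped before calling dev_close(), which may take the instance lock itself. A sketch of the critical-section shape (hypothetical helper):

/* Sketch: lock ordering for the tg3 reconfiguration paths. */
static void reconfig_sketch(struct tg3 *tp)
{
	netdev_lock(tp->dev);
	tg3_full_lock(tp, 1);

	/* ... halt, reprogram and restart the hardware ... */

	tg3_full_unlock(tp);
	netdev_unlock(tp->dev);
}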
@@ -11324,18 +11351,17 @@ static int tg3_request_irq(struct tg3 *tp, int irq_num)
else {
name = &tnapi->irq_lbl[0];
if (tnapi->tx_buffers && tnapi->rx_rcb)
- snprintf(name, IFNAMSIZ,
+ snprintf(name, sizeof(tnapi->irq_lbl),
"%s-txrx-%d", tp->dev->name, irq_num);
else if (tnapi->tx_buffers)
- snprintf(name, IFNAMSIZ,
+ snprintf(name, sizeof(tnapi->irq_lbl),
"%s-tx-%d", tp->dev->name, irq_num);
else if (tnapi->rx_rcb)
- snprintf(name, IFNAMSIZ,
+ snprintf(name, sizeof(tnapi->irq_lbl),
"%s-rx-%d", tp->dev->name, irq_num);
else
- snprintf(name, IFNAMSIZ,
+ snprintf(name, sizeof(tnapi->irq_lbl),
"%s-%d", tp->dev->name, irq_num);
- name[IFNAMSIZ-1] = 0;
}
if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
@@ -11666,9 +11692,11 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
if (err)
goto out_ints_fini;
+ netdev_lock(dev);
tg3_napi_init(tp);
tg3_napi_enable(tp);
+ netdev_unlock(dev);
for (i = 0; i < tp->irq_cnt; i++) {
err = tg3_request_irq(tp, i);
@@ -12552,6 +12580,7 @@ static int tg3_set_ringparam(struct net_device *dev,
irq_sync = 1;
}
+ netdev_lock(dev);
tg3_full_lock(tp, irq_sync);
tp->rx_pending = ering->rx_pending;
@@ -12580,6 +12609,7 @@ static int tg3_set_ringparam(struct net_device *dev,
}
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (irq_sync && !err)
tg3_phy_start(tp);
@@ -12661,6 +12691,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
irq_sync = 1;
}
+ netdev_lock(dev);
tg3_full_lock(tp, irq_sync);
if (epause->autoneg)
@@ -12690,6 +12721,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
}
tg3_full_unlock(tp);
+ netdev_unlock(dev);
}
tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
@@ -13108,12 +13140,16 @@ static int tg3_test_nvram(struct tg3 *tp)
/* Bootstrap checksum at offset 0x10 */
csum = calc_crc((unsigned char *) buf, 0x10);
- if (csum != le32_to_cpu(buf[0x10/4]))
+
+ /* The type of buf is __be32 *, but this value is __le32 */
+ if (csum != le32_to_cpu((__force __le32)buf[0x10 / 4]))
goto out;
/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
- csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
- if (csum != le32_to_cpu(buf[0xfc/4]))
+ csum = calc_crc((unsigned char *)&buf[0x74 / 4], 0x88);
+
+ /* The type of buf is __be32 *, but this value is __le32 */
+ if (csum != le32_to_cpu((__force __le32)buf[0xfc / 4]))
goto out;
kfree(buf);
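
The NVRAM image is read as big-endian words, but the two checksum slots are stored little-endian, so the comparison reinterprets the word; the __force cast tells sparse that crossing endianness annotations here is intentional. A minimal sketch (hypothetical helper; word indexes into the __be32 buffer):

/* Sketch: compare a CRC against a little-endian word embedded in an
 * otherwise big-endian buffer; __force silences sparse for the
 * deliberate reinterpretation.
 */
static bool nvram_csum_ok_sketch(const __be32 *buf, size_t word, u32 csum)
{
	return csum == le32_to_cpu((__force __le32)buf[word]);
}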
@@ -13890,6 +13926,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
data[TG3_INTERRUPT_TEST] = 1;
}
+ netdev_lock(dev);
tg3_full_lock(tp, 0);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
@@ -13901,6 +13938,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
}
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (irq_sync && !err2)
tg3_phy_start(tp);
@@ -14180,7 +14218,7 @@ static int tg3_set_coalesce(struct net_device *dev,
return 0;
}
-static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int tg3_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct tg3 *tp = netdev_priv(dev);
@@ -14189,7 +14227,7 @@ static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
- if (edata->advertised != tp->eee.advertised) {
+ if (!linkmode_equal(edata->advertised, tp->eee.advertised)) {
netdev_warn(tp->dev,
"Direct manipulation of EEE advertisement is not supported\n");
return -EINVAL;
@@ -14202,7 +14240,9 @@ static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return -EINVAL;
}
- tp->eee = *edata;
+ tp->eee.eee_enabled = edata->eee_enabled;
+ tp->eee.tx_lpi_enabled = edata->tx_lpi_enabled;
+ tp->eee.tx_lpi_timer = edata->tx_lpi_timer;
tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
tg3_warn_mgmt_link_flap(tp);
@@ -14217,7 +14257,7 @@ static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
-static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+static int tg3_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
struct tg3 *tp = netdev_priv(dev);
@@ -14304,7 +14344,7 @@ static void tg3_set_rx_mode(struct net_device *dev)
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
int new_mtu)
{
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (new_mtu > ETH_DATA_LEN) {
if (tg3_flag(tp, 5780_CLASS)) {
@@ -14342,6 +14382,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_set_mtu(dev, tp, new_mtu);
+ netdev_lock(dev);
tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
@@ -14361,6 +14402,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_netif_start(tp);
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (!err)
tg3_phy_start(tp);
@@ -15655,10 +15697,13 @@ static int tg3_phy_probe(struct tg3 *tp)
tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
- tp->eee.supported = SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full;
- tp->eee.advertised = ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Full;
+ linkmode_zero(tp->eee.supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ tp->eee.supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ tp->eee.supported);
+ linkmode_copy(tp->eee.advertised, tp->eee.supported);
+
tp->eee.eee_enabled = 1;
tp->eee.tx_lpi_enabled = 1;
tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
@@ -17075,12 +17120,14 @@ static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
addr_ok = is_valid_ether_addr(addr);
}
if (!addr_ok) {
+ __be32 be_hi, be_lo;
+
/* Next, try NVRAM. */
if (!tg3_flag(tp, NO_NVRAM) &&
- !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
- !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
- memcpy(&addr[0], ((char *)&hi) + 2, 2);
- memcpy(&addr[2], (char *)&lo, sizeof(lo));
+ !tg3_nvram_read_be32(tp, mac_offset + 0, &be_hi) &&
+ !tg3_nvram_read_be32(tp, mac_offset + 4, &be_lo)) {
+ memcpy(&addr[0], ((char *)&be_hi) + 2, 2);
+ memcpy(&addr[2], (char *)&be_lo, sizeof(be_lo));
}
/* Finally just fetch it out of the MAC control regs. */
else {
@@ -17811,6 +17858,9 @@ static int tg3_init_one(struct pci_dev *pdev,
} else
persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
+ if (tg3_asic_rev(tp) == ASIC_REV_57766)
+ persist_dma_mask = DMA_BIT_MASK(31);
+
/* Configure DMA attributes. */
if (dma_mask > DMA_BIT_MASK(32)) {
err = dma_set_mask(&pdev->dev, dma_mask);
@@ -18090,7 +18140,6 @@ static int tg3_suspend(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct tg3 *tp = netdev_priv(dev);
- int err = 0;
rtnl_lock();
@@ -18114,32 +18163,11 @@ static int tg3_suspend(struct device *device)
tg3_flag_clear(tp, INIT_COMPLETE);
tg3_full_unlock(tp);
- err = tg3_power_down_prepare(tp);
- if (err) {
- int err2;
-
- tg3_full_lock(tp, 0);
-
- tg3_flag_set(tp, INIT_COMPLETE);
- err2 = tg3_restart_hw(tp, true);
- if (err2)
- goto out;
-
- tg3_timer_start(tp);
-
- netif_device_attach(dev);
- tg3_netif_start(tp);
-
-out:
- tg3_full_unlock(tp);
-
- if (!err2)
- tg3_phy_start(tp);
- }
+ tg3_power_down_prepare(tp);
unlock:
rtnl_unlock();
- return err;
+ return 0;
}
static int tg3_resume(struct device *device)
@@ -18155,6 +18183,7 @@ static int tg3_resume(struct device *device)
netif_device_attach(dev);
+ netdev_lock(dev);
tg3_full_lock(tp, 0);
tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
@@ -18171,6 +18200,7 @@ static int tg3_resume(struct device *device)
out:
tg3_full_unlock(tp);
+ netdev_unlock(dev);
if (!err)
tg3_phy_start(tp);
@@ -18183,6 +18213,50 @@ unlock:
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
+/* Systems on which ACPI _PTS (Prepare To Sleep) for S5 results in a
+ * fatal PCIe AER event on the tg3 device if the device is not, or
+ * cannot be, powered down.
+ */
+static const struct dmi_system_id tg3_restart_aer_quirk_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R440"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R540"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R640"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R650"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R740"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R750"),
+ },
+ },
+ {}
+};
+
static void tg3_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -18199,6 +18273,19 @@ static void tg3_shutdown(struct pci_dev *pdev)
if (system_state == SYSTEM_POWER_OFF)
tg3_power_down(tp);
+ else if (system_state == SYSTEM_RESTART &&
+ dmi_first_match(tg3_restart_aer_quirk_table) &&
+ pdev->current_state != PCI_D3cold &&
+ pdev->current_state != PCI_UNKNOWN) {
+ /* Disable PCIe AER on the tg3 to avoid a fatal
+ * error during this system restart.
+ */
+ pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_CERE |
+ PCI_EXP_DEVCTL_NFERE |
+ PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_URRE);
+ }
rtnl_unlock();
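
The quirk only fires on a warm restart of a matched platform while the device is still in a powered state; D3cold or unknown states are left alone since the device cannot raise AER traffic from there. A condensed sketch of the gating around the calls shown above (hypothetical helper):

/* Sketch: mask the PCIe error-reporting enables in Device Control so
 * the device cannot signal a fatal AER event during platform restart.
 */
static void maybe_quiesce_aer_sketch(struct pci_dev *pdev)
{
	if (system_state != SYSTEM_RESTART ||
	    !dmi_first_match(tg3_restart_aer_quirk_table) ||
	    pdev->current_state == PCI_D3cold ||
	    pdev->current_state == PCI_UNKNOWN)
		return;

	pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
				   PCI_EXP_DEVCTL_CERE |
				   PCI_EXP_DEVCTL_NFERE |
				   PCI_EXP_DEVCTL_FERE |
				   PCI_EXP_DEVCTL_URRE);
}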
@@ -18251,7 +18338,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
done:
if (state == pci_channel_io_perm_failure) {
if (netdev) {
+ netdev_lock(netdev);
tg3_napi_enable(tp);
+ netdev_unlock(netdev);
dev_close(netdev);
}
err = PCI_ERS_RESULT_DISCONNECT;
@@ -18269,7 +18358,7 @@ done:
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot.
- * At this point, the card has exprienced a hard reset,
+ * At this point, the card has experienced a hard reset,
* followed by fixups by BIOS, and has its config space
* set up identically to what it was at cold boot.
*/
@@ -18305,7 +18394,9 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
done:
if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
+ netdev_lock(netdev);
tg3_napi_enable(tp);
+ netdev_unlock(netdev);
dev_close(netdev);
}
rtnl_unlock();
@@ -18331,12 +18422,14 @@ static void tg3_io_resume(struct pci_dev *pdev)
if (!netdev || !netif_running(netdev))
goto done;
+ netdev_lock(netdev);
tg3_full_lock(tp, 0);
tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
tg3_flag_set(tp, INIT_COMPLETE);
err = tg3_restart_hw(tp, true);
if (err) {
tg3_full_unlock(tp);
+ netdev_unlock(netdev);
netdev_err(netdev, "Cannot restart hardware after reset.\n");
goto done;
}
@@ -18348,6 +18441,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
tg3_netif_start(tp);
tg3_full_unlock(tp);
+ netdev_unlock(netdev);
tg3_phy_start(tp);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 5016475e5005..b473f8014d9c 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -3033,7 +3033,7 @@ struct tg3_napi {
dma_addr_t rx_rcb_mapping;
dma_addr_t tx_desc_mapping;
- char irq_lbl[IFNAMSIZ];
+ char irq_lbl[IFNAMSIZ + 6 + 10]; /* name + "-txrx-" + %d */
unsigned int irq_vec;
};
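
The new label size covers the worst case: IFNAMSIZ (which already includes the interface name's NUL, reusable as the terminator slot), the 6-character "-txrx-" infix, and up to 10 digits of a positive int. Combined with the bounded snprintf() against sizeof() in tg3_request_irq(), the manual name[IFNAMSIZ-1] = 0 becomes unnecessary. The arithmetic, as a hypothetical compile-time check:

/* Sketch: worst-case label such as "eth...-txrx-2147483647". */
#define TG3_IRQ_LBL_LEN_SKETCH	(IFNAMSIZ + sizeof("-txrx-") - 1 + 10)

static_assert(TG3_IRQ_LBL_LEN_SKETCH == IFNAMSIZ + 6 + 10);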
@@ -3419,7 +3419,7 @@ struct tg3 {
unsigned int irq_cnt;
struct ethtool_coalesce coal;
- struct ethtool_eee eee;
+ struct ethtool_keee eee;
/* firmware info */
const char *fw_needed;