Diffstat (limited to 'drivers/net/ethernet/broadcom')
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig               |   8
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c          |  82
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h          |   5
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-bcma.c          | 103
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-platform.c      |  34
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c               |  52
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h               |   4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c     |   6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c    | 109
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c           | 228
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h           |  29
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c       | 108
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h       |   1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c   | 427
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h   |   3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h       | 325
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c     |  16
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h     |   1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c       |  20
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h       |   2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c      | 292
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h      |  16
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c  |  15
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c        |  52
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c                 |   7
25 files changed, 1594 insertions, 351 deletions
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 940fb24bba21..96413808c726 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -109,7 +109,6 @@ config TIGON3
tristate "Broadcom Tigon3 support"
depends on PCI
select PHYLIB
- select HWMON
imply PTP_1588_CLOCK
---help---
This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
@@ -117,6 +116,13 @@ config TIGON3
To compile this driver as a module, choose M here: the module
will be called tg3. This is recommended.
+config TIGON3_HWMON
+ bool "Broadcom Tigon3 HWMON support"
+ default y
+ depends on TIGON3 && HWMON && !(TIGON3=y && HWMON=m)
+ ---help---
+ Say Y if you want to expose the thermal sensor on Tigon3 devices.
+
config BNX2X
tristate "Broadcom NetXtremeII 10Gb support"
depends on PCI
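The new TIGON3_HWMON bool uses the `depends on TIGON3 && HWMON && !(TIGON3=y && HWMON=m)` idiom so a built-in tg3 can never reference a modular hwmon core. A minimal sketch of how driver code typically consumes such a symbol (the guard shape is illustrative, not copied from tg3.c; bodies omitted):

#ifdef CONFIG_TIGON3_HWMON
static void tg3_hwmon_open(struct tg3 *tp)
{
        /* register the on-die temperature sensor with the hwmon core */
}
#else
static inline void tg3_hwmon_open(struct tg3 *tp) { }
#endif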
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index a68d4889f5db..099b374c1b17 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -22,6 +22,7 @@
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
+#include <net/dsa.h>
#include <net/ip.h>
#include <net/ipv6.h>
@@ -284,6 +285,7 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
+ /* Per TX-queue statistics are dynamically appended */
};
#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
@@ -338,7 +340,8 @@ static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
continue;
j++;
}
- return j;
+ /* Include per-queue statistics */
+ return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
default:
return -EOPNOTSUPP;
}
@@ -349,6 +352,7 @@ static void bcm_sysport_get_strings(struct net_device *dev,
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
const struct bcm_sysport_stats *s;
+ char buf[128];
int i, j;
switch (stringset) {
@@ -363,6 +367,18 @@ static void bcm_sysport_get_strings(struct net_device *dev,
ETH_GSTRING_LEN);
j++;
}
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ snprintf(buf, sizeof(buf), "txq%d_packets", i);
+ memcpy(data + j * ETH_GSTRING_LEN, buf,
+ ETH_GSTRING_LEN);
+ j++;
+
+ snprintf(buf, sizeof(buf), "txq%d_bytes", i);
+ memcpy(data + j * ETH_GSTRING_LEN, buf,
+ ETH_GSTRING_LEN);
+ j++;
+ }
break;
default:
break;
@@ -418,6 +434,7 @@ static void bcm_sysport_get_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct bcm_sysport_tx_ring *ring;
int i, j;
if (netif_running(dev))
@@ -436,6 +453,22 @@ static void bcm_sysport_get_stats(struct net_device *dev,
data[j] = *(unsigned long *)p;
j++;
}
+
+ /* For SYSTEMPORT Lite since we have holes in our statistics, j would
+ * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
+ * needs to point to how many total statistics we have minus the
+ * number of per TX queue statistics
+ */
+ j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
+ dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ ring = &priv->tx_rings[i];
+ data[j] = ring->packets;
+ j++;
+ data[j] = ring->bytes;
+ j++;
+ }
}
static void bcm_sysport_get_wol(struct net_device *dev,
@@ -637,6 +670,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
u16 len, status;
struct bcm_rsb *rsb;
+ /* Clear status before servicing to reduce spurious interrupts */
+ intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);
+
/* Determine how much we should process since last call, SYSTEMPORT Lite
* groups the producer and consumer indexes into the same 32-bit
* which we access using RDMA_CONS_INDEX
@@ -647,11 +683,7 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
p_index = rdma_readl(priv, RDMA_CONS_INDEX);
p_index &= RDMA_PROD_INDEX_MASK;
- if (p_index < priv->rx_c_index)
- to_process = (RDMA_CONS_INDEX_MASK + 1) -
- priv->rx_c_index + p_index;
- else
- to_process = p_index - priv->rx_c_index;
+ to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;
netif_dbg(priv, rx_status, ndev,
"p_index=%d rx_c_index=%d to_process=%d\n",
@@ -746,26 +778,26 @@ next:
return processed;
}
-static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
+static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
struct bcm_sysport_cb *cb,
unsigned int *bytes_compl,
unsigned int *pkts_compl)
{
+ struct bcm_sysport_priv *priv = ring->priv;
struct device *kdev = &priv->pdev->dev;
- struct net_device *ndev = priv->netdev;
if (cb->skb) {
- ndev->stats.tx_bytes += cb->skb->len;
+ ring->bytes += cb->skb->len;
*bytes_compl += cb->skb->len;
dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
dma_unmap_len(cb, dma_len),
DMA_TO_DEVICE);
- ndev->stats.tx_packets++;
+ ring->packets++;
(*pkts_compl)++;
bcm_sysport_free_cb(cb);
/* SKB fragment */
} else if (dma_unmap_addr(cb, dma_addr)) {
- ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
+ ring->bytes += dma_unmap_len(cb, dma_len);
dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
dma_unmap_addr_set(cb, dma_addr, 0);
@@ -782,6 +814,13 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
struct bcm_sysport_cb *cb;
u32 hw_ind;
+ /* Clear status before servicing to reduce spurious interrupts */
+ if (!ring->priv->is_lite)
+ intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
+ else
+ intrl2_0_writel(ring->priv, BIT(ring->index +
+ INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);
+
/* Compute how many descriptors have been processed since last call */
hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
@@ -803,7 +842,7 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
while (last_tx_cn-- > 0) {
cb = ring->cbs + last_c_index;
- bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);
+ bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
ring->desc_count++;
last_c_index++;
@@ -1632,6 +1671,24 @@ static int bcm_sysport_change_mac(struct net_device *dev, void *p)
return 0;
}
+static struct net_device_stats *bcm_sysport_get_nstats(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ unsigned long tx_bytes = 0, tx_packets = 0;
+ struct bcm_sysport_tx_ring *ring;
+ unsigned int q;
+
+ for (q = 0; q < dev->num_tx_queues; q++) {
+ ring = &priv->tx_rings[q];
+ tx_bytes += ring->bytes;
+ tx_packets += ring->packets;
+ }
+
+ dev->stats.tx_bytes = tx_bytes;
+ dev->stats.tx_packets = tx_packets;
+ return &dev->stats;
+}
+
static void bcm_sysport_netif_start(struct net_device *dev)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -1893,6 +1950,7 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = bcm_sysport_poll_controller,
#endif
+ .ndo_get_stats = bcm_sysport_get_nstats,
};
#define REV_FMT "v%2x.%02x"
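The RX path change above replaces the explicit wrap-around branch with a single masked subtraction; because the hardware index is an n-bit counter and RDMA_CONS_INDEX_MASK is 2^n - 1, the unsigned subtraction already yields the correct distance in the wrapped case. A standalone sketch, with the mask width chosen only for illustration:

#define CONS_INDEX_MASK 0xffff  /* illustrative: a 16-bit ring index */

static unsigned int ring_delta(unsigned int prod, unsigned int cons)
{
        /* correct for both prod >= cons and the wrapped prod < cons case */
        return (prod - cons) & CONS_INDEX_MASK;
}

/* ring_delta(0x0002, 0xfffe) == 4, matching the old two-branch
 * computation (CONS_INDEX_MASK + 1) - cons + prod.
 */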
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 863ddd7870b7..77a51c167a69 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -647,6 +647,9 @@ enum bcm_sysport_stat_type {
.reg_offset = ofs, \
}
+/* TX bytes and packets */
+#define NUM_SYSPORT_TXQ_STAT 2
+
struct bcm_sysport_stats {
char stat_string[ETH_GSTRING_LEN];
int stat_sizeof;
@@ -690,6 +693,8 @@ struct bcm_sysport_tx_ring {
struct bcm_sysport_cb *cbs; /* Transmit control blocks */
struct dma_desc *desc_cpu; /* CPU view of the descriptor */
struct bcm_sysport_priv *priv; /* private context backpointer */
+ unsigned long packets; /* packets statistics */
+ unsigned long bytes; /* bytes statistics */
};
/* Driver private structure */
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index d59cfcc4c4d5..6322594ab260 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -11,6 +11,7 @@
#include <linux/bcma/bcma.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
+#include <linux/of_net.h>
#include "bgmac.h"
static inline bool bgmac_is_bcm4707_family(struct bcma_device *core)
@@ -114,7 +115,7 @@ static int bgmac_probe(struct bcma_device *core)
struct ssb_sprom *sprom = &core->bus->sprom;
struct mii_bus *mii_bus;
struct bgmac *bgmac;
- u8 *mac;
+ const u8 *mac = NULL;
int err;
bgmac = bgmac_alloc(&core->dev);
@@ -127,21 +128,27 @@ static int bgmac_probe(struct bcma_device *core)
bcma_set_drvdata(core, bgmac);
- switch (core->core_unit) {
- case 0:
- mac = sprom->et0mac;
- break;
- case 1:
- mac = sprom->et1mac;
- break;
- case 2:
- mac = sprom->et2mac;
- break;
- default:
- dev_err(bgmac->dev, "Unsupported core_unit %d\n",
- core->core_unit);
- err = -ENOTSUPP;
- goto err;
+ if (bgmac->dev->of_node)
+ mac = of_get_mac_address(bgmac->dev->of_node);
+
+ /* If no MAC address assigned via device tree, check SPROM */
+ if (!mac) {
+ switch (core->core_unit) {
+ case 0:
+ mac = sprom->et0mac;
+ break;
+ case 1:
+ mac = sprom->et1mac;
+ break;
+ case 2:
+ mac = sprom->et2mac;
+ break;
+ default:
+ dev_err(bgmac->dev, "Unsupported core_unit %d\n",
+ core->core_unit);
+ err = -ENOTSUPP;
+ goto err;
+ }
}
ether_addr_copy(bgmac->net_dev->dev_addr, mac);
@@ -192,36 +199,50 @@ static int bgmac_probe(struct bcma_device *core)
goto err1;
}
- bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
- BGMAC_BFL_ENETROBO);
+ bgmac->has_robosw = !!(sprom->boardflags_lo & BGMAC_BFL_ENETROBO);
if (bgmac->has_robosw)
dev_warn(bgmac->dev, "Support for Roboswitch not implemented\n");
- if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
+ if (sprom->boardflags_lo & BGMAC_BFL_ENETADM)
dev_warn(bgmac->dev, "Support for ADMtek ethernet switch not implemented\n");
/* Feature Flags */
- switch (core->bus->chipinfo.id) {
+ switch (ci->id) {
+ /* BCM 471X/535X family */
+ case BCMA_CHIP_ID_BCM4716:
+ bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+ /* fallthrough */
+ case BCMA_CHIP_ID_BCM47162:
+ bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL2;
+ bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ break;
case BCMA_CHIP_ID_BCM5357:
+ case BCMA_CHIP_ID_BCM53572:
bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
- if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM47186) {
- bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
+ if (ci->pkg == BCMA_PKG_ID_BCM47188 ||
+ ci->pkg == BCMA_PKG_ID_BCM47186) {
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
+ bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
}
- if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM5358)
+ if (ci->pkg == BCMA_PKG_ID_BCM5358)
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII;
break;
- case BCMA_CHIP_ID_BCM53572:
- bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ case BCMA_CHIP_ID_BCM53573:
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
- bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
- bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
- if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM47188) {
- bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
+ bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ if (ci->pkg == BCMA_PKG_ID_BCM47189)
bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
+ if (core->core_unit == 0) {
+ bgmac->feature_flags |= BGMAC_FEAT_CC4_IF_SW_TYPE;
+ if (ci->pkg == BCMA_PKG_ID_BCM47189)
+ bgmac->feature_flags |=
+ BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII;
+ } else if (core->core_unit == 1) {
+ bgmac->feature_flags |= BGMAC_FEAT_IRQ_ID_OOB_6;
+ bgmac->feature_flags |= BGMAC_FEAT_CC7_IF_TYPE_RGMII;
}
break;
case BCMA_CHIP_ID_BCM4749:
@@ -229,18 +250,11 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
- if (core->bus->chipinfo.pkg == 10) {
+ if (ci->pkg == 10) {
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
}
break;
- case BCMA_CHIP_ID_BCM4716:
- bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
- /* fallthrough */
- case BCMA_CHIP_ID_BCM47162:
- bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL2;
- bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
- break;
/* bcm4707_family */
case BCMA_CHIP_ID_BCM4707:
case BCMA_CHIP_ID_BCM47094:
@@ -249,21 +263,6 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->feature_flags |= BGMAC_FEAT_NO_RESET;
bgmac->feature_flags |= BGMAC_FEAT_FORCE_SPEED_2500;
break;
- case BCMA_CHIP_ID_BCM53573:
- bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
- bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
- if (ci->pkg == BCMA_PKG_ID_BCM47189)
- bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
- if (core->core_unit == 0) {
- bgmac->feature_flags |= BGMAC_FEAT_CC4_IF_SW_TYPE;
- if (ci->pkg == BCMA_PKG_ID_BCM47189)
- bgmac->feature_flags |=
- BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII;
- } else if (core->core_unit == 1) {
- bgmac->feature_flags |= BGMAC_FEAT_IRQ_ID_OOB_6;
- bgmac->feature_flags |= BGMAC_FEAT_CC7_IF_TYPE_RGMII;
- }
- break;
default:
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
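The probe path now prefers a device-tree MAC address and only falls back to the SPROM entries when none is provided. A rough sketch of the pattern is below; sprom_mac stands in for the per-core SPROM pointer picked by the switch above, and the random-address last resort is shown only as the common completion of this pattern, not as part of this hunk:

const u8 *mac = NULL;

if (dev->of_node)
        mac = of_get_mac_address(dev->of_node); /* NULL if absent/invalid */
if (!mac)
        mac = sprom_mac;                        /* board/SPROM fallback */

if (mac && is_valid_ether_addr(mac))
        ether_addr_copy(net_dev->dev_addr, mac);
else
        eth_hw_addr_random(net_dev);            /* last resort */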
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index da1b8b225eb9..73aca97a96bc 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -21,8 +21,12 @@
#include <linux/of_net.h>
#include "bgmac.h"
+#define NICPM_PADRING_CFG 0x00000004
#define NICPM_IOMUX_CTRL 0x00000008
+#define NICPM_PADRING_CFG_INIT_VAL 0x74000000
+#define NICPM_IOMUX_CTRL_INIT_VAL_AX 0x21880000
+
#define NICPM_IOMUX_CTRL_INIT_VAL 0x3196e000
#define NICPM_IOMUX_CTRL_SPD_SHIFT 10
#define NICPM_IOMUX_CTRL_SPD_10M 0
@@ -113,6 +117,10 @@ static void bgmac_nicpm_speed_set(struct net_device *net_dev)
if (!bgmac->plat.nicpm_base)
return;
+ /* SET RGMII IO CONFIG */
+ writel(NICPM_PADRING_CFG_INIT_VAL,
+ bgmac->plat.nicpm_base + NICPM_PADRING_CFG);
+
val = NICPM_IOMUX_CTRL_INIT_VAL;
switch (bgmac->net_dev->phydev->speed) {
default:
@@ -244,6 +252,31 @@ static int bgmac_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int bgmac_suspend(struct device *dev)
+{
+ struct bgmac *bgmac = dev_get_drvdata(dev);
+
+ return bgmac_enet_suspend(bgmac);
+}
+
+static int bgmac_resume(struct device *dev)
+{
+ struct bgmac *bgmac = dev_get_drvdata(dev);
+
+ return bgmac_enet_resume(bgmac);
+}
+
+static const struct dev_pm_ops bgmac_pm_ops = {
+ .suspend = bgmac_suspend,
+ .resume = bgmac_resume
+};
+
+#define BGMAC_PM_OPS (&bgmac_pm_ops)
+#else
+#define BGMAC_PM_OPS NULL
+#endif /* CONFIG_PM */
+
static const struct of_device_id bgmac_of_enet_match[] = {
{.compatible = "brcm,amac",},
{.compatible = "brcm,nsp-amac",},
@@ -257,6 +290,7 @@ static struct platform_driver bgmac_enet_driver = {
.driver = {
.name = "bgmac-enet",
.of_match_table = bgmac_of_enet_match,
+ .pm = BGMAC_PM_OPS
},
.probe = bgmac_probe,
.remove = bgmac_remove,
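A design note on the PM wiring above: since both callbacks only cover system sleep, a similar effect could be had with the SIMPLE_DEV_PM_OPS helper under CONFIG_PM_SLEEP, e.g. the one-line alternative below. It is not strictly identical, as SIMPLE_DEV_PM_OPS also routes the hibernation callbacks to the same handlers.

static SIMPLE_DEV_PM_OPS(bgmac_pm_ops, bgmac_suspend, bgmac_resume);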
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index fd66fca00e01..ba4d2e145bb9 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -11,6 +11,7 @@
#include <linux/bcma/bcma.h>
#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
#include <linux/bcm47xx_nvram.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
@@ -1480,6 +1481,7 @@ int bgmac_enet_probe(struct bgmac *bgmac)
net_dev->irq = bgmac->irq;
SET_NETDEV_DEV(net_dev, bgmac->dev);
+ dev_set_drvdata(bgmac->dev, bgmac);
if (!is_valid_ether_addr(net_dev->dev_addr)) {
dev_err(bgmac->dev, "Invalid MAC addr: %pM\n",
@@ -1552,5 +1554,55 @@ void bgmac_enet_remove(struct bgmac *bgmac)
}
EXPORT_SYMBOL_GPL(bgmac_enet_remove);
+int bgmac_enet_suspend(struct bgmac *bgmac)
+{
+ if (!netif_running(bgmac->net_dev))
+ return 0;
+
+ phy_stop(bgmac->net_dev->phydev);
+
+ netif_stop_queue(bgmac->net_dev);
+
+ napi_disable(&bgmac->napi);
+
+ netif_tx_lock(bgmac->net_dev);
+ netif_device_detach(bgmac->net_dev);
+ netif_tx_unlock(bgmac->net_dev);
+
+ bgmac_chip_intrs_off(bgmac);
+ bgmac_chip_reset(bgmac);
+ bgmac_dma_cleanup(bgmac);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bgmac_enet_suspend);
+
+int bgmac_enet_resume(struct bgmac *bgmac)
+{
+ int rc;
+
+ if (!netif_running(bgmac->net_dev))
+ return 0;
+
+ rc = bgmac_dma_init(bgmac);
+ if (rc)
+ return rc;
+
+ bgmac_chip_init(bgmac);
+
+ napi_enable(&bgmac->napi);
+
+ netif_tx_lock(bgmac->net_dev);
+ netif_device_attach(bgmac->net_dev);
+ netif_tx_unlock(bgmac->net_dev);
+
+ netif_start_queue(bgmac->net_dev);
+
+ phy_start(bgmac->net_dev->phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bgmac_enet_resume);
+
MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 6d1c6ff1ed96..c1818766c501 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -402,7 +402,7 @@
#define BGMAC_WEIGHT 64
-#define ETHER_MAX_LEN 1518
+#define ETHER_MAX_LEN (ETH_FRAME_LEN + ETH_FCS_LEN)
/* Feature Flags */
#define BGMAC_FEAT_TX_MASK_SETUP BIT(0)
@@ -537,6 +537,8 @@ int bgmac_enet_probe(struct bgmac *bgmac);
void bgmac_enet_remove(struct bgmac *bgmac);
void bgmac_adjust_link(struct net_device *net_dev);
int bgmac_phy_connect_direct(struct bgmac *bgmac);
+int bgmac_enet_suspend(struct bgmac *bgmac);
+int bgmac_enet_resume(struct bgmac *bgmac);
struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac);
void bcma_mdio_mii_unregister(struct mii_bus *mii_bus);
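The ETHER_MAX_LEN change is value-preserving; it only makes the derivation explicit:

/* 1500 (payload) + 14 (Ethernet header) = ETH_FRAME_LEN = 1514
 * 1514 + ETH_FCS_LEN (4)                = 1518 = old ETHER_MAX_LEN
 */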
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 9e8c06130c09..eccb3d1b6abb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2021,6 +2021,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
ETH_OVERHEAD +
mtu +
BNX2X_FW_RX_ALIGN_END;
+ fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
@@ -4277,7 +4278,10 @@ int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
{
if (tc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- return bnx2x_setup_tc(dev, tc->tc);
+
+ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return bnx2x_setup_tc(dev, tc->mqprio->num_tc);
}
/* called with rtnl_lock */
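Two notes on the bnx2x_cmn.c hunks: rx_buf_size is now rounded up with SKB_DATA_ALIGN (alignment to the cacheline size), and the mqprio path reports TC_MQPRIO_HW_OFFLOAD_TCS back to the stack before programming the traffic classes. A small worked example of the alignment macro, with the cacheline size chosen for illustration:

/* SKB_DATA_ALIGN(x) rounds x up to SMP_CACHE_BYTES; with 64-byte
 * cachelines, SKB_DATA_ALIGN(1600) == 1600 and
 * SKB_DATA_ALIGN(1601) == 1664.
 */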
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index b209b7f6093e..7dd83d0ef0a0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6161,103 +6161,40 @@ static void bnx2x_link_int_ack(struct link_params *params,
}
}
+static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+{
+ str[0] = '\0';
+ (*len)--;
+ return 0;
+}
+
static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
{
- u8 *str_ptr = str;
- u32 mask = 0xf0000000;
- u8 shift = 8*4;
- u8 digit;
- u8 remove_leading_zeros = 1;
+ u16 ret;
+
if (*len < 10) {
/* Need more than 10chars for this format */
- *str_ptr = '\0';
- (*len)--;
+ bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
- while (shift > 0) {
- shift -= 4;
- digit = ((num & mask) >> shift);
- if (digit == 0 && remove_leading_zeros) {
- *str_ptr = '0';
- } else {
- if (digit < 0xa)
- *str_ptr = digit + '0';
- else
- *str_ptr = digit - 0xa + 'a';
-
- remove_leading_zeros = 0;
- str_ptr++;
- (*len)--;
- }
- mask = mask >> 4;
- if (shift == 4*4) {
- if (remove_leading_zeros) {
- str_ptr++;
- (*len)--;
- }
- *str_ptr = '.';
- str_ptr++;
- (*len)--;
- remove_leading_zeros = 1;
- }
- }
- if (remove_leading_zeros)
- (*len)--;
+ ret = scnprintf(str, *len, "%hx.%hx", num >> 16, num);
+ *len -= ret;
return 0;
}
static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
{
- u8 *str_ptr = str;
- u32 mask = 0x00f00000;
- u8 shift = 8*3;
- u8 digit;
- u8 remove_leading_zeros = 1;
+ u16 ret;
if (*len < 10) {
/* Need more than 10chars for this format */
- *str_ptr = '\0';
- (*len)--;
+ bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
- while (shift > 0) {
- shift -= 4;
- digit = ((num & mask) >> shift);
- if (digit == 0 && remove_leading_zeros) {
- *str_ptr = '0';
- } else {
- if (digit < 0xa)
- *str_ptr = digit + '0';
- else
- *str_ptr = digit - 0xa + 'a';
-
- remove_leading_zeros = 0;
- str_ptr++;
- (*len)--;
- }
- mask = mask >> 4;
- if ((shift == 4*4) || (shift == 4*2)) {
- if (remove_leading_zeros) {
- str_ptr++;
- (*len)--;
- }
- *str_ptr = '.';
- str_ptr++;
- (*len)--;
- remove_leading_zeros = 1;
- }
- }
- if (remove_leading_zeros)
- (*len)--;
- return 0;
-}
-
-static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
-{
- str[0] = '\0';
- (*len)--;
+ ret = scnprintf(str, *len, "%hhx.%hhx.%hhx", num >> 16, num >> 8, num);
+ *len -= ret;
return 0;
}
@@ -10681,22 +10618,19 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
static int bnx2x_8485x_format_ver(u32 raw_ver, u8 *str, u16 *len)
{
- int status = 0;
u32 num;
num = ((raw_ver & 0xF80) >> 7) << 16 | ((raw_ver & 0x7F) << 8) |
((raw_ver & 0xF000) >> 12);
- status = bnx2x_3_seq_format_ver(num, str, len);
- return status;
+ return bnx2x_3_seq_format_ver(num, str, len);
}
static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
{
- int status = 0;
u32 spirom_ver;
+
spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F);
- status = bnx2x_format_ver(spirom_ver, str, len);
- return status;
+ return bnx2x_format_ver(spirom_ver, str, len);
}
static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
@@ -12547,13 +12481,12 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
static int bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base,
u32 shmem2_base, u8 port, struct bnx2x_phy *phy)
{
- int status = 0;
phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN;
if (phy_index == INT_PHY)
return bnx2x_populate_int_phy(bp, shmem_base, port, phy);
- status = bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base,
+
+ return bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base,
port, phy);
- return status;
}
static void bnx2x_phy_def_cfg(struct link_params *params,
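The version-string rewrite above collapses the hand-rolled hex formatting into scnprintf(); a worked example of what the two formatters produce, with the input value chosen for illustration:

/* For num = 0x00010203:
 *
 *   bnx2x_format_ver()       -> "1.203"  ("%hx.%hx", num >> 16, num)
 *   bnx2x_3_seq_format_ver() -> "1.2.3"  ("%hhx.%hhx.%hhx", num >> 16,
 *                                          num >> 8, num)
 *
 * scnprintf() returns the number of characters actually written, so
 * "*len -= ret" keeps the remaining-length accounting of the old
 * open-coded loop.
 */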
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 1f1e54ba0ecb..b3ba66032980 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4482,9 +4482,15 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
}
#endif
- if (BNXT_PF(bp) && (le16_to_cpu(resp->flags) &
- FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED))
- bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
+ if (BNXT_PF(bp)) {
+ u16 flags = le16_to_cpu(resp->flags);
+
+ if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
+ FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED))
+ bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
+ if (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)
+ bp->flags |= BNXT_FLAG_MULTI_HOST;
+ }
switch (resp->port_partition_type) {
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
@@ -4549,6 +4555,9 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
+ if (resp->flags &
+ cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED))
+ bp->flags |= BNXT_FLAG_WOL_CAP;
} else {
#ifdef CONFIG_BNXT_SRIOV
struct bnxt_vf_info *vf = &bp->vf;
@@ -5198,9 +5207,10 @@ static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
#if defined(CONFIG_BNXT_SRIOV)
if (BNXT_VF(bp))
- return bp->vf.max_irqs;
+ return min_t(unsigned int, bp->vf.max_irqs,
+ bp->vf.max_cp_rings);
#endif
- return bp->pf.max_irqs;
+ return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings);
}
void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
@@ -5467,7 +5477,8 @@ static void bnxt_report_link(struct bnxt *bp)
if (bp->link_info.link_up) {
const char *duplex;
const char *flow_ctrl;
- u16 speed, fec;
+ u32 speed;
+ u16 fec;
netif_carrier_on(bp->dev);
if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
@@ -5483,7 +5494,7 @@ static void bnxt_report_link(struct bnxt *bp)
else
flow_ctrl = "none";
speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
- netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
+ netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
speed, duplex, flow_ctrl);
if (bp->flags & BNXT_FLAG_EEE_CAP)
netdev_info(bp->dev, "EEE is %s\n",
@@ -5857,6 +5868,76 @@ static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
return 0;
}
+int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
+{
+ struct hwrm_wol_filter_alloc_input req = {0};
+ struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
+ req.port_id = cpu_to_le16(bp->pf.port_id);
+ req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
+ req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
+ memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ bp->wol_filter_id = resp->wol_filter_id;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
+{
+ struct hwrm_wol_filter_free_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
+ req.port_id = cpu_to_le16(bp->pf.port_id);
+ req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
+ req.wol_filter_id = bp->wol_filter_id;
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ return rc;
+}
+
+static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
+{
+ struct hwrm_wol_filter_qcfg_input req = {0};
+ struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ u16 next_handle = 0;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
+ req.port_id = cpu_to_le16(bp->pf.port_id);
+ req.handle = cpu_to_le16(handle);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ next_handle = le16_to_cpu(resp->next_handle);
+ if (next_handle != 0) {
+ if (resp->wol_type ==
+ WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
+ bp->wol = 1;
+ bp->wol_filter_id = resp->wol_filter_id;
+ }
+ }
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return next_handle;
+}
+
+static void bnxt_get_wol_settings(struct bnxt *bp)
+{
+ u16 handle = 0;
+
+ if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
+ return;
+
+ do {
+ handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
+ } while (handle && handle != 0xffff);
+}
+
static bool bnxt_eee_config_ok(struct bnxt *bp)
{
struct ethtool_eee *eee = &bp->eee;
@@ -6042,6 +6123,43 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
return rc;
}
+/* rtnl_lock held, open the NIC half way by allocating all resources, but
+ * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
+ * self tests.
+ */
+int bnxt_half_open_nic(struct bnxt *bp)
+{
+ int rc = 0;
+
+ rc = bnxt_alloc_mem(bp, false);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
+ goto half_open_err;
+ }
+ rc = bnxt_init_nic(bp, false);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
+ goto half_open_err;
+ }
+ return 0;
+
+half_open_err:
+ bnxt_free_skbs(bp);
+ bnxt_free_mem(bp, false);
+ dev_close(bp->dev);
+ return rc;
+}
+
+/* rtnl_lock held, this call can only be made after a previous successful
+ * call to bnxt_half_open_nic().
+ */
+void bnxt_half_close_nic(struct bnxt *bp)
+{
+ bnxt_hwrm_resource_free(bp, false, false);
+ bnxt_free_skbs(bp);
+ bnxt_free_mem(bp, false);
+}
+
static int bnxt_open(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
@@ -6923,7 +7041,9 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
if (ntc->type != TC_SETUP_MQPRIO)
return -EINVAL;
- return bnxt_setup_mq_tc(dev, ntc->tc);
+ ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+ return bnxt_setup_mq_tc(dev, ntc->mqprio->num_tc);
}
#ifdef CONFIG_RFS_ACCEL
@@ -7224,6 +7344,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
+ bnxt_ethtool_free(bp);
bnxt_dcb_free(bp);
kfree(bp->edev);
bp->edev = NULL;
@@ -7546,6 +7667,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_hwrm_func_qcfg(bp);
bnxt_hwrm_port_led_qcaps(bp);
+ bnxt_ethtool_init(bp);
bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
@@ -7591,6 +7713,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ bnxt_get_wol_settings(bp);
+ if (bp->flags & BNXT_FLAG_WOL_CAP)
+ device_set_wakeup_enable(&pdev->dev, bp->wol);
+ else
+ device_set_wakeup_capable(&pdev->dev, false);
+
rc = register_netdev(dev);
if (rc)
goto init_err_clr_int;
@@ -7614,6 +7742,88 @@ init_err_free:
return rc;
}
+static void bnxt_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnxt *bp;
+
+ if (!dev)
+ return;
+
+ rtnl_lock();
+ bp = netdev_priv(dev);
+ if (!bp)
+ goto shutdown_exit;
+
+ if (netif_running(dev))
+ dev_close(dev);
+
+ if (system_state == SYSTEM_POWER_OFF) {
+ bnxt_clear_int_mode(bp);
+ pci_wake_from_d3(pdev, bp->wol);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+
+shutdown_exit:
+ rtnl_unlock();
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bnxt_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
+
+ rtnl_lock();
+ if (netif_running(dev)) {
+ netif_device_detach(dev);
+ rc = bnxt_close(dev);
+ }
+ bnxt_hwrm_func_drv_unrgtr(bp);
+ rtnl_unlock();
+ return rc;
+}
+
+static int bnxt_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(dev);
+ int rc = 0;
+
+ rtnl_lock();
+ if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
+ rc = -ENODEV;
+ goto resume_exit;
+ }
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc) {
+ rc = -EBUSY;
+ goto resume_exit;
+ }
+ bnxt_get_wol_settings(bp);
+ if (netif_running(dev)) {
+ rc = bnxt_open(dev);
+ if (!rc)
+ netif_device_attach(dev);
+ }
+
+resume_exit:
+ rtnl_unlock();
+ return rc;
+}
+
+static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
+#define BNXT_PM_OPS (&bnxt_pm_ops)
+
+#else
+
+#define BNXT_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
/**
* bnxt_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
@@ -7730,6 +7940,8 @@ static struct pci_driver bnxt_pci_driver = {
.id_table = bnxt_pci_tbl,
.probe = bnxt_init_one,
.remove = bnxt_remove_one,
+ .shutdown = bnxt_shutdown,
+ .driver.pm = BNXT_PM_OPS,
.err_handler = &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
.sriov_configure = bnxt_sriov_configure,
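The half-open/half-close helpers added above exist for the ethtool offline self-tests introduced in bnxt_ethtool.c further down; the call sequence used there is roughly the following (simplified, error handling omitted):

bnxt_close_nic(bp, false, false);       /* full close                      */
bnxt_run_fw_tests(bp, test_mask, &test_results);
bnxt_hwrm_mac_loopback(bp, true);       /* MAC in local loopback           */
bnxt_half_open_nic(bp);                 /* rings allocated, no NAPI/IRQ/TX */
bnxt_run_loopback(bp);                  /* xmit one frame, poll for RX     */
bnxt_hwrm_mac_loopback(bp, false);
bnxt_hwrm_phy_loopback(bp, true);       /* repeat at the PHY level         */
bnxt_run_loopback(bp);
bnxt_hwrm_phy_loopback(bp, false);
bnxt_half_close_nic(bp);
bnxt_open_nic(bp, false, true);         /* restore normal operation        */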
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index c7a5b84a5cb2..3ef42dbc6327 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -18,6 +18,8 @@
#define DRV_VER_MIN 7
#define DRV_VER_UPD 0
+#include <linux/interrupt.h>
+
struct tx_bd {
__le32 tx_bd_len_flags_type;
#define TX_BD_TYPE (0x3f << 0)
@@ -424,8 +426,6 @@ struct rx_tpa_end_cmp_ext {
#define BNXT_MIN_PKT_SIZE 52
-#define BNXT_NUM_TESTS(bp) 0
-
#define BNXT_DEFAULT_RX_RING_SIZE 511
#define BNXT_DEFAULT_TX_RING_SIZE 511
@@ -851,6 +851,7 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_25GB PORT_PHY_QCFG_RESP_LINK_SPEED_25GB
#define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
#define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
+#define BNXT_LINK_SPEED_100GB PORT_PHY_QCFG_RESP_LINK_SPEED_100GB
u16 support_speeds;
u16 auto_link_speeds; /* fw adv setting */
#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
@@ -862,6 +863,7 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB
#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
+#define BNXT_LINK_SPEED_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB
u16 support_auto_speeds;
u16 lp_auto_link_speeds;
u16 force_link_speed;
@@ -909,6 +911,14 @@ struct bnxt_led_info {
__le16 led_color_caps;
};
+#define BNXT_MAX_TEST 8
+
+struct bnxt_test_info {
+ u8 offline_mask;
+ u16 timeout;
+ char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
+};
+
#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
@@ -987,6 +997,7 @@ struct bnxt {
#define BNXT_FLAG_UDP_RSS_CAP 0x800
#define BNXT_FLAG_EEE_CAP 0x1000
#define BNXT_FLAG_NEW_RSS_CAP 0x2000
+ #define BNXT_FLAG_WOL_CAP 0x4000
#define BNXT_FLAG_ROCEV1_CAP 0x8000
#define BNXT_FLAG_ROCEV2_CAP 0x10000
#define BNXT_FLAG_ROCE_CAP (BNXT_FLAG_ROCEV1_CAP | \
@@ -994,6 +1005,7 @@ struct bnxt {
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
#define BNXT_FLAG_FW_LLDP_AGENT 0x80000
+ #define BNXT_FLAG_MULTI_HOST 0x100000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
@@ -1003,7 +1015,8 @@ struct bnxt {
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
-#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp))
+#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
+#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
@@ -1178,6 +1191,12 @@ struct bnxt {
u32 lpi_tmr_lo;
u32 lpi_tmr_hi;
+ u8 num_tests;
+ struct bnxt_test_info *test_info;
+
+ u8 wol_filter_id;
+ u8 wol;
+
u8 num_leds;
struct bnxt_led_info leds[BNXT_MAX_LED];
@@ -1236,8 +1255,12 @@ void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
+int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
+int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
+int bnxt_half_open_nic(struct bnxt *bp);
+void bnxt_half_close_nic(struct bnxt *bp);
int bnxt_close_nic(struct bnxt *, bool, bool);
int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 03532061d211..46de2f8ff024 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -14,6 +15,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
+#include <rdma/ib_verbs.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_dcb.h"
@@ -241,6 +243,92 @@ static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
return 0;
}
+static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
+ bool add)
+{
+ struct hwrm_fw_set_structured_data_input set = {0};
+ struct hwrm_fw_get_structured_data_input get = {0};
+ struct hwrm_struct_data_dcbx_app *fw_app;
+ struct hwrm_struct_hdr *data;
+ dma_addr_t mapping;
+ size_t data_len;
+ int rc, n, i;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ return 0;
+
+ n = IEEE_8021QAZ_MAX_TCS;
+ data_len = sizeof(*data) + sizeof(*fw_app) * n;
+ data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memset(data, 0, data_len);
+ bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1);
+ get.dest_data_addr = cpu_to_le64(mapping);
+ get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP);
+ get.subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
+ get.count = 0;
+ rc = hwrm_send_message(bp, &get, sizeof(get), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto set_app_exit;
+
+ fw_app = (struct hwrm_struct_data_dcbx_app *)(data + 1);
+
+ if (data->struct_id != cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP)) {
+ rc = -ENODEV;
+ goto set_app_exit;
+ }
+
+ n = data->count;
+ for (i = 0; i < n; i++, fw_app++) {
+ if (fw_app->protocol_id == cpu_to_be16(app->protocol) &&
+ fw_app->protocol_selector == app->selector &&
+ fw_app->priority == app->priority) {
+ if (add)
+ goto set_app_exit;
+ else
+ break;
+ }
+ }
+ if (add) {
+ /* append */
+ n++;
+ fw_app->protocol_id = cpu_to_be16(app->protocol);
+ fw_app->protocol_selector = app->selector;
+ fw_app->priority = app->priority;
+ fw_app->valid = 1;
+ } else {
+ size_t len = 0;
+
+ /* not found, nothing to delete */
+ if (n == i)
+ goto set_app_exit;
+
+ len = (n - 1 - i) * sizeof(*fw_app);
+ if (len)
+ memmove(fw_app, fw_app + 1, len);
+ n--;
+ memset(fw_app + n, 0, sizeof(*fw_app));
+ }
+ data->count = n;
+ data->len = cpu_to_le16(sizeof(*fw_app) * n);
+ data->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
+
+ bnxt_hwrm_cmd_hdr_init(bp, &set, HWRM_FW_SET_STRUCTURED_DATA, -1, -1);
+ set.src_data_addr = cpu_to_le64(mapping);
+ set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
+ set.hdr_cnt = 1;
+ rc = hwrm_send_message(bp, &set, sizeof(set), HWRM_CMD_TIMEOUT);
+ if (rc)
+ rc = -EIO;
+
+set_app_exit:
+ dma_free_coherent(&bp->pdev->dev, data_len, data, mapping);
+ return rc;
+}
+
static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
{
int total_ets_bw = 0;
@@ -417,6 +505,15 @@ static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
return -EINVAL;
rc = dcb_ieee_setapp(dev, app);
+ if (rc)
+ return rc;
+
+ if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ app->protocol == ETH_P_IBOE) ||
+ (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
+ app->protocol == ROCE_V2_UDP_DPORT))
+ rc = bnxt_hwrm_set_dcbx_app(bp, app, true);
+
return rc;
}
@@ -425,10 +522,19 @@ static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
struct bnxt *bp = netdev_priv(dev);
int rc;
- if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
return -EINVAL;
rc = dcb_ieee_delapp(dev, app);
+ if (rc)
+ return rc;
+ if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ app->protocol == ETH_P_IBOE) ||
+ (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM &&
+ app->protocol == ROCE_V2_UDP_DPORT))
+ rc = bnxt_hwrm_set_dcbx_app(bp, app, false);
+
return rc;
}
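For reference, the two application entries forwarded to the firmware DCBX agent above are the standard RoCE identifiers pulled in via the new rdma/ib_verbs.h include:

/* ETH_P_IBOE         0x8915  - RoCE v1 ethertype
 * ROCE_V2_UDP_DPORT  4791    - RoCE v2 UDP destination port
 */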
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
index 35a0d28cf2fd..ecd0a5e46a49 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 6903a873f072..11ddf0adc6e1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -17,6 +18,7 @@
#include <linux/firmware.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
+#include "bnxt_xdp.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
@@ -209,6 +211,10 @@ static int bnxt_get_sset_count(struct net_device *dev, int sset)
return num_stats;
}
+ case ETH_SS_TEST:
+ if (!bp->num_tests)
+ return -EOPNOTSUPP;
+ return bp->num_tests;
default:
return -EOPNOTSUPP;
}
@@ -306,6 +312,11 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
}
}
break;
+ case ETH_SS_TEST:
+ if (bp->num_tests)
+ memcpy(buf, bp->test_info->string,
+ bp->num_tests * ETH_GSTRING_LEN);
+ break;
default:
netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
stringset);
@@ -824,7 +835,7 @@ static void bnxt_get_drvinfo(struct net_device *dev,
sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
- info->testinfo_len = BNXT_NUM_TESTS(bp);
+ info->testinfo_len = bp->num_tests;
/* TODO CHIMP_FW: eeprom dump details */
info->eedump_len = 0;
/* TODO CHIMP FW: reg dump details */
@@ -832,6 +843,45 @@ static void bnxt_get_drvinfo(struct net_device *dev,
kfree(pkglog);
}
+static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+ if (bp->flags & BNXT_FLAG_WOL_CAP) {
+ wol->supported = WAKE_MAGIC;
+ if (bp->wol)
+ wol->wolopts = WAKE_MAGIC;
+ }
+}
+
+static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ if (!(bp->flags & BNXT_FLAG_WOL_CAP))
+ return -EINVAL;
+ if (!bp->wol) {
+ if (bnxt_hwrm_alloc_wol_fltr(bp))
+ return -EBUSY;
+ bp->wol = 1;
+ }
+ } else {
+ if (bp->wol) {
+ if (bnxt_hwrm_free_wol_fltr(bp))
+ return -EBUSY;
+ bp->wol = 0;
+ }
+ }
+ return 0;
+}
+
u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
{
u32 speed_mask = 0;
@@ -879,6 +929,9 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB) \
ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
50000baseCR2_Full);\
+ if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 100000baseCR4_Full);\
if ((fw_pause) & BNXT_LINK_PAUSE_RX) { \
ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
Pause); \
@@ -915,6 +968,9 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
50000baseCR2_Full)) \
(fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 100000baseCR4_Full)) \
+ (fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB; \
}
static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
@@ -977,6 +1033,8 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
return SPEED_40000;
case BNXT_LINK_SPEED_50GB:
return SPEED_50000;
+ case BNXT_LINK_SPEED_100GB:
+ return SPEED_100000;
default:
return SPEED_UNKNOWN;
}
@@ -1042,7 +1100,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
return 0;
}
-static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
+static u32 bnxt_get_fw_speed(struct net_device *dev, u32 ethtool_speed)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
@@ -1082,6 +1140,10 @@ static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
break;
+ case SPEED_100000:
+ if (support_spds & BNXT_LINK_SPEED_MSK_100GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB;
+ break;
default:
netdev_err(dev, "unsupported speed!\n");
break;
@@ -2128,12 +2190,372 @@ static int bnxt_set_phys_id(struct net_device *dev,
return rc;
}
+static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
+{
+ struct hwrm_selftest_irq_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_test_irq(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
+ int rc;
+
+ rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
+{
+ struct hwrm_port_mac_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);
+
+ req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
+ if (enable)
+ req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
+ else
+ req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
+ struct hwrm_port_phy_cfg_input *req)
+{
+ struct bnxt_link_info *link_info = &bp->link_info;
+ u16 fw_advertising = link_info->advertising;
+ u16 fw_speed;
+ int rc;
+
+ if (!link_info->autoneg)
+ return 0;
+
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
+ if (netif_carrier_ok(bp->dev))
+ fw_speed = bp->link_info.link_speed;
+ else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
+ else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
+ else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
+ else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
+
+ req->force_link_speed = cpu_to_le16(fw_speed);
+ req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
+ PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
+ rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT);
+ req->flags = 0;
+ req->force_link_speed = cpu_to_le16(0);
+ return rc;
+}
+
+static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable)
+{
+ struct hwrm_port_phy_cfg_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+
+ if (enable) {
+ bnxt_disable_an_for_lpbk(bp, &req);
+ req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
+ } else {
+ req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
+ }
+ req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_napi *bnapi,
+ u32 raw_cons, int pkt_size)
+{
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+ struct bnxt_sw_rx_bd *rx_buf;
+ struct rx_cmp *rxcmp;
+ u16 cp_cons, cons;
+ u8 *data;
+ u32 len;
+ int i;
+
+ cp_cons = RING_CMP(raw_cons);
+ rxcmp = (struct rx_cmp *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ cons = rxcmp->rx_cmp_opaque;
+ rx_buf = &rxr->rx_buf_ring[cons];
+ data = rx_buf->data_ptr;
+ len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
+ if (len != pkt_size)
+ return -EIO;
+ i = ETH_ALEN;
+ if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
+ return -EIO;
+ i += ETH_ALEN;
+ for ( ; i < pkt_size; i++) {
+ if (data[i] != (u8)(i & 0xff))
+ return -EIO;
+ }
+ return 0;
+}
+
+static int bnxt_poll_loopback(struct bnxt *bp, int pkt_size)
+{
+ struct bnxt_napi *bnapi = bp->bnapi[0];
+ struct bnxt_cp_ring_info *cpr;
+ struct tx_cmp *txcmp;
+ int rc = -EIO;
+ u32 raw_cons;
+ u32 cons;
+ int i;
+
+ cpr = &bnapi->cp_ring;
+ raw_cons = cpr->cp_raw_cons;
+ for (i = 0; i < 200; i++) {
+ cons = RING_CMP(raw_cons);
+ txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
+
+ if (!TX_CMP_VALID(txcmp, raw_cons)) {
+ udelay(5);
+ continue;
+ }
+
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
+ if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
+ rc = bnxt_rx_loopback(bp, bnapi, raw_cons, pkt_size);
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ break;
+ }
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ }
+ cpr->cp_raw_cons = raw_cons;
+ return rc;
+}
+
+static int bnxt_run_loopback(struct bnxt *bp)
+{
+ struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
+ int pkt_size, i = 0;
+ struct sk_buff *skb;
+ dma_addr_t map;
+ u8 *data;
+ int rc;
+
+ pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
+ skb = netdev_alloc_skb(bp->dev, pkt_size);
+ if (!skb)
+ return -ENOMEM;
+ data = skb_put(skb, pkt_size);
+ eth_broadcast_addr(data);
+ i += ETH_ALEN;
+ ether_addr_copy(&data[i], bp->dev->dev_addr);
+ i += ETH_ALEN;
+ for ( ; i < pkt_size; i++)
+ data[i] = (u8)(i & 0xff);
+
+ map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
+ PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, map)) {
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
+ bnxt_xmit_xdp(bp, txr, map, pkt_size, 0);
+
+ /* Sync BD data before updating doorbell */
+ wmb();
+
+ writel(DB_KEY_TX | txr->tx_prod, txr->tx_doorbell);
+ writel(DB_KEY_TX | txr->tx_prod, txr->tx_doorbell);
+ rc = bnxt_poll_loopback(bp, pkt_size);
+
+ dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+ return rc;
+}
+
+static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
+{
+ struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_selftest_exec_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ resp->test_success = 0;
+ req.flags = test_mask;
+ rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout);
+ *test_results = resp->test_success;
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+#define BNXT_DRV_TESTS 3
+#define BNXT_MACLPBK_TEST_IDX (bp->num_tests - BNXT_DRV_TESTS)
+#define BNXT_PHYLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 1)
+#define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2)
+
+static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
+ u64 *buf)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ bool offline = false;
+ u8 test_results = 0;
+ u8 test_mask = 0;
+ int rc, i;
+
+ if (!bp->num_tests || !BNXT_SINGLE_PF(bp))
+ return;
+ memset(buf, 0, sizeof(u64) * bp->num_tests);
+ if (!netif_running(dev)) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
+
+ if (etest->flags & ETH_TEST_FL_OFFLINE) {
+ if (bp->pf.active_vfs) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ netdev_warn(dev, "Offline tests cannot be run with active VFs\n");
+ return;
+ }
+ offline = true;
+ }
+
+ for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
+ u8 bit_val = 1 << i;
+
+ if (!(bp->test_info->offline_mask & bit_val))
+ test_mask |= bit_val;
+ else if (offline)
+ test_mask |= bit_val;
+ }
+ if (!offline) {
+ bnxt_run_fw_tests(bp, test_mask, &test_results);
+ } else {
+ rc = bnxt_close_nic(bp, false, false);
+ if (rc)
+ return;
+ bnxt_run_fw_tests(bp, test_mask, &test_results);
+
+ buf[BNXT_MACLPBK_TEST_IDX] = 1;
+ bnxt_hwrm_mac_loopback(bp, true);
+ msleep(250);
+ rc = bnxt_half_open_nic(bp);
+ if (rc) {
+ bnxt_hwrm_mac_loopback(bp, false);
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
+ if (bnxt_run_loopback(bp))
+ etest->flags |= ETH_TEST_FL_FAILED;
+ else
+ buf[BNXT_MACLPBK_TEST_IDX] = 0;
+
+ bnxt_hwrm_mac_loopback(bp, false);
+ bnxt_hwrm_phy_loopback(bp, true);
+ msleep(1000);
+ if (bnxt_run_loopback(bp)) {
+ buf[BNXT_PHYLPBK_TEST_IDX] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ bnxt_hwrm_phy_loopback(bp, false);
+ bnxt_half_close_nic(bp);
+ bnxt_open_nic(bp, false, true);
+ }
+ if (bnxt_test_irq(bp)) {
+ buf[BNXT_IRQ_TEST_IDX] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
+ u8 bit_val = 1 << i;
+
+ if ((test_mask & bit_val) && !(test_results & bit_val)) {
+ buf[i] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ }
+}
+
+void bnxt_ethtool_init(struct bnxt *bp)
+{
+ struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_selftest_qlist_input req = {0};
+ struct bnxt_test_info *test_info;
+ int i, rc;
+
+ if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
+ return;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto ethtool_init_exit;
+
+ test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
+ if (!test_info)
+ goto ethtool_init_exit;
+
+ bp->test_info = test_info;
+ bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
+ if (bp->num_tests > BNXT_MAX_TEST)
+ bp->num_tests = BNXT_MAX_TEST;
+
+ test_info->offline_mask = resp->offline_tests;
+ test_info->timeout = le16_to_cpu(resp->test_timeout);
+ if (!test_info->timeout)
+ test_info->timeout = HWRM_CMD_TIMEOUT;
+ for (i = 0; i < bp->num_tests; i++) {
+ char *str = test_info->string[i];
+ char *fw_str = resp->test0_name + i * 32;
+
+ if (i == BNXT_MACLPBK_TEST_IDX) {
+ strcpy(str, "Mac loopback test (offline)");
+ } else if (i == BNXT_PHYLPBK_TEST_IDX) {
+ strcpy(str, "Phy loopback test (offline)");
+ } else if (i == BNXT_IRQ_TEST_IDX) {
+ strcpy(str, "Interrupt_test (offline)");
+ } else {
+ strlcpy(str, fw_str, ETH_GSTRING_LEN);
+ strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
+ if (test_info->offline_mask & (1 << i))
+ strncat(str, " (offline)",
+ ETH_GSTRING_LEN - strlen(str));
+ else
+ strncat(str, " (online)",
+ ETH_GSTRING_LEN - strlen(str));
+ }
+ }
+
+ethtool_init_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+}
+
+void bnxt_ethtool_free(struct bnxt *bp)
+{
+ kfree(bp->test_info);
+ bp->test_info = NULL;
+}
+
const struct ethtool_ops bnxt_ethtool_ops = {
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
.get_pauseparam = bnxt_get_pauseparam,
.set_pauseparam = bnxt_set_pauseparam,
.get_drvinfo = bnxt_get_drvinfo,
+ .get_wol = bnxt_get_wol,
+ .set_wol = bnxt_set_wol,
.get_coalesce = bnxt_get_coalesce,
.set_coalesce = bnxt_set_coalesce,
.get_msglevel = bnxt_get_msglevel,
@@ -2161,4 +2583,5 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_module_eeprom = bnxt_get_module_eeprom,
.nway_reset = bnxt_nway_reset,
.set_phys_id = bnxt_set_phys_id,
+ .self_test = bnxt_self_test,
};
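Layout of the self-test result buffer, as implied by the index macros above (the count assumes the firmware reports five tests; with BNXT_DRV_TESTS == 3 that gives bp->num_tests == 8, the BNXT_MAX_TEST cap):

/*   buf[0]..buf[4]  firmware tests (bit i of test_mask/test_results)
 *   buf[5]          MAC loopback   (BNXT_MACLPBK_TEST_IDX, offline only)
 *   buf[6]          PHY loopback   (BNXT_PHYLPBK_TEST_IDX, offline only)
 *   buf[7]          IRQ test       (BNXT_IRQ_TEST_IDX)
 *
 * A non-zero buf[i] means test i failed; ETH_TEST_FL_FAILED is also
 * set in etest->flags. From userspace this maps onto "ethtool -t".
 */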
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index ed1e555292e9..f1bc90b6fb5b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -38,5 +39,7 @@ extern const struct ethtool_ops bnxt_ethtool_ops;
u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
u32 bnxt_fw_to_ethtool_speed(u16);
u16 bnxt_get_fw_auto_link_speeds(u32);
+void bnxt_ethtool_init(struct bnxt *bp);
+void bnxt_ethtool_free(struct bnxt *bp);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 6e275c23d68b..7dc71bb95837 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -11,19 +11,21 @@
#ifndef BNXT_HSI_H
#define BNXT_HSI_H
-/* HSI and HWRM Specification 1.7.0 */
+/* HSI and HWRM Specification 1.7.6 */
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 7
-#define HWRM_VERSION_UPDATE 0
+#define HWRM_VERSION_UPDATE 6
-#define HWRM_VERSION_STR "1.7.0"
+#define HWRM_VERSION_RSVD 2 /* non-zero means beta version */
+
+#define HWRM_VERSION_STR "1.7.6.2"
/*
* Following is the signature for HWRM message field that indicates not
* applicable (All F's). Need to cast it the size of the field if needed.
*/
#define HWRM_NA_SIGNATURE ((__le32)(-1))
#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
-#define HWRM_MAX_RESP_LEN (176) /* hwrm_func_qstats */
+#define HWRM_MAX_RESP_LEN (248) /* hwrm_selftest_qlist */
#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
#define HW_HASH_KEY_SIZE 40
#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
@@ -571,9 +573,10 @@ struct hwrm_ver_get_output {
__le16 max_req_win_len;
__le16 max_resp_len;
__le16 def_req_timeout;
+ u8 init_pending;
+ #define VER_GET_RESP_INIT_PENDING_DEV_NOT_RDY 0x1UL
u8 unused_0;
u8 unused_1;
- u8 unused_2;
u8 valid;
};
@@ -809,6 +812,8 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
#define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
#define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -827,10 +832,12 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
#define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
- u8 unused_0;
+ u8 port_pf_cnt;
+ #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
__le16 dflt_vnic_id;
- u8 unused_1;
- u8 unused_2;
+ u8 host_cnt;
+ #define FUNC_QCFG_RESP_HOST_CNT_UNAVAIL 0x0UL
+ u8 unused_0;
__le32 min_bw;
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
@@ -867,12 +874,12 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
#define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
#define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
- u8 unused_3;
+ u8 unused_1;
__le16 alloc_vfs;
__le32 alloc_mcast_filters;
__le32 alloc_hw_ring_grps;
__le16 alloc_sp_tx_rings;
- u8 unused_4;
+ u8 unused_2;
u8 valid;
};
@@ -888,16 +895,13 @@ struct hwrm_func_cfg_input {
u8 unused_0;
u8 unused_1;
__le32 flags;
- #define FUNC_CFG_REQ_FLAGS_PROM_MODE 0x1UL
- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK 0x2UL
- #define FUNC_CFG_REQ_FLAGS_SRC_IP_ADDR_CHECK 0x4UL
- #define FUNC_CFG_REQ_FLAGS_VLAN_PRI_MATCH 0x8UL
- #define FUNC_CFG_REQ_FLAGS_DFLT_PRI_NOMATCH 0x10UL
- #define FUNC_CFG_REQ_FLAGS_DISABLE_PAUSE 0x20UL
- #define FUNC_CFG_REQ_FLAGS_DISABLE_STP 0x40UL
- #define FUNC_CFG_REQ_FLAGS_DISABLE_LLDP 0x80UL
- #define FUNC_CFG_REQ_FLAGS_DISABLE_PTPV2 0x100UL
- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE 0x200UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
+ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
+ #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
+ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
+ #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -1013,7 +1017,7 @@ struct hwrm_func_qstats_output {
__le64 tx_ucast_pkts;
__le64 tx_mcast_pkts;
__le64 tx_bcast_pkts;
- __le64 tx_err_pkts;
+ __le64 tx_discard_pkts;
__le64 tx_drop_pkts;
__le64 tx_ucast_bytes;
__le64 tx_mcast_bytes;
@@ -1021,7 +1025,7 @@ struct hwrm_func_qstats_output {
__le64 rx_ucast_pkts;
__le64 rx_mcast_pkts;
__le64 rx_bcast_pkts;
- __le64 rx_err_pkts;
+ __le64 rx_discard_pkts;
__le64 rx_drop_pkts;
__le64 rx_ucast_bytes;
__le64 rx_mcast_bytes;
@@ -4743,25 +4747,72 @@ struct hwrm_temp_monitor_query_output {
u8 valid;
};
-/* hwrm_nvm_read */
-/* Input (40 bytes) */
-struct hwrm_nvm_read_input {
+/* hwrm_wol_filter_alloc */
+/* Input (64 bytes) */
+struct hwrm_wol_filter_alloc_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
- __le64 host_dest_addr;
- __le16 dir_idx;
+ __le32 flags;
+ __le32 enables;
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL
+ #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL
+ __le16 port_id;
+ u8 wol_type;
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL
u8 unused_0;
- u8 unused_1;
- __le32 offset;
- __le32 len;
+ __le32 unused_1;
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_buf_size;
+ __le16 pattern_mask_size;
__le32 unused_2;
+ __le64 pattern_buf_addr;
+ __le64 pattern_mask_addr;
};
/* Output (16 bytes) */
-struct hwrm_nvm_read_output {
+struct hwrm_wol_filter_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_wol_filter_free */
+/* Input (32 bytes) */
+struct hwrm_wol_filter_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL
+ __le32 enables;
+ #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL
+ __le16 port_id;
+ u8 wol_filter_id;
+ u8 unused_0[5];
+};
+
+/* Output (16 bytes) */
+struct hwrm_wol_filter_free_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
@@ -4773,21 +4824,107 @@ struct hwrm_nvm_read_output {
u8 valid;
};
-/* hwrm_nvm_raw_dump */
-/* Input (32 bytes) */
-struct hwrm_nvm_raw_dump_input {
+/* hwrm_wol_filter_qcfg */
+/* Input (56 bytes) */
+struct hwrm_wol_filter_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 handle;
+ __le32 unused_0;
+ __le64 pattern_buf_addr;
+ __le16 pattern_buf_size;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3[3];
+ u8 unused_4;
+ __le64 pattern_mask_addr;
+ __le16 pattern_mask_size;
+ __le16 unused_5[3];
+};
+
+/* Output (32 bytes) */
+struct hwrm_wol_filter_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 next_handle;
+ u8 wol_filter_id;
+ u8 wol_type;
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL
+ #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL
+ __le32 unused_0;
+ u8 mac_address[6];
+ __le16 pattern_offset;
+ __le16 pattern_size;
+ __le16 pattern_mask_size;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_wol_reason_qcfg */
+/* Input (40 bytes) */
+struct hwrm_wol_reason_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2[3];
+ u8 unused_3;
+ __le64 wol_pkt_buf_addr;
+ __le16 wol_pkt_buf_size;
+ __le16 unused_4[3];
+};
+
+/* Output (16 bytes) */
+struct hwrm_wol_reason_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 wol_filter_id;
+ u8 wol_reason;
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL
+ #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL
+ u8 wol_pkt_len;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_nvm_read */
+/* Input (40 bytes) */
+struct hwrm_nvm_read_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le64 host_dest_addr;
+ __le16 dir_idx;
+ u8 unused_0;
+ u8 unused_1;
__le32 offset;
__le32 len;
+ __le32 unused_2;
};
/* Output (16 bytes) */
-struct hwrm_nvm_raw_dump_output {
+struct hwrm_nvm_read_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
@@ -4881,6 +5018,15 @@ struct hwrm_nvm_write_output {
u8 valid;
};
+/* Command specific Error Codes (8 bytes) */
+struct hwrm_nvm_write_cmd_err {
+ u8 code;
+ #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ u8 unused_0[7];
+};
+
/* hwrm_nvm_modify */
/* Input (40 bytes) */
struct hwrm_nvm_modify_input {
@@ -5112,6 +5258,100 @@ struct hwrm_nvm_install_update_cmd_err {
u8 unused_0[7];
};
+/* hwrm_selftest_qlist */
+/* Input (16 bytes) */
+struct hwrm_selftest_qlist_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (248 bytes) */
+struct hwrm_selftest_qlist_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_tests;
+ u8 available_tests;
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
+ u8 offline_tests;
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
+ u8 unused_0;
+ __le16 test_timeout;
+ u8 unused_1;
+ u8 unused_2;
+ char test0_name[32];
+ char test1_name[32];
+ char test2_name[32];
+ char test3_name[32];
+ char test4_name[32];
+ char test5_name[32];
+ char test6_name[32];
+ char test7_name[32];
+};
+
+/* hwrm_selftest_exec */
+/* Input (24 bytes) */
+struct hwrm_selftest_exec_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 flags;
+ #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_selftest_exec_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 requested_tests;
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
+ u8 test_success;
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
+ __le16 unused_0[3];
+};
+
+/* hwrm_selftest_irq */
+/* Input (16 bytes) */
+struct hwrm_selftest_irq_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* Output (8 bytes) */
+struct hwrm_selftest_irq_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+};
+
/* Hardware Resource Manager Specification */
/* Input (16 bytes) */
struct input {
@@ -5130,6 +5370,16 @@ struct output {
__le16 resp_len;
};
+/* Short Command Structure (16 bytes) */
+struct hwrm_short_input {
+ __le16 req_type;
+ __le16 signature;
+ #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
+ __le16 unused_0;
+ __le16 size;
+ __le64 req_addr;
+};
+
/* Command numbering (8 bytes) */
struct cmd_nums {
__le16 req_type;
@@ -5252,11 +5502,15 @@ struct cmd_nums {
#define HWRM_CFA_FLOW_FLUSH (0x105UL)
#define HWRM_CFA_FLOW_STATS (0x106UL)
#define HWRM_CFA_FLOW_INFO (0x107UL)
+ #define HWRM_SELFTEST_QLIST (0x200UL)
+ #define HWRM_SELFTEST_EXEC (0x201UL)
+ #define HWRM_SELFTEST_IRQ (0x202UL)
#define HWRM_DBG_READ_DIRECT (0xff10UL)
#define HWRM_DBG_READ_INDIRECT (0xff11UL)
#define HWRM_DBG_WRITE_DIRECT (0xff12UL)
#define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
#define HWRM_DBG_DUMP (0xff14UL)
+ #define HWRM_NVM_FACTORY_DEFAULTS (0xffeeUL)
#define HWRM_NVM_VALIDATE_OPTION (0xffefUL)
#define HWRM_NVM_FLUSH (0xfff0UL)
#define HWRM_NVM_GET_VARIABLE (0xfff1UL)
@@ -5464,6 +5718,7 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
#define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
#define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
__le16 len;
u8 version;
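
As an aside on the short command structure added above, here is a hedged userspace sketch of how its fields would be populated. The layout and the 0x4321 signature are taken from the definition in this patch; the little-endian types are replaced with plain uint*_t, and no byte-order conversion or DMA mapping is done, so this is illustration only. HWRM_VER_GET (0x0) is used as the example request type.

/* Illustration of filling the 16-byte short command header. */
#include <stdint.h>
#include <stdio.h>

struct demo_hwrm_short_input {
	uint16_t req_type;
	uint16_t signature;
#define DEMO_SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321
	uint16_t unused_0;
	uint16_t size;
	uint64_t req_addr;
};

int main(void)
{
	struct demo_hwrm_short_input req = {
		.req_type  = 0x0000,          /* e.g. HWRM_VER_GET */
		.signature = DEMO_SHORT_REQ_SIGNATURE_SHORT_CMD,
		.size      = 128,             /* length of the full request in DMA memory */
		.req_addr  = 0x12345000ULL,   /* demo stand-in for the DMA address */
	};

	printf("req_type=0x%04x signature=0x%04x size=%u req_addr=0x%llx\n",
	       (unsigned)req.req_type, (unsigned)req.signature,
	       (unsigned)req.size, (unsigned long long)req.req_addr);
	return 0;
}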
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 0b8cd7443843..b8e7248294d9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -84,6 +85,9 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
u32 func_flags;
int rc;
+ if (bp->hwrm_spec_code < 0x10701)
+ return -ENOTSUPP;
+
rc = bnxt_vf_ndo_prep(bp, vf_id);
if (rc)
return rc;
@@ -96,9 +100,9 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
func_flags = vf->func_flags;
if (setting)
- func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+ func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
else
- func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
+ func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
 /* With the 1.7.1+ spec the driver sets an explicit ENABLE or DISABLE
 * bit rather than toggling a single flag.
 */
/*TODO: if the driver supports VLAN filter on guest VLAN,
* the spoof check should also include vlan anti-spoofing
*/
@@ -134,8 +138,11 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
ivi->max_tx_rate = vf->max_tx_rate;
ivi->min_tx_rate = vf->min_tx_rate;
ivi->vlan = vf->vlan;
- ivi->qos = vf->flags & BNXT_VF_QOS;
- ivi->spoofchk = vf->flags & BNXT_VF_SPOOFCHK;
+ if (vf->flags & BNXT_VF_QOS)
+ ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
+ else
+ ivi->qos = 0;
+ ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
if (!(vf->flags & BNXT_VF_LINK_FORCED))
ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
else if (vf->flags & BNXT_VF_LINK_UP)
@@ -300,7 +307,6 @@ static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
for (i = 0; i < num_vfs; i++) {
vf = &bp->pf.vf[i];
memset(vf, 0, sizeof(*vf));
- vf->flags = BNXT_VF_QOS | BNXT_VF_LINK_UP;
}
return 0;
}
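
The qos value reported by bnxt_get_vf_config() above is the 802.1p priority carried in the upper bits of the stored VLAN tag. A small standalone sketch of that extraction follows; DEMO_VLAN_PRIO_SHIFT mirrors the kernel's VLAN_PRIO_SHIFT (13) and DEMO_VLAN_VID_MASK mirrors VLAN_VID_MASK (0x0fff), but everything else is demo scaffolding.

/* Illustration of splitting a VLAN TCI into VID and priority (qos). */
#include <stdio.h>

#define DEMO_VLAN_PRIO_SHIFT 13
#define DEMO_VLAN_VID_MASK   0x0fff

int main(void)
{
	unsigned int vlan_tci = (5u << DEMO_VLAN_PRIO_SHIFT) | 100; /* prio 5, VID 100 */

	printf("VLAN ID:  %u\n", vlan_tci & DEMO_VLAN_VID_MASK);
	printf("QoS/prio: %u\n", vlan_tci >> DEMO_VLAN_PRIO_SHIFT);
	return 0;
}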
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 1ab72e4820af..dbc8d977fc5a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -1,6 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 899c30fb5188..9dae32756767 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -19,11 +19,10 @@
#include "bnxt.h"
#include "bnxt_xdp.h"
-static void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len, u16 rx_prod)
+void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ dma_addr_t mapping, u32 len, u16 rx_prod)
{
struct bnxt_sw_tx_bd *tx_buf;
- struct tx_bd_ext *txbd1;
struct tx_bd *txbd;
u32 flags;
u16 prod;
@@ -33,23 +32,13 @@ static void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
tx_buf->rx_prod = rx_prod;
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
- flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
- (2 << TX_BD_FLAGS_BD_CNT_SHIFT) | TX_BD_FLAGS_COAL_NOW |
+ flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
txbd->tx_bd_opaque = prod;
txbd->tx_bd_haddr = cpu_to_le64(mapping);
prod = NEXT_TX(prod);
- txbd1 = (struct tx_bd_ext *)
- &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
-
- txbd1->tx_bd_hsize_lflags = cpu_to_le32(0);
- txbd1->tx_bd_mss = cpu_to_le32(0);
- txbd1->tx_bd_cfa_action = cpu_to_le32(0);
- txbd1->tx_bd_cfa_meta = cpu_to_le32(0);
-
- prod = NEXT_TX(prod);
txr->tx_prod = prod;
}
@@ -66,7 +55,6 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
for (i = 0; i < nr_pkts; i++) {
last_tx_cons = tx_cons;
tx_cons = NEXT_TX(tx_cons);
- tx_cons = NEXT_TX(tx_cons);
}
txr->tx_cons = tx_cons;
if (bnxt_tx_avail(bp, txr) == bp->tx_ring_size) {
@@ -133,7 +121,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
return false;
case XDP_TX:
- if (tx_avail < 2) {
+ if (tx_avail < 1) {
trace_xdp_exception(bp->dev, xdp_prog, act);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index b529f2c5355b..12a5ad66b564 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -10,6 +10,8 @@
#ifndef BNXT_XDP_H
#define BNXT_XDP_H
+void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+ dma_addr_t mapping, u32 len, u16 rx_prod);
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
struct page *page, u8 **data_ptr, unsigned int *len,
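
The XDP change above packs a single buffer descriptor per transmitted packet. The sketch below illustrates the kind of bit packing bnxt_xmit_xdp() performs on tx_bd_len_flags_type; the DEMO_* shifts and flag values are placeholders chosen for the example (the real definitions live in bnxt.h) and the length-hint lookup is omitted.

/* Illustration of packing length, BD count and packet-end into one word. */
#include <stdio.h>

#define DEMO_TX_BD_LEN_SHIFT          16
#define DEMO_TX_BD_FLAGS_BD_CNT_SHIFT 8
#define DEMO_TX_BD_FLAGS_PACKET_END   0x40

int main(void)
{
	unsigned int len = 1500;   /* frame length in bytes */
	unsigned int flags;

	flags = (len << DEMO_TX_BD_LEN_SHIFT) |          /* length field */
		(1 << DEMO_TX_BD_FLAGS_BD_CNT_SHIFT) |   /* one BD per packet */
		DEMO_TX_BD_FLAGS_PACKET_END;             /* last BD of the packet */

	printf("tx_bd_len_flags_type = 0x%08x\n", flags);
	printf("  len     = %u\n", flags >> DEMO_TX_BD_LEN_SHIFT);
	printf("  bd_cnt  = %u\n", (flags >> DEMO_TX_BD_FLAGS_BD_CNT_SHIFT) & 0x1f);
	printf("  pkt_end = %s\n", (flags & DEMO_TX_BD_FLAGS_PACKET_END) ? "yes" : "no");
	return 0;
}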
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 365895ed3c3e..a205a9ff9e17 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -621,7 +621,7 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
/* GENET TDMA hardware does not support a configurable timeout, but will
* always generate an interrupt either after MBDONE packets have been
- * transmitted, or when the ring is emtpy.
+ * transmitted, or when the ring is empty.
*/
if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
@@ -707,6 +707,19 @@ struct bcmgenet_stats {
.reg_offset = offset, \
}
+#define STAT_GENET_Q(num) \
+ STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
+ tx_rings[num].packets), \
+ STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
+ tx_rings[num].bytes), \
+ STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
+ rx_rings[num].bytes), \
+ STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
+ rx_rings[num].packets), \
+ STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
+ rx_rings[num].errors), \
+ STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
+ rx_rings[num].dropped)
/* There is a 0xC gap between the end of RX and beginning of TX stats and then
* between the end of TX stats and the beginning of the RX RUNT
@@ -801,6 +814,12 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
+ /* Per-queue statistics (TX and RX rings) */
+ STAT_GENET_Q(0),
+ STAT_GENET_Q(1),
+ STAT_GENET_Q(2),
+ STAT_GENET_Q(3),
+ STAT_GENET_Q(16),
};
#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -1078,8 +1097,17 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
/* Power down LED */
if (priv->hw_params->flags & GENET_HAS_EXT) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- reg |= (EXT_PWR_DOWN_PHY |
- EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+ if (GENET_IS_V5(priv))
+ reg |= EXT_PWR_DOWN_PHY_EN |
+ EXT_PWR_DOWN_PHY_RD |
+ EXT_PWR_DOWN_PHY_SD |
+ EXT_PWR_DOWN_PHY_RX |
+ EXT_PWR_DOWN_PHY_TX |
+ EXT_IDDQ_GLBL_PWR;
+ else
+ reg |= EXT_PWR_DOWN_PHY;
+
+ reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
bcmgenet_phy_power_set(priv->dev, false);
@@ -1104,12 +1132,34 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
switch (mode) {
case GENET_POWER_PASSIVE:
- reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
- EXT_PWR_DOWN_BIAS);
- /* fallthrough */
+ reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+ if (GENET_IS_V5(priv)) {
+ reg &= ~(EXT_PWR_DOWN_PHY_EN |
+ EXT_PWR_DOWN_PHY_RD |
+ EXT_PWR_DOWN_PHY_SD |
+ EXT_PWR_DOWN_PHY_RX |
+ EXT_PWR_DOWN_PHY_TX |
+ EXT_IDDQ_GLBL_PWR);
+ reg |= EXT_PHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ mdelay(1);
+
+ reg &= ~EXT_PHY_RESET;
+ } else {
+ reg &= ~EXT_PWR_DOWN_PHY;
+ reg |= EXT_PWR_DN_EN_LD;
+ }
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ bcmgenet_phy_power_set(priv->dev, true);
+ bcmgenet_mii_reset(priv->dev);
+ break;
+
case GENET_POWER_CABLE_SENSE:
/* enable APD */
- reg |= EXT_PWR_DN_EN_LD;
+ if (!GENET_IS_V5(priv)) {
+ reg |= EXT_PWR_DN_EN_LD;
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ }
break;
case GENET_POWER_WOL_MAGIC:
bcmgenet_wol_power_up_cfg(priv, mode);
@@ -1117,39 +1167,20 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
default:
break;
}
-
- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- if (mode == GENET_POWER_PASSIVE) {
- bcmgenet_phy_power_set(priv->dev, true);
- bcmgenet_mii_reset(priv->dev);
- }
}
/* ioctl handle special commands that are not present in ethtool. */
static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- int val = 0;
if (!netif_running(dev))
return -EINVAL;
- switch (cmd) {
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- if (!priv->phydev)
- val = -ENODEV;
- else
- val = phy_mii_ioctl(priv->phydev, rq, cmd);
- break;
-
- default:
- val = -EINVAL;
- break;
- }
+ if (!priv->phydev)
+ return -ENODEV;
- return val;
+ return phy_mii_ioctl(priv->phydev, rq, cmd);
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
@@ -1240,14 +1271,18 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
unsigned int txbds_ready;
unsigned int txbds_processed = 0;
- /* Compute how many buffers are transmitted since last xmit call */
- c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
- c_index &= DMA_C_INDEX_MASK;
-
- if (likely(c_index >= ring->c_index))
- txbds_ready = c_index - ring->c_index;
+ /* Clear status before servicing to reduce spurious interrupts */
+ if (ring->index == DESC_INDEX)
+ bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
+ INTRL2_CPU_CLEAR);
else
- txbds_ready = (DMA_C_INDEX_MASK + 1) - ring->c_index + c_index;
+ bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
+ INTRL2_CPU_CLEAR);
+
+ /* Compute how many buffers are transmitted since last xmit call */
+ c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
+ & DMA_C_INDEX_MASK;
+ txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;
netif_dbg(priv, tx_done, dev,
"%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
@@ -1280,15 +1315,15 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
}
ring->free_bds += txbds_processed;
- ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
+ ring->c_index = c_index;
- dev->stats.tx_packets += pkts_compl;
- dev->stats.tx_bytes += bytes_compl;
+ ring->packets += pkts_compl;
+ ring->bytes += bytes_compl;
netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
pkts_compl, bytes_compl);
- return pkts_compl;
+ return txbds_processed;
}
static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
@@ -1657,18 +1692,28 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned long dma_flag;
int len;
unsigned int rxpktprocessed = 0, rxpkttoprocess;
- unsigned int p_index;
+ unsigned int p_index, mask;
unsigned int discards;
unsigned int chksum_ok = 0;
+ /* Clear status before servicing to reduce spurious interrupts */
+ if (ring->index == DESC_INDEX) {
+ bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
+ INTRL2_CPU_CLEAR);
+ } else {
+ mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
+ bcmgenet_intrl2_1_writel(priv,
+ mask,
+ INTRL2_CPU_CLEAR);
+ }
+
p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
DMA_P_INDEX_DISCARD_CNT_MASK;
if (discards > ring->old_discards) {
discards = discards - ring->old_discards;
- dev->stats.rx_missed_errors += discards;
- dev->stats.rx_errors += discards;
+ ring->errors += discards;
ring->old_discards += discards;
/* Clear HW register when we reach 75% of maximum 0xFFFF */
@@ -1680,12 +1725,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
}
p_index &= DMA_P_INDEX_MASK;
-
- if (likely(p_index >= ring->c_index))
- rxpkttoprocess = p_index - ring->c_index;
- else
- rxpkttoprocess = (DMA_C_INDEX_MASK + 1) - ring->c_index +
- p_index;
+ rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;
netif_dbg(priv, rx_status, dev,
"RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
@@ -1696,7 +1736,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
skb = bcmgenet_rx_refill(priv, cb);
if (unlikely(!skb)) {
- dev->stats.rx_dropped++;
+ ring->dropped++;
goto next;
}
@@ -1724,7 +1764,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
- dev->stats.rx_errors++;
+ ring->errors++;
dev_kfree_skb_any(skb);
goto next;
}
@@ -1773,8 +1813,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
/*Finish setting up the received SKB and send it to the kernel*/
skb->protocol = eth_type_trans(skb, priv->dev);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
+ ring->packets++;
+ ring->bytes += len;
if (dma_flag & DMA_RX_MULT)
dev->stats.multicast++;
@@ -1912,10 +1952,8 @@ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
/* Mask all interrupts.*/
bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
- bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
- bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
}
static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
@@ -1942,8 +1980,6 @@ static int init_umac(struct bcmgenet_priv *priv)
int ret;
u32 reg;
u32 int0_enable = 0;
- u32 int1_enable = 0;
- int i;
dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
@@ -1970,12 +2006,6 @@ static int init_umac(struct bcmgenet_priv *priv)
bcmgenet_intr_disable(priv);
- /* Enable Rx default queue 16 interrupts */
- int0_enable |= UMAC_IRQ_RXDMA_DONE;
-
- /* Enable Tx default queue 16 interrupts */
- int0_enable |= UMAC_IRQ_TXDMA_DONE;
-
/* Configure backpressure vectors for MoCA */
if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
reg = bcmgenet_bp_mc_get(priv);
@@ -1993,18 +2023,8 @@ static int init_umac(struct bcmgenet_priv *priv)
if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
- /* Enable Rx priority queue interrupts */
- for (i = 0; i < priv->hw_params->rx_queues; ++i)
- int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
-
- /* Enable Tx priority queue interrupts */
- for (i = 0; i < priv->hw_params->tx_queues; ++i)
- int1_enable |= (1 << i);
-
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
- bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
- /* Enable rx/tx engine.*/
dev_dbg(kdev, "done init umac\n");
return 0;
@@ -2136,22 +2156,33 @@ static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
+ u32 int0_enable = UMAC_IRQ_TXDMA_DONE;
+ u32 int1_enable = 0;
struct bcmgenet_tx_ring *ring;
for (i = 0; i < priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_enable(&ring->napi);
+ int1_enable |= (1 << i);
}
ring = &priv->tx_rings[DESC_INDEX];
napi_enable(&ring->napi);
+
+ bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
}
static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
+ u32 int0_disable = UMAC_IRQ_TXDMA_DONE;
+ u32 int1_disable = 0xffff;
struct bcmgenet_tx_ring *ring;
+ bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);
+
for (i = 0; i < priv->hw_params->tx_queues; ++i) {
ring = &priv->tx_rings[i];
napi_disable(&ring->napi);
@@ -2264,22 +2295,33 @@ static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv)
static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
+ u32 int0_enable = UMAC_IRQ_RXDMA_DONE;
+ u32 int1_enable = 0;
struct bcmgenet_rx_ring *ring;
for (i = 0; i < priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_enable(&ring->napi);
+ int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i));
}
ring = &priv->rx_rings[DESC_INDEX];
napi_enable(&ring->napi);
+
+ bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
}
static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
{
unsigned int i;
+ u32 int0_disable = UMAC_IRQ_RXDMA_DONE;
+ u32 int1_disable = 0xffff << UMAC_IRQ1_RX_INTR_SHIFT;
struct bcmgenet_rx_ring *ring;
+ bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET);
+
for (i = 0; i < priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_disable(&ring->napi);
@@ -2634,6 +2676,15 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
}
}
+ if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
+ UMAC_IRQ_PHY_DET_F |
+ UMAC_IRQ_LINK_EVENT |
+ UMAC_IRQ_HFB_SM |
+ UMAC_IRQ_HFB_MM)) {
+ /* all other interrupts of interest are handled in the bottom half */
+ schedule_work(&priv->bcmgenet_irq_work);
+ }
+
if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
wake_up(&priv->wq);
@@ -2921,7 +2972,7 @@ static int bcmgenet_close(struct net_device *dev)
if (ret)
return ret;
- /* Disable MAC transmit. TX DMA disabled have to done before this */
+ /* Disable MAC transmit. TX DMA disabled must be done before this */
umac_enable_set(priv, CMD_TX_EN, false);
/* tx reclaim */
@@ -3101,6 +3152,48 @@ static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
+static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ unsigned long tx_bytes = 0, tx_packets = 0;
+ unsigned long rx_bytes = 0, rx_packets = 0;
+ unsigned long rx_errors = 0, rx_dropped = 0;
+ struct bcmgenet_tx_ring *tx_ring;
+ struct bcmgenet_rx_ring *rx_ring;
+ unsigned int q;
+
+ for (q = 0; q < priv->hw_params->tx_queues; q++) {
+ tx_ring = &priv->tx_rings[q];
+ tx_bytes += tx_ring->bytes;
+ tx_packets += tx_ring->packets;
+ }
+ tx_ring = &priv->tx_rings[DESC_INDEX];
+ tx_bytes += tx_ring->bytes;
+ tx_packets += tx_ring->packets;
+
+ for (q = 0; q < priv->hw_params->rx_queues; q++) {
+ rx_ring = &priv->rx_rings[q];
+
+ rx_bytes += rx_ring->bytes;
+ rx_packets += rx_ring->packets;
+ rx_errors += rx_ring->errors;
+ rx_dropped += rx_ring->dropped;
+ }
+ rx_ring = &priv->rx_rings[DESC_INDEX];
+ rx_bytes += rx_ring->bytes;
+ rx_packets += rx_ring->packets;
+ rx_errors += rx_ring->errors;
+ rx_dropped += rx_ring->dropped;
+
+ dev->stats.tx_bytes = tx_bytes;
+ dev->stats.tx_packets = tx_packets;
+ dev->stats.rx_bytes = rx_bytes;
+ dev->stats.rx_packets = rx_packets;
+ dev->stats.rx_errors = rx_errors;
+ dev->stats.rx_missed_errors = rx_errors;
+ return &dev->stats;
+}
+
static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_open = bcmgenet_open,
.ndo_stop = bcmgenet_close,
@@ -3113,6 +3206,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = bcmgenet_poll_controller,
#endif
+ .ndo_get_stats = bcmgenet_get_stats,
};
/* Array of GENET hardware parameters/characteristics */
@@ -3186,6 +3280,25 @@ static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
},
+ [GENET_V5] = {
+ .tx_queues = 4,
+ .tx_bds_per_q = 32,
+ .rx_queues = 0,
+ .rx_bds_per_q = 0,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .hfb_filter_size = 128,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 3,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT |
+ GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
+ },
};
/* Infer hardware parameters from the detected GENET version */
@@ -3196,26 +3309,22 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
u8 major;
u16 gphy_rev;
- if (GENET_IS_V4(priv)) {
+ if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v4;
priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
- priv->version = GENET_V4;
} else if (GENET_IS_V3(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
- priv->version = GENET_V3;
} else if (GENET_IS_V2(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
- priv->version = GENET_V2;
} else if (GENET_IS_V1(priv)) {
bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
genet_dma_ring_regs = genet_dma_ring_regs_v123;
priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
- priv->version = GENET_V1;
}
/* enum genet_version starts at 1 */
@@ -3225,7 +3334,9 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
/* Read GENET HW version */
reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
major = (reg >> 24 & 0x0f);
- if (major == 5)
+ if (major == 6)
+ major = 5;
+ else if (major == 5)
major = 4;
else if (major == 0)
major = 1;
@@ -3253,19 +3364,25 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
*/
gphy_rev = reg & 0xffff;
+ if (GENET_IS_V5(priv)) {
+ /* The EPHY revision should come from the MDIO registers of
+ * the PHY, not from GENET.
+ */
+ if (gphy_rev != 0) {
+ pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
+ gphy_rev);
+ }
/* This is reserved so should require special treatment */
- if (gphy_rev == 0 || gphy_rev == 0x01ff) {
+ } else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
return;
- }
-
/* This is the good old scheme, just GPHY major, no minor nor patch */
- if ((gphy_rev & 0xf0) != 0)
+ } else if ((gphy_rev & 0xf0) != 0) {
priv->gphy_rev = gphy_rev << 8;
-
/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
- else if ((gphy_rev & 0xff00) != 0)
+ } else if ((gphy_rev & 0xff00) != 0) {
priv->gphy_rev = gphy_rev;
+ }
#ifdef CONFIG_PHYS_ADDR_T_64BIT
if (!(params->flags & GENET_HAS_40BITS))
@@ -3295,6 +3412,7 @@ static const struct of_device_id bcmgenet_match[] = {
{ .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
{ .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
+ { .compatible = "brcm,genet-v5", .data = (void *)GENET_V5 },
{ },
};
MODULE_DEVICE_TABLE(of, bcmgenet_match);
@@ -3493,7 +3611,7 @@ static int bcmgenet_suspend(struct device *d)
if (ret)
return ret;
- /* Disable MAC transmit. TX DMA disabled have to done before this */
+ /* Disable MAC transmit. TX DMA disabled must be done before this */
umac_enable_set(priv, CMD_TX_EN, false);
/* tx reclaim */
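
The reclaim and receive paths above replace an explicit wraparound if/else with a masked subtraction. The following standalone snippet shows why that works when the hardware index is a free-running counter masked to a power of two; DEMO_C_INDEX_MASK stands in for DMA_C_INDEX_MASK and the values are chosen for the example.

/* Illustration of masked index arithmetic handling wraparound. */
#include <stdio.h>

#define DEMO_C_INDEX_MASK 0xffff

static unsigned int ready(unsigned int hw_index, unsigned int sw_index)
{
	/* Works whether or not hw_index has wrapped past the mask. */
	return (hw_index - sw_index) & DEMO_C_INDEX_MASK;
}

int main(void)
{
	/* No wrap: hardware is 5 ahead of software. */
	printf("ready = %u\n", ready(105, 100));
	/* Wrap: hardware wrapped from 0xfffe past 0xffff back to 3; still 5. */
	printf("ready = %u\n", ready(3, 0xfffe));
	return 0;
}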
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index db7f289d65ae..efd07020b89f 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -355,8 +355,14 @@ struct bcmgenet_mib_counters {
#define EXT_PWR_DN_EN_LD (1 << 3)
#define EXT_ENERGY_DET (1 << 4)
#define EXT_IDDQ_FROM_PHY (1 << 5)
+#define EXT_IDDQ_GLBL_PWR (1 << 7)
#define EXT_PHY_RESET (1 << 8)
#define EXT_ENERGY_DET_MASK (1 << 12)
+#define EXT_PWR_DOWN_PHY_TX (1 << 16)
+#define EXT_PWR_DOWN_PHY_RX (1 << 17)
+#define EXT_PWR_DOWN_PHY_SD (1 << 18)
+#define EXT_PWR_DOWN_PHY_RD (1 << 19)
+#define EXT_PWR_DOWN_PHY_EN (1 << 20)
#define EXT_RGMII_OOB_CTRL 0x0C
#define RGMII_LINK (1 << 4)
@@ -499,13 +505,15 @@ enum bcmgenet_version {
GENET_V1 = 1,
GENET_V2,
GENET_V3,
- GENET_V4
+ GENET_V4,
+ GENET_V5
};
#define GENET_IS_V1(p) ((p)->version == GENET_V1)
#define GENET_IS_V2(p) ((p)->version == GENET_V2)
#define GENET_IS_V3(p) ((p)->version == GENET_V3)
#define GENET_IS_V4(p) ((p)->version == GENET_V4)
+#define GENET_IS_V5(p) ((p)->version == GENET_V5)
/* Hardware flags */
#define GENET_HAS_40BITS (1 << 0)
@@ -544,6 +552,8 @@ struct bcmgenet_skb_cb {
struct bcmgenet_tx_ring {
spinlock_t lock; /* ring lock */
struct napi_struct napi; /* NAPI per tx queue */
+ unsigned long packets;
+ unsigned long bytes;
unsigned int index; /* ring index */
unsigned int queue; /* queue index */
struct enet_cb *cbs; /* tx ring buffer control block*/
@@ -562,6 +572,10 @@ struct bcmgenet_tx_ring {
struct bcmgenet_rx_ring {
struct napi_struct napi; /* Rx NAPI struct */
+ unsigned long bytes;
+ unsigned long packets;
+ unsigned long errors;
+ unsigned long dropped;
unsigned int index; /* Rx ring index */
struct enet_cb *cbs; /* Rx ring buffer control block */
unsigned int size; /* Rx ring size */
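
The per-ring counters added to bcmgenet.h above are exported through ethtool strings generated by the STAT_GENET_Q() macro earlier in this patch. A userspace approximation of that __stringify() trick, using only the standard preprocessor, is sketched below; the macro names here are invented for the demo.

/* Illustration of building per-queue stat names via stringification. */
#include <stdio.h>

#define DEMO_STR_HELPER(x) #x
#define DEMO_STR(x) DEMO_STR_HELPER(x)

#define DEMO_TXQ_STAT_NAME(num, field) ("txq" DEMO_STR(num) "_" field)

int main(void)
{
	printf("%s\n", DEMO_TXQ_STAT_NAME(0, "packets"));   /* txq0_packets */
	printf("%s\n", DEMO_TXQ_STAT_NAME(3, "bytes"));     /* txq3_bytes */
	printf("%s\n", DEMO_TXQ_STAT_NAME(16, "packets"));  /* txq16_packets */
	return 0;
}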
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index b97122926d3a..2fbd027f0148 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -1,7 +1,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
*
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -127,7 +127,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode)
{
struct net_device *dev = priv->dev;
- u32 cpu_mask_clear;
int retries = 0;
u32 reg;
@@ -173,18 +172,12 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
}
- /* Enable the MPD interrupt */
- cpu_mask_clear = UMAC_IRQ_MPD_R;
-
- bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
-
return 0;
}
void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
enum bcmgenet_power_mode mode)
{
- u32 cpu_mask_set;
u32 reg;
if (mode != GENET_POWER_WOL_MAGIC) {
@@ -201,10 +194,4 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
reg &= ~CMD_CRC_FWD;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
priv->crc_fwd_en = 0;
-
- /* Stop monitoring magic packet IRQ */
- cpu_mask_set = UMAC_IRQ_MPD_R;
-
- /* Stop monitoring magic packet IRQ */
- bcmgenet_intrl2_0_writel(priv, cpu_mask_set, INTRL2_CPU_MASK_SET);
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 2f9281936f0e..285676f8da6b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -1,7 +1,7 @@
/*
* Broadcom GENET MDIO routines
*
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -195,39 +195,43 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
u32 reg = 0;
/* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
- if (!GENET_IS_V4(priv))
- return;
-
- reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
- if (enable) {
- reg &= ~EXT_CK25_DIS;
- bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
- mdelay(1);
-
- reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
- reg |= EXT_GPHY_RESET;
+ if (GENET_IS_V4(priv)) {
+ reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
+ if (enable) {
+ reg &= ~EXT_CK25_DIS;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(1);
+
+ reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
+ reg |= EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(1);
+
+ reg &= ~EXT_GPHY_RESET;
+ } else {
+ reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN |
+ EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(1);
+ reg |= EXT_CK25_DIS;
+ }
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
- mdelay(1);
-
- reg &= ~EXT_GPHY_RESET;
+ udelay(60);
} else {
- reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN | EXT_GPHY_RESET;
- bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
mdelay(1);
- reg |= EXT_CK25_DIS;
}
- bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
- udelay(60);
}
static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
{
u32 reg;
- /* Speed settings are set in bcmgenet_mii_setup() */
- reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
- reg |= LED_ACT_SOURCE_MAC;
- bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+ if (!GENET_IS_V5(priv)) {
+ /* Speed settings are set in bcmgenet_mii_setup() */
+ reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
+ reg |= LED_ACT_SOURCE_MAC;
+ bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+ }
if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
fixed_phy_set_link_update(priv->phydev,
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 30d1eb9ebec9..f395b951f5e7 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -825,6 +825,7 @@ static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
return timeout_us ? 0 : -EBUSY;
}
+#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
u32 i, apedata;
@@ -904,6 +905,7 @@ static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
return 0;
}
+#endif
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
@@ -10744,6 +10746,7 @@ static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
return tg3_reset_hw(tp, reset_phy);
}
+#ifdef CONFIG_TIGON3_HWMON
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
int i;
@@ -10826,6 +10829,10 @@ static void tg3_hwmon_open(struct tg3 *tp)
dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
}
}
+#else
+static inline void tg3_hwmon_close(struct tg3 *tp) { }
+static inline void tg3_hwmon_open(struct tg3 *tp) { }
+#endif /* CONFIG_TIGON3_HWMON */
#define TG3_STAT_ADD32(PSTAT, REG) \