Diffstat (limited to 'drivers/net/ethernet/broadcom/bnx2.c')
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2.c | 1028
1 file changed, 528 insertions(+), 500 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 6a2de1d79ff6..805daae9dd36 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -1,6 +1,7 @@ -/* bnx2.c: Broadcom NX2 network driver. +/* bnx2.c: QLogic bnx2 network driver. * - * Copyright (c) 2004-2011 Broadcom Corporation + * Copyright (c) 2004-2014 Broadcom Corporation + * Copyright (c) 2014-2015 QLogic Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -23,7 +24,6 @@ #include <linux/vmalloc.h> #include <linux/interrupt.h> #include <linux/pci.h> -#include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> @@ -48,9 +48,9 @@ #include <linux/cache.h> #include <linux/firmware.h> #include <linux/log2.h> -#include <linux/aer.h> +#include <linux/crash_dump.h> -#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) +#if IS_ENABLED(CONFIG_CNIC) #define BCM_CNIC 1 #include "cnic_if.h" #endif @@ -58,8 +58,6 @@ #include "bnx2_fw.h" #define DRV_MODULE_NAME "bnx2" -#define DRV_MODULE_VERSION "2.2.3" -#define DRV_MODULE_RELDATE "June 27, 2012" #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw" #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw" #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw" @@ -71,13 +69,9 @@ /* Time in jiffies before concluding the transmitter is hung. */ #define TX_TIMEOUT (5*HZ) -static char version[] = - "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; - MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>"); -MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver"); +MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver"); MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_MODULE_VERSION); MODULE_FIRMWARE(FW_MIPS_FILE_06); MODULE_FIRMWARE(FW_RV2P_FILE_06); MODULE_FIRMWARE(FW_MIPS_FILE_09); @@ -86,7 +80,7 @@ MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax); static int disable_msi = 0; -module_param(disable_msi, int, 0); +module_param(disable_msi, int, 0444); MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); typedef enum { @@ -120,7 +114,7 @@ static struct { { "Broadcom NetXtreme II BCM5716 1000Base-SX" }, }; -static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = { +static const struct pci_device_id bnx2_pci_tbl[] = { { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T }, { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, @@ -181,12 +175,12 @@ static const struct flash_spec flash_table[] = {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2, - "Entry 0101: ST M45PE10 (128kB non-bufferred)"}, + "Entry 0101: ST M45PE10 (128kB non-buffered)"}, /* Entry 0110: ST M45PE20 (non-buffered flash)*/ {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406, NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE, ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4, - "Entry 0110: ST M45PE20 (256kB non-bufferred)"}, + "Entry 0110: ST M45PE20 (256kB non-buffered)"}, /* Saifun SA25F005 (non-buffered flash) */ /* strap, cfg1, & write1 need updates */ {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406, @@ -253,13 +247,10 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info 
*txr) { u32 diff; - /* Tell compiler to fetch tx_prod and tx_cons from memory. */ - barrier(); - /* The ring uses 256 indices for 255 entries, one of them * needs to be skipped. */ - diff = txr->tx_prod - txr->tx_cons; + diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons); if (unlikely(diff >= BNX2_TX_DESC_CNT)) { diff &= 0xffff; if (diff == BNX2_TX_DESC_CNT) @@ -271,22 +262,25 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr) static u32 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset) { + unsigned long flags; u32 val; - spin_lock_bh(&bp->indirect_lock); + spin_lock_irqsave(&bp->indirect_lock, flags); BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW); - spin_unlock_bh(&bp->indirect_lock); + spin_unlock_irqrestore(&bp->indirect_lock, flags); return val; } static void bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val) { - spin_lock_bh(&bp->indirect_lock); + unsigned long flags; + + spin_lock_irqsave(&bp->indirect_lock, flags); BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val); - spin_unlock_bh(&bp->indirect_lock); + spin_unlock_irqrestore(&bp->indirect_lock, flags); } static void @@ -304,8 +298,10 @@ bnx2_shmem_rd(struct bnx2 *bp, u32 offset) static void bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val) { + unsigned long flags; + offset += cid_addr; - spin_lock_bh(&bp->indirect_lock); + spin_lock_irqsave(&bp->indirect_lock, flags); if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { int i; @@ -322,7 +318,7 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val) BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset); BNX2_WR(bp, BNX2_CTX_DATA, val); } - spin_unlock_bh(&bp->indirect_lock); + spin_unlock_irqrestore(&bp->indirect_lock, flags); } #ifdef BCM_CNIC @@ -371,6 +367,7 @@ static void bnx2_setup_cnic_irq_info(struct bnx2 *bp) cp->irq_arr[0].status_blk = (void *) ((unsigned long) bnapi->status_blk.msi + (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id)); + cp->irq_arr[0].status_blk_map = bp->status_blk_mapping; cp->irq_arr[0].status_blk_num = sb_id; cp->num_irq = 1; } @@ -381,7 +378,7 @@ static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops, struct bnx2 *bp = netdev_priv(dev); struct cnic_eth_dev *cp = &bp->cnic_eth_dev; - if (ops == NULL) + if (!ops) return -EINVAL; if (cp->drv_state & CNIC_DRV_STATE_REGD) @@ -575,7 +572,7 @@ bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val) } if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) - ret = -EBUSY; + ret = -EBUSY; else ret = 0; @@ -752,13 +749,13 @@ bnx2_alloc_tx_mem(struct bnx2 *bp) struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL); - if (txr->tx_buf_ring == NULL) + if (!txr->tx_buf_ring) return -ENOMEM; txr->tx_desc_ring = dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE, &txr->tx_desc_mapping, GFP_KERNEL); - if (txr->tx_desc_ring == NULL) + if (!txr->tx_desc_ring) return -ENOMEM; } return 0; @@ -775,8 +772,8 @@ bnx2_alloc_rx_mem(struct bnx2 *bp) int j; rxr->rx_buf_ring = - vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring); - if (rxr->rx_buf_ring == NULL) + vzalloc(array_size(SW_RXBD_RING_SIZE, bp->rx_max_ring)); + if (!rxr->rx_buf_ring) return -ENOMEM; for (j = 0; j < bp->rx_max_ring; j++) { @@ -785,15 +782,16 @@ bnx2_alloc_rx_mem(struct bnx2 *bp) RXBD_RING_SIZE, &rxr->rx_desc_mapping[j], GFP_KERNEL); - if (rxr->rx_desc_ring[j] == NULL) + if (!rxr->rx_desc_ring[j]) return -ENOMEM; } if (bp->rx_pg_ring_size) { - rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE * - 
bp->rx_max_pg_ring); - if (rxr->rx_pg_ring == NULL) + rxr->rx_pg_ring = + vzalloc(array_size(SW_RXPG_RING_SIZE, + bp->rx_max_pg_ring)); + if (!rxr->rx_pg_ring) return -ENOMEM; } @@ -804,7 +802,7 @@ bnx2_alloc_rx_mem(struct bnx2 *bp) RXBD_RING_SIZE, &rxr->rx_pg_desc_mapping[j], GFP_KERNEL); - if (rxr->rx_pg_desc_ring[j] == NULL) + if (!rxr->rx_pg_desc_ring[j]) return -ENOMEM; } @@ -813,6 +811,46 @@ bnx2_alloc_rx_mem(struct bnx2 *bp) } static void +bnx2_free_stats_blk(struct net_device *dev) +{ + struct bnx2 *bp = netdev_priv(dev); + + if (bp->status_blk) { + dma_free_coherent(&bp->pdev->dev, bp->status_stats_size, + bp->status_blk, + bp->status_blk_mapping); + bp->status_blk = NULL; + bp->stats_blk = NULL; + } +} + +static int +bnx2_alloc_stats_blk(struct net_device *dev) +{ + int status_blk_size; + void *status_blk; + struct bnx2 *bp = netdev_priv(dev); + + /* Combine status and statistics blocks into one allocation. */ + status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block)); + if (bp->flags & BNX2_FLAG_MSIX_CAP) + status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC * + BNX2_SBLK_MSIX_ALIGN_SIZE); + bp->status_stats_size = status_blk_size + + sizeof(struct statistics_block); + status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size, + &bp->status_blk_mapping, GFP_KERNEL); + if (!status_blk) + return -ENOMEM; + + bp->status_blk = status_blk; + bp->stats_blk = status_blk + status_blk_size; + bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size; + + return 0; +} + +static void bnx2_free_mem(struct bnx2 *bp) { int i; @@ -829,38 +867,19 @@ bnx2_free_mem(struct bnx2 *bp) bp->ctx_blk[i] = NULL; } } - if (bnapi->status_blk.msi) { - dma_free_coherent(&bp->pdev->dev, bp->status_stats_size, - bnapi->status_blk.msi, - bp->status_blk_mapping); + + if (bnapi->status_blk.msi) bnapi->status_blk.msi = NULL; - bp->stats_blk = NULL; - } } static int bnx2_alloc_mem(struct bnx2 *bp) { - int i, status_blk_size, err; + int i, err; struct bnx2_napi *bnapi; - void *status_blk; - - /* Combine status and statistics blocks into one allocation. 
*/ - status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block)); - if (bp->flags & BNX2_FLAG_MSIX_CAP) - status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC * - BNX2_SBLK_MSIX_ALIGN_SIZE); - bp->status_stats_size = status_blk_size + - sizeof(struct statistics_block); - - status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size, - &bp->status_blk_mapping, - GFP_KERNEL | __GFP_ZERO); - if (status_blk == NULL) - goto alloc_mem_err; bnapi = &bp->bnx2_napi[0]; - bnapi->status_blk.msi = status_blk; + bnapi->status_blk.msi = bp->status_blk; bnapi->hw_tx_cons_ptr = &bnapi->status_blk.msi->status_tx_quick_consumer_index0; bnapi->hw_rx_cons_ptr = @@ -871,7 +890,7 @@ bnx2_alloc_mem(struct bnx2 *bp) bnapi = &bp->bnx2_napi[i]; - sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i); + sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i); bnapi->status_blk.msix = sblk; bnapi->hw_tx_cons_ptr = &sblk->status_tx_quick_consumer_index; @@ -881,10 +900,6 @@ bnx2_alloc_mem(struct bnx2 *bp) } } - bp->stats_blk = status_blk + status_blk_size; - - bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size; - if (BNX2_CHIP(bp) == BNX2_CHIP_5709) { bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE; if (bp->ctx_pages == 0) @@ -894,7 +909,7 @@ bnx2_alloc_mem(struct bnx2 *bp) BNX2_PAGE_SIZE, &bp->ctx_blk_mapping[i], GFP_KERNEL); - if (bp->ctx_blk[i] == NULL) + if (!bp->ctx_blk[i]) goto alloc_mem_err; } } @@ -1198,6 +1213,8 @@ bnx2_copper_linkup(struct bnx2 *bp) { u32 bmcr; + bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX; + bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); if (bmcr & BMCR_ANENABLE) { u32 local_adv, remote_adv, common; @@ -1256,6 +1273,14 @@ bnx2_copper_linkup(struct bnx2 *bp) } } + if (bp->link_up) { + u32 ext_status; + + bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status); + if (ext_status & EXT_STATUS_MDIX) + bp->phy_flags |= BNX2_PHY_FLAG_MDIX; + } + return 0; } @@ -1312,13 +1337,13 @@ bnx2_set_mac_link(struct bnx2 *bp) val |= BNX2_EMAC_MODE_PORT_MII_10M; break; } - /* fall through */ + fallthrough; case SPEED_100: val |= BNX2_EMAC_MODE_PORT_MII; break; case SPEED_2500: val |= BNX2_EMAC_MODE_25G_MODE; - /* fall through */ + fallthrough; case SPEED_1000: val |= BNX2_EMAC_MODE_PORT_GMII; break; @@ -1430,7 +1455,7 @@ bnx2_test_and_disable_2g5(struct bnx2 *bp) static void bnx2_enable_forced_2g5(struct bnx2 *bp) { - u32 uninitialized_var(bmcr); + u32 bmcr; int err; if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) @@ -1474,7 +1499,7 @@ bnx2_enable_forced_2g5(struct bnx2 *bp) static void bnx2_disable_forced_2g5(struct bnx2 *bp) { - u32 uninitialized_var(bmcr); + u32 bmcr; int err; if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) @@ -1970,26 +1995,26 @@ bnx2_remote_phy_event(struct bnx2 *bp) switch (speed) { case BNX2_LINK_STATUS_10HALF: bp->duplex = DUPLEX_HALF; - /* fall through */ + fallthrough; case BNX2_LINK_STATUS_10FULL: bp->line_speed = SPEED_10; break; case BNX2_LINK_STATUS_100HALF: bp->duplex = DUPLEX_HALF; - /* fall through */ + fallthrough; case BNX2_LINK_STATUS_100BASE_T4: case BNX2_LINK_STATUS_100FULL: bp->line_speed = SPEED_100; break; case BNX2_LINK_STATUS_1000HALF: bp->duplex = DUPLEX_HALF; - /* fall through */ + fallthrough; case BNX2_LINK_STATUS_1000FULL: bp->line_speed = SPEED_1000; break; case BNX2_LINK_STATUS_2500HALF: bp->duplex = DUPLEX_HALF; - /* fall through */ + fallthrough; case BNX2_LINK_STATUS_2500FULL: bp->line_speed = SPEED_2500; break; @@ -2049,29 +2074,27 @@ bnx2_setup_copper_phy(struct bnx2 *bp) __releases(&bp->phy_lock) __acquires(&bp->phy_lock) { - u32 bmcr; + u32 bmcr, 
adv_reg, new_adv = 0; u32 new_bmcr; bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); + bnx2_read_phy(bp, bp->mii_adv, &adv_reg); + adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP | + ADVERTISE_PAUSE_ASYM); + + new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising); + if (bp->autoneg & AUTONEG_SPEED) { - u32 adv_reg, adv1000_reg; - u32 new_adv = 0; + u32 adv1000_reg; u32 new_adv1000 = 0; - bnx2_read_phy(bp, bp->mii_adv, &adv_reg); - adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP | - ADVERTISE_PAUSE_ASYM); + new_adv |= bnx2_phy_get_pause_adv(bp); bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg); adv1000_reg &= PHY_ALL_1000_SPEED; - new_adv = ethtool_adv_to_mii_adv_t(bp->advertising); - new_adv |= ADVERTISE_CSMA; - new_adv |= bnx2_phy_get_pause_adv(bp); - new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising); - if ((adv1000_reg != new_adv1000) || (adv_reg != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) { @@ -2091,6 +2114,10 @@ __acquires(&bp->phy_lock) return 0; } + /* advertise nothing when forcing speed */ + if (adv_reg != new_adv) + bnx2_write_phy(bp, bp->mii_adv, new_adv); + new_bmcr = 0; if (bp->req_line_speed == SPEED_100) { new_bmcr |= BMCR_SPEED100; @@ -2269,7 +2296,7 @@ bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy) if (BNX2_CHIP(bp) == BNX2_CHIP_5706) BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300); - if (bp->dev->mtu > 1500) { + if (bp->dev->mtu > ETH_DATA_LEN) { u32 val; /* Set extended packet length bit */ @@ -2323,7 +2350,7 @@ bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy) bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val); } - if (bp->dev->mtu > 1500) { + if (bp->dev->mtu > ETH_DATA_LEN) { /* Set extended packet length bit */ bnx2_write_phy(bp, 0x18, 0x7); bnx2_read_phy(bp, 0x18, &val); @@ -2342,9 +2369,15 @@ bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy) } /* ethernet@wirespeed */ - bnx2_write_phy(bp, 0x18, 0x7007); - bnx2_read_phy(bp, 0x18, &val); - bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4)); + bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL); + bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val); + val |= AUX_CTL_MISC_CTL_WR | AUX_CTL_MISC_CTL_WIRESPEED; + + /* auto-mdix */ + if (BNX2_CHIP(bp) == BNX2_CHIP_5709) + val |= AUX_CTL_MISC_CTL_AUTOMDIX; + + bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val); return 0; } @@ -2491,6 +2524,7 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent) bp->fw_wr_seq++; msg_data |= bp->fw_wr_seq; + bp->fw_last_msg = msg_data; bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data); @@ -2627,8 +2661,8 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp) u32 good_mbuf_cnt; u32 val; - good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL); - if (good_mbuf == NULL) + good_mbuf = kmalloc_array(512, sizeof(u16), GFP_KERNEL); + if (!good_mbuf) return -ENOMEM; BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, @@ -2670,7 +2704,7 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp) } static void -bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos) +bnx2_set_mac_addr(struct bnx2 *bp, const u8 *mac_addr, u32 pos) { u32 val; @@ -2696,7 +2730,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gf if (!page) return -ENOMEM; mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (dma_mapping_error(&bp->pdev->dev, mapping)) { __free_page(page); return -EIO; @@ -2719,7 +2753,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index) return; dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping), - PAGE_SIZE, PCI_DMA_FROMDEVICE); + PAGE_SIZE, DMA_FROM_DEVICE); 
__free_page(page); rx_pg->page = NULL; @@ -2741,7 +2775,7 @@ bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gf mapping = dma_map_single(&bp->pdev->dev, get_l2_fhdr(data), bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (dma_mapping_error(&bp->pdev->dev, mapping)) { kfree(data); return -EIO; @@ -2797,10 +2831,8 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi) { u16 cons; - /* Tell compiler that status block fields can change. */ - barrier(); - cons = *bnapi->hw_tx_cons_ptr; - barrier(); + cons = READ_ONCE(*bnapi->hw_tx_cons_ptr); + if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT)) cons++; return cons; @@ -2849,7 +2881,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) } dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), - skb_headlen(skb), PCI_DMA_TODEVICE); + skb_headlen(skb), DMA_TO_DEVICE); tx_buf->skb = NULL; last = tx_buf->nr_frags; @@ -2863,13 +2895,13 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_frag_size(&skb_shinfo(skb)->frags[i]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } sw_cons = BNX2_NEXT_TX_BD(sw_cons); tx_bytes += skb->len; - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); tx_pkt++; if (tx_pkt == budget) break; @@ -2924,7 +2956,6 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, shinfo = skb_shinfo(skb); shinfo->nr_frags--; page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]); - __skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL); cons_rx_pg->page = page; dev_kfree_skb(skb); @@ -2971,7 +3002,7 @@ bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, dma_sync_single_for_device(&bp->pdev->dev, dma_unmap_addr(cons_rx_buf, mapping), - BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE); + BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, DMA_FROM_DEVICE); rxr->rx_prod_bseq += bp->rx_buf_use_size; @@ -3012,8 +3043,8 @@ error: } dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); - skb = build_skb(data, 0); + DMA_FROM_DEVICE); + skb = slab_build_skb(data); if (!skb) { kfree(data); goto error; @@ -3078,7 +3109,7 @@ error: } dma_unmap_page(&bp->pdev->dev, mapping_old, - PAGE_SIZE, PCI_DMA_FROMDEVICE); + PAGE_SIZE, DMA_FROM_DEVICE); frag_size -= frag_len; skb->data_len += frag_len; @@ -3099,10 +3130,8 @@ bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi) { u16 cons; - /* Tell compiler that status block fields can change. 
*/ - barrier(); - cons = *bnapi->hw_rx_cons_ptr; - barrier(); + cons = READ_ONCE(*bnapi->hw_rx_cons_ptr); + if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT)) cons++; return cons; @@ -3116,6 +3145,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) struct l2_fhdr *rx_hdr; int rx_pkt = 0, pg_ring_used = 0; + if (budget <= 0) + return rx_pkt; + hw_cons = bnx2_get_hw_rx_cons(bnapi); sw_cons = rxr->rx_cons; sw_prod = rxr->rx_prod; @@ -3147,7 +3179,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons)); next_rx_buf = &rxr->rx_buf_ring[next_ring_idx]; @@ -3187,7 +3219,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) if (len <= bp->rx_copy_thresh) { skb = netdev_alloc_skb(bp->dev, len + 6); - if (skb == NULL) { + if (!skb) { bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons, sw_ring_prod); goto next_rx; @@ -3215,8 +3247,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) skb->protocol = eth_type_trans(skb, bp->dev); - if ((len > (bp->dev->mtu + ETH_HLEN)) && - (ntohs(skb->protocol) != 0x8100)) { + if (len > (bp->dev->mtu + ETH_HLEN) && + skb->protocol != htons(0x8100) && + skb->protocol != htons(ETH_P_8021AD)) { dev_kfree_skb(skb); goto next_rx; @@ -3235,7 +3268,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) if ((bp->dev->features & NETIF_F_RXHASH) && ((status & L2_FHDR_STATUS_USE_RXHASH) == L2_FHDR_STATUS_USE_RXHASH)) - skb->rxhash = rx_hdr->l2_fhdr_hash; + skb_set_hash(skb, rx_hdr->l2_fhdr_hash, + PKT_HASH_TYPE_L3); skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]); napi_gro_receive(&bnapi->napi, skb); @@ -3245,7 +3279,7 @@ next_rx: sw_cons = BNX2_NEXT_RX_BD(sw_cons); sw_prod = BNX2_NEXT_RX_BD(sw_prod); - if ((rx_pkt == budget)) + if (rx_pkt == budget) break; /* Refresh hw_cons to see if there is new work */ @@ -3264,8 +3298,6 @@ next_rx: BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq); - mmiowb(); - return rx_pkt; } @@ -3475,7 +3507,7 @@ static int bnx2_poll_msix(struct napi_struct *napi, int budget) rmb(); if (likely(!bnx2_has_fast_work(bnapi))) { - napi_complete(napi); + napi_complete_done(napi, work_done); BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bnapi->last_status_idx); @@ -3512,7 +3544,7 @@ static int bnx2_poll(struct napi_struct *napi, int budget) rmb(); if (likely(!bnx2_has_work(bnapi))) { - napi_complete(napi); + napi_complete_done(napi, work_done); if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) { BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | @@ -3566,7 +3598,7 @@ bnx2_set_rx_mode(struct net_device *dev) for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), 0xffffffff); - } + } sort_mode |= BNX2_RPM_SORT_USER0_MC_EN; } else { @@ -3796,7 +3828,7 @@ load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc, return 0; } -static int +static void load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, const struct bnx2_mips_fw_file_entry *fw_entry) { @@ -3864,180 +3896,179 @@ load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, val &= ~cpu_reg->mode_value_halt; bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear); bnx2_reg_wr_ind(bp, cpu_reg->mode, val); - - return 0; } -static int +static void bnx2_init_cpus(struct bnx2 *bp) { const struct 
bnx2_mips_fw_file *mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data; const struct bnx2_rv2p_fw_file *rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data; - int rc; /* Initialize the RV2P processor. */ load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1); load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2); /* Initialize the RX Processor. */ - rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp); - if (rc) - goto init_cpu_err; + load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp); /* Initialize the TX Processor. */ - rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp); - if (rc) - goto init_cpu_err; + load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp); /* Initialize the TX Patch-up Processor. */ - rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat); - if (rc) - goto init_cpu_err; + load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat); /* Initialize the Completion Processor. */ - rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com); - if (rc) - goto init_cpu_err; + load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com); /* Initialize the Command Processor. */ - rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp); - -init_cpu_err: - return rc; + load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp); } -static int -bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) +static void +bnx2_setup_wol(struct bnx2 *bp) { - u16 pmcsr; + int i; + u32 val, wol_msg; - pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr); + if (bp->wol) { + u32 advertising; + u8 autoneg; - switch (state) { - case PCI_D0: { - u32 val; + autoneg = bp->autoneg; + advertising = bp->advertising; - pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, - (pmcsr & ~PCI_PM_CTRL_STATE_MASK) | - PCI_PM_CTRL_PME_STATUS); + if (bp->phy_port == PORT_TP) { + bp->autoneg = AUTONEG_SPEED; + bp->advertising = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_Autoneg; + } - if (pmcsr & PCI_PM_CTRL_STATE_MASK) - /* delay required during transition out of D3hot */ - msleep(20); + spin_lock_bh(&bp->phy_lock); + bnx2_setup_phy(bp, bp->phy_port); + spin_unlock_bh(&bp->phy_lock); + + bp->autoneg = autoneg; + bp->advertising = advertising; + + bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); val = BNX2_RD(bp, BNX2_EMAC_MODE); - val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD; - val &= ~BNX2_EMAC_MODE_MPKT; - BNX2_WR(bp, BNX2_EMAC_MODE, val); - val = BNX2_RD(bp, BNX2_RPM_CONFIG); - val &= ~BNX2_RPM_CONFIG_ACPI_ENA; - BNX2_WR(bp, BNX2_RPM_CONFIG, val); - break; - } - case PCI_D3hot: { - int i; - u32 val, wol_msg; - - if (bp->wol) { - u32 advertising; - u8 autoneg; - - autoneg = bp->autoneg; - advertising = bp->advertising; - - if (bp->phy_port == PORT_TP) { - bp->autoneg = AUTONEG_SPEED; - bp->advertising = ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_Autoneg; - } + /* Enable port mode. 
*/ + val &= ~BNX2_EMAC_MODE_PORT; + val |= BNX2_EMAC_MODE_MPKT_RCVD | + BNX2_EMAC_MODE_ACPI_RCVD | + BNX2_EMAC_MODE_MPKT; + if (bp->phy_port == PORT_TP) { + val |= BNX2_EMAC_MODE_PORT_MII; + } else { + val |= BNX2_EMAC_MODE_PORT_GMII; + if (bp->line_speed == SPEED_2500) + val |= BNX2_EMAC_MODE_25G_MODE; + } - spin_lock_bh(&bp->phy_lock); - bnx2_setup_phy(bp, bp->phy_port); - spin_unlock_bh(&bp->phy_lock); + BNX2_WR(bp, BNX2_EMAC_MODE, val); - bp->autoneg = autoneg; - bp->advertising = advertising; + /* receive all multicast */ + for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { + BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), + 0xffffffff); + } + BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE); - bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); + val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN; + BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0); + BNX2_WR(bp, BNX2_RPM_SORT_USER0, val); + BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA); - val = BNX2_RD(bp, BNX2_EMAC_MODE); + /* Need to enable EMAC and RPM for WOL. */ + BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, + BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE | + BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE | + BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE); - /* Enable port mode. */ - val &= ~BNX2_EMAC_MODE_PORT; - val |= BNX2_EMAC_MODE_MPKT_RCVD | - BNX2_EMAC_MODE_ACPI_RCVD | - BNX2_EMAC_MODE_MPKT; - if (bp->phy_port == PORT_TP) - val |= BNX2_EMAC_MODE_PORT_MII; - else { - val |= BNX2_EMAC_MODE_PORT_GMII; - if (bp->line_speed == SPEED_2500) - val |= BNX2_EMAC_MODE_25G_MODE; - } + val = BNX2_RD(bp, BNX2_RPM_CONFIG); + val &= ~BNX2_RPM_CONFIG_ACPI_ENA; + BNX2_WR(bp, BNX2_RPM_CONFIG, val); - BNX2_WR(bp, BNX2_EMAC_MODE, val); + wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL; + } else { + wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; + } - /* receive all multicast */ - for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { - BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), - 0xffffffff); - } - BNX2_WR(bp, BNX2_EMAC_RX_MODE, - BNX2_EMAC_RX_MODE_SORT_MODE); + if (!(bp->flags & BNX2_FLAG_NO_WOL)) { + u32 val; - val = 1 | BNX2_RPM_SORT_USER0_BC_EN | - BNX2_RPM_SORT_USER0_MC_EN; - BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0); - BNX2_WR(bp, BNX2_RPM_SORT_USER0, val); - BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | - BNX2_RPM_SORT_USER0_ENA); + wol_msg |= BNX2_DRV_MSG_DATA_WAIT3; + if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) { + bnx2_fw_sync(bp, wol_msg, 1, 0); + return; + } + /* Tell firmware not to power down the PHY yet, otherwise + * the chip will take a long time to respond to MMIO reads. + */ + val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE); + bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, + val | BNX2_PORT_FEATURE_ASF_ENABLED); + bnx2_fw_sync(bp, wol_msg, 1, 0); + bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val); + } - /* Need to enable EMAC and RPM for WOL. 
*/ - BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, - BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE | - BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE | - BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE); +} - val = BNX2_RD(bp, BNX2_RPM_CONFIG); - val &= ~BNX2_RPM_CONFIG_ACPI_ENA; - BNX2_WR(bp, BNX2_RPM_CONFIG, val); +static int +bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) +{ + switch (state) { + case PCI_D0: { + u32 val; - wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL; - } - else { - wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; - } + pci_enable_wake(bp->pdev, PCI_D0, false); + pci_set_power_state(bp->pdev, PCI_D0); - if (!(bp->flags & BNX2_FLAG_NO_WOL)) - bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, - 1, 0); + val = BNX2_RD(bp, BNX2_EMAC_MODE); + val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD; + val &= ~BNX2_EMAC_MODE_MPKT; + BNX2_WR(bp, BNX2_EMAC_MODE, val); - pmcsr &= ~PCI_PM_CTRL_STATE_MASK; + val = BNX2_RD(bp, BNX2_RPM_CONFIG); + val &= ~BNX2_RPM_CONFIG_ACPI_ENA; + BNX2_WR(bp, BNX2_RPM_CONFIG, val); + break; + } + case PCI_D3hot: { + bnx2_setup_wol(bp); + pci_wake_from_d3(bp->pdev, bp->wol); if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) || (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) { if (bp->wol) - pmcsr |= 3; - } - else { - pmcsr |= 3; + pci_set_power_state(bp->pdev, PCI_D3hot); + break; + } - if (bp->wol) { - pmcsr |= PCI_PM_CTRL_PME_ENABLE; + if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) { + u32 val; + + /* Tell firmware not to power down the PHY yet, + * otherwise the other port may not respond to + * MMIO reads. + */ + val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION); + val &= ~BNX2_CONDITION_PM_STATE_MASK; + val |= BNX2_CONDITION_PM_STATE_UNPREP; + bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val); } - pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, - pmcsr); + pci_set_power_state(bp->pdev, PCI_D3hot); /* No more memory access after this point until * device is brought back to D0. */ - udelay(50); break; } default: @@ -4508,7 +4539,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, if (align_start || align_end) { align_buf = kmalloc(len32, GFP_KERNEL); - if (align_buf == NULL) + if (!align_buf) return -ENOMEM; if (align_start) { memcpy(align_buf, start, 4); @@ -4522,7 +4553,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { flash_buffer = kmalloc(264, GFP_KERNEL); - if (flash_buffer == NULL) { + if (!flash_buffer) { rc = -ENOMEM; goto nvram_write_end; } @@ -4628,7 +4659,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, if (addr == page_end-4) { cmd_flags = BNX2_NVM_COMMAND_LAST; - } + } rc = bnx2_nvram_write_dword(bp, addr, &flash_buffer[i], cmd_flags); @@ -4705,15 +4736,16 @@ bnx2_setup_msix_tbl(struct bnx2 *bp) BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR); } -static int -bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) +static void +bnx2_wait_dma_complete(struct bnx2 *bp) { u32 val; - int i, rc = 0; - u8 old_port; + int i; - /* Wait for the current PCI transaction to complete before - * issuing a reset. */ + /* + * Wait for the current PCI transaction to complete before + * issuing a reset. 
+ */ if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) || (BNX2_CHIP(bp) == BNX2_CHIP_5708)) { BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, @@ -4737,6 +4769,21 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) } } + return; +} + + +static int +bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) +{ + u32 val; + int i, rc = 0; + u8 old_port; + + /* Wait for the current PCI transaction to complete before + * issuing a reset. */ + bnx2_wait_dma_complete(bp); + /* Wait for the firmware to tell us it is ok to issue a reset. */ bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1); @@ -4889,8 +4936,7 @@ bnx2_init_chip(struct bnx2 *bp) } else bnx2_init_context(bp); - if ((rc = bnx2_init_cpus(bp)) != 0) - return rc; + bnx2_init_cpus(bp); bnx2_init_nvram(bp); @@ -4931,12 +4977,12 @@ bnx2_init_chip(struct bnx2 *bp) /* Program the MTU. Also include 4 bytes for CRC32. */ mtu = bp->dev->mtu; val = mtu + ETH_HLEN + ETH_FCS_LEN; - if (val > (MAX_ETHERNET_PACKET_SIZE + 4)) + if (val > (MAX_ETHERNET_PACKET_SIZE + ETH_HLEN + 4)) val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA; BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val); - if (mtu < 1500) - mtu = 1500; + if (mtu < ETH_DATA_LEN) + mtu = ETH_DATA_LEN; bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu)); bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu)); @@ -4948,8 +4994,6 @@ bnx2_init_chip(struct bnx2 *bp) bp->idle_chk_status_idx = 0xffff; - bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE; - /* Set up how to generate a link change interrupt. */ BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); @@ -5355,8 +5399,9 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size) bp->rx_buf_use_size = rx_size; /* hw alignment + build_skb() overhead*/ - bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) + - NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + bp->rx_buf_size = kmalloc_size_roundup( + SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) + + NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET; bp->rx_ring_size = size; bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS); @@ -5373,7 +5418,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp) struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; int j; - if (txr->tx_buf_ring == NULL) + if (!txr->tx_buf_ring) continue; for (j = 0; j < BNX2_TX_DESC_CNT; ) { @@ -5381,7 +5426,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp) struct sk_buff *skb = tx_buf->skb; int k, last; - if (skb == NULL) { + if (!skb) { j = BNX2_NEXT_TX_BD(j); continue; } @@ -5389,7 +5434,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp) dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_headlen(skb), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); tx_buf->skb = NULL; @@ -5400,7 +5445,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp) dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_frag_size(&skb_shinfo(skb)->frags[k]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } dev_kfree_skb(skb); } @@ -5418,20 +5463,20 @@ bnx2_free_rx_skbs(struct bnx2 *bp) struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; int j; - if (rxr->rx_buf_ring == NULL) + if (!rxr->rx_buf_ring) return; for (j = 0; j < bp->rx_max_ring_idx; j++) { struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j]; u8 *data = rx_buf->data; - if (data == NULL) + if (!data) continue; dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); rx_buf->data = NULL; @@ -5751,8 +5796,8 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) struct l2_fhdr 
*rx_hdr; int ret = -ENODEV; struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi; - struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; - struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; + struct bnx2_tx_ring_info *txr; + struct bnx2_rx_ring_info *rxr; tx_napi = bnapi; @@ -5777,13 +5822,13 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) if (!skb) return -ENOMEM; packet = skb_put(skb, pkt_size); - memcpy(packet, bp->dev->dev_addr, 6); - memset(packet + 6, 0x0, 8); + memcpy(packet, bp->dev->dev_addr, ETH_ALEN); + memset(packet + ETH_ALEN, 0x0, 8); for (i = 14; i < pkt_size; i++) packet[i] = (unsigned char) (i & 0xff); map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); if (dma_mapping_error(&bp->pdev->dev, map)) { dev_kfree_skb(skb); return -EIO; @@ -5822,7 +5867,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) udelay(5); - dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE); + dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE); dev_kfree_skb(skb); if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod) @@ -5841,7 +5886,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) dma_sync_single_for_cpu(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), - bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); + bp->rx_buf_use_size, DMA_FROM_DEVICE); if (rx_hdr->l2_fhdr_status & (L2_FHDR_ERRORS_BAD_CRC | @@ -6116,9 +6161,9 @@ bnx2_5708_serdes_timer(struct bnx2 *bp) } static void -bnx2_timer(unsigned long data) +bnx2_timer(struct timer_list *t) { - struct bnx2 *bp = (struct bnx2 *) data; + struct bnx2 *bp = timer_container_of(bp, t, timer); if (!netif_running(bp->dev)) return; @@ -6204,7 +6249,7 @@ bnx2_free_irq(struct bnx2 *bp) static void bnx2_enable_msix(struct bnx2 *bp, int msix_vecs) { - int i, total_vecs, rc; + int i, total_vecs; struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC]; struct net_device *dev = bp->dev; const int len = sizeof(bp->irq_tbl[0].name); @@ -6227,16 +6272,9 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs) #ifdef BCM_CNIC total_vecs++; #endif - rc = -ENOSPC; - while (total_vecs >= BNX2_MIN_MSIX_VEC) { - rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs); - if (rc <= 0) - break; - if (rc > 0) - total_vecs = rc; - } - - if (rc != 0) + total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, + BNX2_MIN_MSIX_VEC, total_vecs); + if (total_vecs < 0) return; msix_vecs = total_vecs; @@ -6317,7 +6355,6 @@ bnx2_open(struct net_device *dev) netif_carrier_off(dev); - bnx2_set_power_state(bp, PCI_D0); bnx2_disable_int(bp); rc = bnx2_setup_int_mode(bp, disable_msi); @@ -6363,7 +6400,7 @@ bnx2_open(struct net_device *dev) rc = bnx2_request_irq(bp); if (rc) { - del_timer_sync(&bp->timer); + timer_delete_sync(&bp->timer); goto open_err; } bnx2_enable_int(bp); @@ -6407,7 +6444,6 @@ bnx2_reset_task(struct work_struct *work) if (!(pcicmd & PCI_COMMAND_MEMORY)) { /* in case PCI block has reset */ pci_restore_state(bp->pdev); - pci_save_state(bp->pdev); } rc = bnx2_init_nic(bp, 1); if (rc) { @@ -6517,7 +6553,7 @@ bnx2_dump_state(struct bnx2 *bp) } static void -bnx2_tx_timeout(struct net_device *dev) +bnx2_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct bnx2 *bp = netdev_priv(dev); @@ -6569,9 +6605,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM; } - if (vlan_tx_tag_present(skb)) { + if (skb_vlan_tag_present(skb)) { vlan_tag_flags |= - (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16)); + (TX_BD_FLAGS_VLAN_TAG | (skb_vlan_tag_get(skb) << 16)); } 
if ((mss = skb_shinfo(skb)->gso_size)) { @@ -6608,9 +6644,10 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) } else mss = 0; - mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); + mapping = dma_map_single(&bp->pdev->dev, skb->data, len, + DMA_TO_DEVICE); if (dma_mapping_error(&bp->pdev->dev, mapping)) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -6663,8 +6700,6 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) BNX2_WR16(bp, txr->tx_bidx_addr, prod); BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq); - mmiowb(); - txr->tx_prod = prod; if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) { @@ -6691,7 +6726,7 @@ dma_error: tx_buf = &txr->tx_buf_ring[ring_prod]; tx_buf->skb = NULL; dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), - skb_headlen(skb), PCI_DMA_TODEVICE); + skb_headlen(skb), DMA_TO_DEVICE); /* unmap remaining mapped pages */ for (i = 0; i < last_frag; i++) { @@ -6700,10 +6735,10 @@ dma_error: tx_buf = &txr->tx_buf_ring[ring_prod]; dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_frag_size(&skb_shinfo(skb)->frags[i]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -6716,7 +6751,7 @@ bnx2_close(struct net_device *dev) bnx2_disable_int_sync(bp); bnx2_napi_disable(bp); netif_tx_disable(dev); - del_timer_sync(&bp->timer); + timer_delete_sync(&bp->timer); bnx2_shutdown_chip(bp); bnx2_free_irq(bp); bnx2_free_skbs(bp); @@ -6724,7 +6759,6 @@ bnx2_close(struct net_device *dev) bnx2_del_napi(bp); bp->link_up = 0; netif_carrier_off(bp->dev); - bnx2_set_power_state(bp, PCI_D3hot); return 0; } @@ -6763,13 +6797,13 @@ bnx2_save_stats(struct bnx2 *bp) (unsigned long) (bp->stats_blk->ctr + \ bp->temp_stats_blk->ctr) -static struct rtnl_link_stats64 * +static void bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) { struct bnx2 *bp = netdev_priv(dev); - if (bp->stats_blk == NULL) - return net_stats; + if (!bp->stats_blk) + return; net_stats->rx_packets = GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) + @@ -6833,18 +6867,19 @@ bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) + GET_32BIT_NET_STATS(stat_FwRxDrop); - return net_stats; } /* All ethtool functions called with rtnl_lock */ static int -bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +bnx2_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { struct bnx2 *bp = netdev_priv(dev); int support_serdes = 0, support_copper = 0; + u32 supported, advertising; - cmd->supported = SUPPORTED_Autoneg; + supported = SUPPORTED_Autoneg; if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) { support_serdes = 1; support_copper = 1; @@ -6854,50 +6889,59 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) support_copper = 1; if (support_serdes) { - cmd->supported |= SUPPORTED_1000baseT_Full | + supported |= SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE; if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) - cmd->supported |= SUPPORTED_2500baseX_Full; - + supported |= SUPPORTED_2500baseX_Full; } if (support_copper) { - cmd->supported |= SUPPORTED_10baseT_Half | + supported |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | SUPPORTED_TP; - } spin_lock_bh(&bp->phy_lock); - cmd->port = bp->phy_port; - cmd->advertising = bp->advertising; + 
cmd->base.port = bp->phy_port; + advertising = bp->advertising; if (bp->autoneg & AUTONEG_SPEED) { - cmd->autoneg = AUTONEG_ENABLE; + cmd->base.autoneg = AUTONEG_ENABLE; } else { - cmd->autoneg = AUTONEG_DISABLE; + cmd->base.autoneg = AUTONEG_DISABLE; } if (netif_carrier_ok(dev)) { - ethtool_cmd_speed_set(cmd, bp->line_speed); - cmd->duplex = bp->duplex; + cmd->base.speed = bp->line_speed; + cmd->base.duplex = bp->duplex; + if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) { + if (bp->phy_flags & BNX2_PHY_FLAG_MDIX) + cmd->base.eth_tp_mdix = ETH_TP_MDI_X; + else + cmd->base.eth_tp_mdix = ETH_TP_MDI; + } } else { - ethtool_cmd_speed_set(cmd, -1); - cmd->duplex = -1; + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } spin_unlock_bh(&bp->phy_lock); - cmd->transceiver = XCVR_INTERNAL; - cmd->phy_address = bp->phy_addr; + cmd->base.phy_address = bp->phy_addr; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); return 0; } static int -bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +bnx2_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { struct bnx2 *bp = netdev_priv(dev); u8 autoneg = bp->autoneg; @@ -6908,24 +6952,26 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) spin_lock_bh(&bp->phy_lock); - if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE) + if (cmd->base.port != PORT_TP && cmd->base.port != PORT_FIBRE) goto err_out_unlock; - if (cmd->port != bp->phy_port && + if (cmd->base.port != bp->phy_port && !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)) goto err_out_unlock; /* If device is down, we can store the settings only if the user * is setting the currently active port. */ - if (!netif_running(dev) && cmd->port != bp->phy_port) + if (!netif_running(dev) && cmd->base.port != bp->phy_port) goto err_out_unlock; - if (cmd->autoneg == AUTONEG_ENABLE) { + if (cmd->base.autoneg == AUTONEG_ENABLE) { autoneg |= AUTONEG_SPEED; - advertising = cmd->advertising; - if (cmd->port == PORT_TP) { + ethtool_convert_link_mode_to_legacy_u32( + &advertising, cmd->link_modes.advertising); + + if (cmd->base.port == PORT_TP) { advertising &= ETHTOOL_ALL_COPPER_SPEED; if (!advertising) advertising = ETHTOOL_ALL_COPPER_SPEED; @@ -6937,11 +6983,12 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) advertising |= ADVERTISED_Autoneg; } else { - u32 speed = ethtool_cmd_speed(cmd); - if (cmd->port == PORT_FIBRE) { + u32 speed = cmd->base.speed; + + if (cmd->base.port == PORT_FIBRE) { if ((speed != SPEED_1000 && speed != SPEED_2500) || - (cmd->duplex != DUPLEX_FULL)) + (cmd->base.duplex != DUPLEX_FULL)) goto err_out_unlock; if (speed == SPEED_2500 && @@ -6952,7 +6999,7 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) autoneg &= ~AUTONEG_SPEED; req_line_speed = speed; - req_duplex = cmd->duplex; + req_duplex = cmd->base.duplex; advertising = 0; } @@ -6966,7 +7013,7 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) * brought up. 
*/ if (netif_running(dev)) - err = bnx2_setup_phy(bp, cmd->port); + err = bnx2_setup_phy(bp, cmd->base.port); err_out_unlock: spin_unlock_bh(&bp->phy_lock); @@ -6979,10 +7026,9 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct bnx2 *bp = netdev_priv(dev); - strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); - strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version)); + strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); + strscpy(info->fw_version, bp->fw_version, sizeof(info->fw_version)); } #define BNX2_REGDUMP_LEN (32 * 1024) @@ -7081,6 +7127,9 @@ bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) else { bp->wol = 0; } + + device_set_wakeup_enable(&bp->pdev->dev, bp->wol); + return 0; } @@ -7143,7 +7192,7 @@ bnx2_get_eeprom_len(struct net_device *dev) { struct bnx2 *bp = netdev_priv(dev); - if (bp->flash_info == NULL) + if (!bp->flash_info) return 0; return (int) bp->flash_size; @@ -7156,9 +7205,6 @@ bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, struct bnx2 *bp = netdev_priv(dev); int rc; - if (!netif_running(dev)) - return -EAGAIN; - /* parameters already validated in ethtool_get_eeprom */ rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len); @@ -7173,9 +7219,6 @@ bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, struct bnx2 *bp = netdev_priv(dev); int rc; - if (!netif_running(dev)) - return -EAGAIN; - /* parameters already validated in ethtool_set_eeprom */ rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len); @@ -7183,8 +7226,10 @@ bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, return rc; } -static int -bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) +static int bnx2_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bnx2 *bp = netdev_priv(dev); @@ -7205,8 +7250,10 @@ bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) return 0; } -static int -bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) +static int bnx2_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bnx2 *bp = netdev_priv(dev); @@ -7255,7 +7302,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) } static void -bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) +bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct bnx2 *bp = netdev_priv(dev); @@ -7326,7 +7375,9 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq) } static int -bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) +bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) { struct bnx2 *bp = netdev_priv(dev); int rc; @@ -7535,8 +7586,6 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf) { struct bnx2 *bp = netdev_priv(dev); - bnx2_set_power_state(bp, PCI_D0); - memset(buf, 0, 
sizeof(u64) * BNX2_NUM_TESTS); if (etest->flags & ETH_TEST_FL_OFFLINE) { int i; @@ -7585,8 +7634,6 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf) etest->flags |= ETH_TEST_FL_FAILED; } - if (!netif_running(bp->dev)) - bnx2_set_power_state(bp, PCI_D3hot); } static void @@ -7614,7 +7661,7 @@ bnx2_get_ethtool_stats(struct net_device *dev, u32 *temp_stats = (u32 *) bp->temp_stats_blk; u8 *stats_len_arr = NULL; - if (hw_stats == NULL) { + if (!hw_stats) { memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS); return; } @@ -7658,8 +7705,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) switch (state) { case ETHTOOL_ID_ACTIVE: - bnx2_set_power_state(bp, PCI_D0); - bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG); BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC); return 1; /* cycle on/off once per second */ @@ -7680,26 +7725,12 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) case ETHTOOL_ID_INACTIVE: BNX2_WR(bp, BNX2_EMAC_LED, 0); BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save); - - if (!netif_running(dev)) - bnx2_set_power_state(bp, PCI_D3hot); break; } return 0; } -static netdev_features_t -bnx2_fix_features(struct net_device *dev, netdev_features_t features) -{ - struct bnx2 *bp = netdev_priv(dev); - - if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)) - features |= NETIF_F_HW_VLAN_CTAG_RX; - - return features; -} - static int bnx2_set_features(struct net_device *dev, netdev_features_t features) { @@ -7774,8 +7805,11 @@ static int bnx2_set_channels(struct net_device *dev, } static const struct ethtool_ops bnx2_ethtool_ops = { - .get_settings = bnx2_get_settings, - .set_settings = bnx2_set_settings, + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USECS_IRQ | + ETHTOOL_COALESCE_MAX_FRAMES_IRQ | + ETHTOOL_COALESCE_STATS_BLOCK_USECS, .get_drvinfo = bnx2_get_drvinfo, .get_regs_len = bnx2_get_regs_len, .get_regs = bnx2_get_regs, @@ -7799,6 +7833,8 @@ static const struct ethtool_ops bnx2_ethtool_ops = { .get_sset_count = bnx2_get_sset_count, .get_channels = bnx2_get_channels, .set_channels = bnx2_set_channels, + .get_link_ksettings = bnx2_get_link_ksettings, + .set_link_ksettings = bnx2_set_link_ksettings, }; /* Called with rtnl_lock */ @@ -7813,7 +7849,7 @@ bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGMIIPHY: data->phy_id = bp->phy_addr; - /* fallthru */ + fallthrough; case SIOCGMIIREG: { u32 mii_regval; @@ -7862,7 +7898,7 @@ bnx2_change_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); if (netif_running(dev)) bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); @@ -7875,11 +7911,7 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu) { struct bnx2 *bp = netdev_priv(dev); - if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) || - ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE)) - return -EINVAL; - - dev->mtu = new_mtu; + WRITE_ONCE(dev->mtu, new_mtu); return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size, false); } @@ -7994,62 +8026,40 @@ bnx2_get_pci_speed(struct bnx2 *bp) static void bnx2_read_vpd_fw_ver(struct bnx2 *bp) { + unsigned int len; int rc, i, j; u8 *data; - unsigned int block_end, rosize, len; #define BNX2_VPD_NVRAM_OFFSET 0x300 #define BNX2_VPD_LEN 128 #define BNX2_MAX_VER_SLEN 30 - data = kmalloc(256, GFP_KERNEL); + data = kmalloc(BNX2_VPD_LEN, GFP_KERNEL); if (!data) 
 		return;
 
-	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
-			     BNX2_VPD_LEN);
+	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data, BNX2_VPD_LEN);
 	if (rc)
 		goto vpd_done;
 
-	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
-		data[i] = data[i + BNX2_VPD_LEN + 3];
-		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
-		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
-		data[i + 3] = data[i + BNX2_VPD_LEN];
-	}
-
-	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
-	if (i < 0)
-		goto vpd_done;
-
-	rosize = pci_vpd_lrdt_size(&data[i]);
-	i += PCI_VPD_LRDT_TAG_SIZE;
-	block_end = i + rosize;
+	for (i = 0; i < BNX2_VPD_LEN; i += 4)
+		swab32s((u32 *)&data[i]);
 
-	if (block_end > BNX2_VPD_LEN)
-		goto vpd_done;
-
-	j = pci_vpd_find_info_keyword(data, i, rosize,
-				      PCI_VPD_RO_KEYWORD_MFR_ID);
+	j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
+					 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
 	if (j < 0)
 		goto vpd_done;
 
-	len = pci_vpd_info_field_size(&data[j]);
-
-	j += PCI_VPD_INFO_FLD_HDR_SIZE;
-	if (j + len > block_end || len != 4 ||
-	    memcmp(&data[j], "1028", 4))
+	if (len != 4 || memcmp(&data[j], "1028", 4))
 		goto vpd_done;
 
-	j = pci_vpd_find_info_keyword(data, i, rosize,
-				      PCI_VPD_RO_KEYWORD_VENDOR0);
+	j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN,
+					 PCI_VPD_RO_KEYWORD_VENDOR0,
+					 &len);
 	if (j < 0)
 		goto vpd_done;
 
-	len = pci_vpd_info_field_size(&data[j]);
-
-	j += PCI_VPD_INFO_FLD_HDR_SIZE;
-	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
+	if (len > BNX2_MAX_VER_SLEN)
 		goto vpd_done;
 
 	memcpy(bp->fw_version, &data[j], len);
@@ -8066,7 +8076,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	int rc, i, j;
 	u32 reg;
 	u64 dma_mask, persist_dma_mask;
-	int err;
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
 	bp = netdev_priv(dev);
@@ -8077,7 +8086,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->temp_stats_blk =
 		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
 
-	if (bp->temp_stats_blk == NULL) {
+	if (!bp->temp_stats_blk) {
 		rc = -ENOMEM;
 		goto err_out;
 	}
@@ -8130,8 +8139,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 		goto err_out_release;
 	}
 
-	bnx2_set_power_state(bp, PCI_D0);
-
 	/* Configure byte swap and enable write to the reg_window registers.
 	 * Rely on CPU to do target byte swapping on big endian systems
 	 * The chip's target access swapping will not swap all accesses
@@ -8151,12 +8158,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 		bp->flags |= BNX2_FLAG_PCIE;
 		if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
 			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
-
-		/* AER (Advanced Error Reporting) hooks */
-		err = pci_enable_pcie_error_reporting(pdev);
-		if (!err)
-			bp->flags |= BNX2_FLAG_AER_ENABLED;
-
 	} else {
 		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
 		if (bp->pcix_cap == 0) {
@@ -8170,13 +8171,13 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
 	    BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
-		if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
+		if (pdev->msix_cap)
 			bp->flags |= BNX2_FLAG_MSIX_CAP;
 	}
 
 	if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
 	    BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
-		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
+		if (pdev->msi_cap)
 			bp->flags |= BNX2_FLAG_MSI_CAP;
 	}
 
@@ -8187,15 +8188,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
 
 	/* Configure DMA attributes.
 	 */
-	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
+	if (dma_set_mask(&pdev->dev, dma_mask) == 0) {
 		dev->features |= NETIF_F_HIGHDMA;
-		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
+		rc = dma_set_coherent_mask(&pdev->dev, persist_dma_mask);
 		if (rc) {
 			dev_err(&pdev->dev,
-				"pci_set_consistent_dma_mask failed, aborting\n");
+				"dma_set_coherent_mask failed, aborting\n");
 			goto err_out_unmap;
 		}
-	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
+	} else if ((rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
 		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
 		goto err_out_unmap;
 	}
@@ -8210,9 +8211,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 		BNX2_WR(bp, PCI_COMMAND, reg);
 	} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
 		!(bp->flags & BNX2_FLAG_PCIX)) {
-
 		dev_err(&pdev->dev,
 			"5706 A1 can only be used in a PCIX bus, aborting\n");
+		rc = -EPERM;
 		goto err_out_unmap;
 	}
@@ -8324,6 +8325,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
 	bp->phy_addr = 1;
 
+	/* allocate stats_blk */
+	rc = bnx2_alloc_stats_blk(dev);
+	if (rc)
+		goto err_out_unmap;
+
 	/* Disable WOL support if we are running on a SERDES chip. */
 	if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
 		bnx2_get_5709_media(bp);
@@ -8369,6 +8375,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 			bp->wol = 0;
 	}
 
+	if (bp->flags & BNX2_FLAG_NO_WOL)
+		device_set_wakeup_capable(&bp->pdev->dev, false);
+	else
+		device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
+
 	if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
 		bp->tx_quick_cons_trip_int =
 			bp->tx_quick_cons_trip;
@@ -8410,10 +8421,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bnx2_set_default_link(bp);
 	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
 
-	init_timer(&bp->timer);
+	timer_setup(&bp->timer, bnx2_timer, 0);
 	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
-	bp->timer.data = (unsigned long) bp;
-	bp->timer.function = bnx2_timer;
 
 #ifdef BCM_CNIC
 	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
@@ -8427,11 +8436,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	return 0;
 
 err_out_unmap:
-	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
-		pci_disable_pcie_error_reporting(pdev);
-		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
-	}
-
 	pci_iounmap(pdev, bp->regview);
 	bp->regview = NULL;
 
@@ -8440,9 +8444,10 @@ err_out_release:
 
 err_out_disable:
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 
 err_out:
+	kfree(bp->temp_stats_blk);
+
 	return rc;
 }
 
@@ -8489,7 +8494,7 @@ bnx2_init_napi(struct bnx2 *bp)
 		else
 			poll = bnx2_poll_msix;
 
-		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
+		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll);
 		bnapi->bp = bp;
 	}
 }
@@ -8500,11 +8505,10 @@ static const struct net_device_ops bnx2_netdev_ops = {
 	.ndo_stop		= bnx2_close,
 	.ndo_get_stats64	= bnx2_get_stats64,
 	.ndo_set_rx_mode	= bnx2_set_rx_mode,
-	.ndo_do_ioctl		= bnx2_ioctl,
+	.ndo_eth_ioctl		= bnx2_ioctl,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= bnx2_change_mac_addr,
 	.ndo_change_mtu		= bnx2_change_mtu,
-	.ndo_fix_features	= bnx2_fix_features,
 	.ndo_set_features	= bnx2_set_features,
 	.ndo_tx_timeout		= bnx2_tx_timeout,
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -8515,15 +8519,11 @@ static int
 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
-	static int version_printed = 0;
 	struct net_device *dev;
 	struct bnx2 *bp;
 	int rc;
 	char str[40];
 
-	if (version_printed++ == 0)
-		pr_info("%s", version);
-
 	/* dev zeroed in init_etherdev */
 	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
 	if (!dev)
@@ -8541,7 +8541,16 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_set_drvdata(pdev, dev);
 
-	memcpy(dev->dev_addr, bp->mac_addr, 6);
+	/*
+	 * In-flight DMA from 1st kernel could continue going in kdump kernel.
+	 * New io-page table has been created before bnx2 does reset at open stage.
+	 * We have to wait for the in-flight DMA to complete to avoid it look up
+	 * into the newly created io-page table.
+	 */
+	if (is_kdump_kernel())
+		bnx2_wait_dma_complete(bp);
+
+	eth_hw_addr_set(dev, bp->mac_addr);
 
 	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
 		NETIF_F_TSO | NETIF_F_TSO_ECN |
@@ -8554,6 +8563,11 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 	dev->features |= dev->hw_features;
 	dev->priv_flags |= IFF_UNICAST_FLT;
+	dev->min_mtu = MIN_ETHERNET_PACKET_SIZE;
+	dev->max_mtu = MAX_ETHERNET_JUMBO_PACKET_SIZE;
+
+	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
+		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
 
 	if ((rc = register_netdev(dev))) {
 		dev_err(&pdev->dev, "Cannot register net device\n");
@@ -8573,8 +8587,8 @@ error:
 	pci_iounmap(pdev, bp->regview);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 err_free:
+	bnx2_free_stats_blk(dev);
 	free_netdev(dev);
 	return rc;
 }
@@ -8587,68 +8601,67 @@ bnx2_remove_one(struct pci_dev *pdev)
 
 	unregister_netdev(dev);
 
-	del_timer_sync(&bp->timer);
+	timer_delete_sync(&bp->timer);
 	cancel_work_sync(&bp->reset_task);
 
 	pci_iounmap(bp->pdev, bp->regview);
 
+	bnx2_free_stats_blk(dev);
 	kfree(bp->temp_stats_blk);
 
-	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
-		pci_disable_pcie_error_reporting(pdev);
-		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
-	}
-
 	bnx2_release_firmware(bp);
 
 	free_netdev(dev);
 
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int
-bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
+bnx2_suspend(struct device *device)
 {
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(device);
 	struct bnx2 *bp = netdev_priv(dev);
 
-	/* PCI register 4 needs to be saved whether netif_running() or not.
-	 * MSI address and data need to be saved if using MSI and
-	 * netif_running().
-	 */
-	pci_save_state(pdev);
-	if (!netif_running(dev))
-		return 0;
-
-	cancel_work_sync(&bp->reset_task);
-	bnx2_netif_stop(bp, true);
-	netif_device_detach(dev);
-	del_timer_sync(&bp->timer);
-	bnx2_shutdown_chip(bp);
-	bnx2_free_skbs(bp);
-	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
+	if (netif_running(dev)) {
+		cancel_work_sync(&bp->reset_task);
+		bnx2_netif_stop(bp, true);
+		netif_device_detach(dev);
+		timer_delete_sync(&bp->timer);
+		bnx2_shutdown_chip(bp);
+		__bnx2_free_irq(bp);
+		bnx2_free_skbs(bp);
+	}
+	bnx2_setup_wol(bp);
 	return 0;
 }
 
 static int
-bnx2_resume(struct pci_dev *pdev)
+bnx2_resume(struct device *device)
 {
-	struct net_device *dev = pci_get_drvdata(pdev);
+	struct net_device *dev = dev_get_drvdata(device);
 	struct bnx2 *bp = netdev_priv(dev);
 
-	pci_restore_state(pdev);
 	if (!netif_running(dev))
 		return 0;
 
 	bnx2_set_power_state(bp, PCI_D0);
 	netif_device_attach(dev);
+	bnx2_request_irq(bp);
 	bnx2_init_nic(bp, 1);
 	bnx2_netif_start(bp, true);
 	return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
+#define BNX2_PM_OPS (&bnx2_pm_ops)
+
+#else
+
+#define BNX2_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
 /**
  * bnx2_io_error_detected - called when PCI error is detected
  * @pdev: Pointer to PCI device
@@ -8673,7 +8686,7 @@ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
 	if (netif_running(dev)) {
 		bnx2_netif_stop(bp, true);
-		del_timer_sync(&bp->timer);
+		timer_delete_sync(&bp->timer);
 		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
 	}
@@ -8694,36 +8707,29 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct bnx2 *bp = netdev_priv(dev);
-	pci_ers_result_t result;
-	int err;
+	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
+	int err = 0;
 
 	rtnl_lock();
 	if (pci_enable_device(pdev)) {
 		dev_err(&pdev->dev,
 			"Cannot re-enable PCI device after reset\n");
-		result = PCI_ERS_RESULT_DISCONNECT;
 	} else {
 		pci_set_master(pdev);
 		pci_restore_state(pdev);
-		pci_save_state(pdev);
-		if (netif_running(dev)) {
-			bnx2_set_power_state(bp, PCI_D0);
-			bnx2_init_nic(bp, 1);
-		}
-		result = PCI_ERS_RESULT_RECOVERED;
-	}
-	rtnl_unlock();
+		if (netif_running(dev))
+			err = bnx2_init_nic(bp, 1);
 
-	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
-		return result;
+		if (!err)
+			result = PCI_ERS_RESULT_RECOVERED;
+	}
 
-	err = pci_cleanup_aer_uncorrect_error_status(pdev);
-	if (err) {
-		dev_err(&pdev->dev,
-			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
-			err); /* non-fatal, continue */
+	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
+		bnx2_napi_enable(bp);
+		dev_close(dev);
 	}
+	rtnl_unlock();
 
 	return result;
 }
@@ -8748,6 +8754,28 @@ static void bnx2_io_resume(struct pci_dev *pdev)
 	rtnl_unlock();
 }
 
+static void bnx2_shutdown(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2 *bp;
+
+	if (!dev)
+		return;
+
+	bp = netdev_priv(dev);
+	if (!bp)
+		return;
+
+	rtnl_lock();
+	if (netif_running(dev))
+		dev_close(bp->dev);
+
+	if (system_state == SYSTEM_POWER_OFF)
+		bnx2_set_power_state(bp, PCI_D3hot);
+
+	rtnl_unlock();
+}
+
 static const struct pci_error_handlers bnx2_err_handler = {
 	.error_detected	= bnx2_io_error_detected,
 	.slot_reset	= bnx2_io_slot_reset,
@@ -8759,9 +8787,9 @@ static struct pci_driver bnx2_pci_driver = {
 	.id_table	= bnx2_pci_tbl,
 	.probe		= bnx2_init_one,
 	.remove		= bnx2_remove_one,
-	.suspend	= bnx2_suspend,
-	.resume		= bnx2_resume,
+	.driver.pm	= BNX2_PM_OPS,
 	.err_handler	= &bnx2_err_handler,
+	.shutdown	= bnx2_shutdown,
 };
 
 module_pci_driver(bnx2_pci_driver);
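
Note on the timer change above: replacing init_timer() plus the ->data/->function assignments with timer_setup() is the standard kernel timer API migration; the callback now receives a struct timer_list pointer and recovers its containing structure itself. The following is a minimal illustrative sketch of that pattern only, not code from bnx2.c -- my_dev, my_timer_fn and my_dev_start_timer are hypothetical names (newer trees also spell the helper timer_container_of()).

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_dev {
	struct timer_list timer;	/* replaces init_timer() + ->data/->function */
	unsigned long ticks;
};

static void my_timer_fn(struct timer_list *t)
{
	/* Recover the containing structure; the old ->data field is gone. */
	struct my_dev *md = from_timer(md, t, timer);

	md->ticks++;
	mod_timer(&md->timer, jiffies + HZ);	/* re-arm roughly once a second */
}

static void my_dev_start_timer(struct my_dev *md)
{
	timer_setup(&md->timer, my_timer_fn, 0);
	md->timer.expires = jiffies + HZ;
	add_timer(&md->timer);
}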

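The diff also converts the legacy pci_driver .suspend/.resume callbacks to a dev_pm_ops table wired through .driver.pm, which is the generic pattern: PCI config-space save/restore and power-state transitions move to the PCI core, leaving the driver to quiesce and restart itself. A sketch under the same conventions, with hypothetical my_* names and with .id_table/.probe/.remove omitted:

#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/netdevice.h>

#ifdef CONFIG_PM_SLEEP
static int my_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);

	/* Quiesce the device; the PCI core saves/restores config space. */
	if (netif_running(dev))
		netif_device_detach(dev);
	return 0;
}

static int my_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);

	if (netif_running(dev))
		netif_device_attach(dev);
	return 0;
}

static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);
#define MY_PM_OPS (&my_pm_ops)
#else
#define MY_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */

static struct pci_driver my_pci_driver = {
	.name		= "my_pci_driver",
	/* .id_table, .probe and .remove omitted from this sketch */
	.driver.pm	= MY_PM_OPS,
};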