Diffstat (limited to 'drivers/net/ethernet/broadcom')
22 files changed, 547 insertions, 226 deletions
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 035dbb1b2c98..a780b7215021 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -948,7 +948,7 @@ irq_ack: return IRQ_RETVAL(handled); } -static void b44_tx_timeout(struct net_device *dev) +static void b44_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct b44 *bp = netdev_priv(dev); @@ -1516,8 +1516,10 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset) int ethaddr_bytes = ETH_ALEN; memset(ppattern + offset, 0xff, magicsync); - for (j = 0; j < magicsync; j++) - set_bit(len++, (unsigned long *) pmask); + for (j = 0; j < magicsync; j++) { + pmask[len >> 3] |= BIT(len & 7); + len++; + } for (j = 0; j < B44_MAX_PATTERNS; j++) { if ((B44_PATTERN_SIZE - len) >= ETH_ALEN) @@ -1529,7 +1531,8 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset) for (k = 0; k< ethaddr_bytes; k++) { ppattern[offset + magicsync + (j * ETH_ALEN) + k] = macaddr[k]; - set_bit(len++, (unsigned long *) pmask); + pmask[len >> 3] |= BIT(len & 7); + len++; } } return len - 1; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 825af709708e..f07ac0e0af59 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1354,7 +1354,7 @@ out: return ret; } -static void bcm_sysport_tx_timeout(struct net_device *dev) +static void bcm_sysport_tx_timeout(struct net_device *dev, unsigned int txqueue) { netdev_warn(dev, "transmit timeout!\n"); @@ -2323,7 +2323,7 @@ static int bcm_sysport_map_queues(struct notifier_block *nb, ring->switch_queue = qp; ring->switch_port = port; ring->inspect = true; - priv->ring_map[q + port * num_tx_queues] = ring; + priv->ring_map[qp + port * num_tx_queues] = ring; qp++; } @@ -2338,7 +2338,7 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb, struct net_device *slave_dev; unsigned int num_tx_queues; struct net_device *dev; - unsigned int q, port; + unsigned int q, qp, port; priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier); if (priv->netdev != info->master) @@ -2364,7 +2364,8 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb, continue; ring->inspect = false; - priv->ring_map[q + port * num_tx_queues] = NULL; + qp = ring->switch_queue; + priv->ring_map[qp + port * num_tx_queues] = NULL; } return 0; @@ -2427,6 +2428,14 @@ static int bcm_sysport_probe(struct platform_device *pdev) if (!of_id || !of_id->data) return -EINVAL; + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + if (ret) + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(&pdev->dev, "unable to set DMA mask: %d\n", ret); + return ret; + } + /* Fairly quickly we need to know the type of adapter we have */ params = of_id->data; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 148734b166f0..1bb07a5d82c9 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1248,14 +1248,6 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr) return 0; } -static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) -{ - if (!netif_running(net_dev)) - return -EINVAL; - - return phy_mii_ioctl(net_dev->phydev, ifr, cmd); -} - static const struct net_device_ops bgmac_netdev_ops = { .ndo_open = bgmac_open, .ndo_stop = bgmac_stop, @@ -1263,7 
+1255,7 @@ static const struct net_device_ops bgmac_netdev_ops = { .ndo_set_rx_mode = bgmac_set_rx_mode, .ndo_set_mac_address = bgmac_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = bgmac_ioctl, + .ndo_do_ioctl = phy_do_ioctl_running, }; /************************************************** diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index fbc196b480b6..dbb7874607ca 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -6575,7 +6575,7 @@ bnx2_dump_state(struct bnx2 *bp) } static void -bnx2_tx_timeout(struct net_device *dev) +bnx2_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct bnx2 *bp = netdev_priv(dev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 5e037a305b83..ee9e9290f112 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4970,7 +4970,7 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features) return 0; } -void bnx2x_tx_timeout(struct net_device *dev) +void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct bnx2x *bp = netdev_priv(dev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 8b08cb18e363..6f1352d51cb2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -617,7 +617,7 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features); * * @dev: net device */ -void bnx2x_tx_timeout(struct net_device *dev); +void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue); /** bnx2x_get_c2s_mapping - read inner-to-outer vlan configuration * c2s_map should have BNX2X_MAX_PRIORITY entries. @@ -1109,7 +1109,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp) for (i = 0; i < E1H_FUNC_MAX / 2; i++) { u32 func_config = MF_CFG_RD(bp, - func_mf_config[BP_PORT(bp) + 2 * i]. + func_mf_config[BP_PATH(bp) + 2 * i]. config); func_num += ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 
0 : 1); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 192ff8d5da32..1c26fa962233 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -9976,10 +9976,18 @@ static void bnx2x_recovery_failed(struct bnx2x *bp) */ static void bnx2x_parity_recover(struct bnx2x *bp) { - bool global = false; u32 error_recovered, error_unrecovered; - bool is_parity; + bool is_parity, global = false; +#ifdef CONFIG_BNX2X_SRIOV + int vf_idx; + + for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) { + struct bnx2x_virtf *vf = BP_VF(bp, vf_idx); + if (vf) + vf->state = VF_LOST; + } +#endif DP(NETIF_MSG_HW, "Handling parity\n"); while (1) { switch (bp->recovery_state) { @@ -14045,7 +14053,7 @@ static int bnx2x_init_one(struct pci_dev *pdev, rc = -ENOMEM; goto init_one_freemem; } - bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), + bp->doorbells = ioremap(pci_resource_start(pdev, 2), doorbell_size); } if (!bp->doorbells) { @@ -15402,6 +15410,7 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp) REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK); break; case HWTSTAMP_TX_ONESTEP_SYNC: + case HWTSTAMP_TX_ONESTEP_P2P: BNX2X_ERR("One-step timestamping is not supported\n"); return -ERANGE; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 7a6e82db4231..bacc8552bce1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -1536,8 +1536,11 @@ void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \ func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT) +#define BNX2X_VFS_VLAN_CREDIT(bp) \ + (GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) + #define PF_VLAN_CREDIT_E2(bp, func_num) \ - ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) / \ + ((MAX_VLAN_CREDIT_E2 - 1 - BNX2X_VFS_VLAN_CREDIT(bp)) / \ func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT) #endif /* BNX2X_SP_VERBS */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index b6ebd92ec565..3a716c015415 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -139,6 +139,7 @@ struct bnx2x_virtf { #define VF_ACQUIRED 1 /* VF acquired, but not initialized */ #define VF_ENABLED 2 /* VF Enabled */ #define VF_RESET 3 /* VF FLR'd, pending cleanup */ +#define VF_LOST 4 /* Recovery while VFs are loaded */ bool flr_clnup_stage; /* true during flr cleanup */ bool malicious; /* true if FW indicated so, until FLR */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 0752b7fa4d9c..ea0e9394f898 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -2107,6 +2107,18 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, { int i; + if (vf->state == VF_LOST) { + /* Just ack the FW and return if VFs are lost + * in case of parity error. VFs are supposed to be timedout + * on waiting for PF response. 
+ */ + DP(BNX2X_MSG_IOV, + "VF 0x%x lost, not handling the request\n", vf->abs_vfid); + + storm_memset_vf_mbx_ack(bp, vf->abs_vfid); + return; + } + /* check if tlv type is known */ if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) { /* Lock the per vf op mutex and note the locker's identity. diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 85983f0e3134..483935b001c8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -944,6 +944,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, dma_addr -= bp->rx_dma_offset; dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); + page_pool_release_page(rxr->page_pool, page); if (unlikely(!payload)) payload = eth_get_headlen(bp->dev, data_ptr, len); @@ -2001,6 +2002,9 @@ static int bnxt_async_event_process(struct bnxt *bp, case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: { u32 data1 = le32_to_cpu(cmpl->event_data1); + if (!bp->fw_health) + goto async_event_process_exit; + bp->fw_reset_timestamp = jiffies; bp->fw_reset_min_dsecs = cmpl->timestamp_lo; if (!bp->fw_reset_min_dsecs) @@ -4421,8 +4425,9 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); - flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE | - FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; + flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; + if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) + flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; @@ -6186,7 +6191,7 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp, tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq); val = clamp_t(u16, tmr, 1, coal_cap->cmpl_aggr_dma_tmr_during_int_max); - req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr); + req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val); req->enables |= cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE); } @@ -6993,7 +6998,6 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) pf->fw_fid = le16_to_cpu(resp->fid); pf->port_id = le16_to_cpu(resp->port_id); - bp->dev->dev_port = pf->port_id; memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); pf->first_vf_id = le16_to_cpu(resp->first_vf_id); pf->max_vfs = le16_to_cpu(resp->max_vfs); @@ -7115,14 +7119,6 @@ static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) goto err_recovery_out; - if (!fw_health) { - fw_health = kzalloc(sizeof(*fw_health), GFP_KERNEL); - bp->fw_health = fw_health; - if (!fw_health) { - rc = -ENOMEM; - goto err_recovery_out; - } - } fw_health->flags = le32_to_cpu(resp->flags); if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { @@ -7292,6 +7288,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; bp->chip_num = le16_to_cpu(resp->chip_num); + bp->chip_rev = resp->chip_rev; if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && !resp->chip_metal) bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; @@ -8796,6 +8793,9 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) if (fw_reset) { if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) bnxt_ulp_stop(bp); + bnxt_free_ctx_mem(bp); + kfree(bp->ctx); + bp->ctx = NULL; rc = 
bnxt_fw_init_one(bp); if (rc) { set_bit(BNXT_STATE_ABORT_ERR, &bp->state); @@ -9064,7 +9064,7 @@ static int bnxt_update_phy_setting(struct bnxt *bp) /* The last close may have shutdown the link, so need to call * PHY_CFG to bring it back up. */ - if (!netif_carrier_ok(bp->dev)) + if (!bp->link_info.link_up) update_link = true; if (!bnxt_eee_config_ok(bp)) @@ -9976,7 +9976,7 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent) } } -static void bnxt_tx_timeout(struct net_device *dev) +static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct bnxt *bp = netdev_priv(dev); @@ -9990,8 +9990,7 @@ static void bnxt_fw_health_check(struct bnxt *bp) struct bnxt_fw_health *fw_health = bp->fw_health; u32 val; - if (!fw_health || !fw_health->enabled || - test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) return; if (fw_health->tmr_counter) { @@ -10042,6 +10041,13 @@ static void bnxt_timer(struct timer_list *t) bnxt_queue_sp_work(bp); } +#ifdef CONFIG_RFS_ACCEL + if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) { + set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); + bnxt_queue_sp_work(bp); + } +#endif /*CONFIG_RFS_ACCEL*/ + if (bp->link_info.phy_retry) { if (time_after(jiffies, bp->link_info.phy_retry_expires)) { bp->link_info.phy_retry = false; @@ -10052,7 +10058,8 @@ static void bnxt_timer(struct timer_list *t) } } - if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) { + if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev && + netif_carrier_ok(dev)) { set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event); bnxt_queue_sp_work(bp); } @@ -10482,6 +10489,23 @@ static void bnxt_init_dflt_coal(struct bnxt *bp) bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; } +static void bnxt_alloc_fw_health(struct bnxt *bp) +{ + if (bp->fw_health) + return; + + if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && + !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) + return; + + bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); + if (!bp->fw_health) { + netdev_warn(bp->dev, "Failed to allocate fw_health\n"); + bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; + } +} + static int bnxt_fw_init_one_p1(struct bnxt *bp) { int rc; @@ -10528,6 +10552,7 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp) netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", rc); + bnxt_alloc_fw_health(bp); rc = bnxt_hwrm_error_recovery_qcfg(bp); if (rc) netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", @@ -10552,7 +10577,7 @@ static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; - if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) { + if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { bp->flags |= BNXT_FLAG_UDP_RSS_CAP; bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; @@ -10609,6 +10634,12 @@ static int bnxt_fw_init_one(struct bnxt *bp) rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); if (rc) return rc; + + /* In case fw capabilities have changed, destroy the unneeded + * reporters and create newly capable ones. 
+ */ + bnxt_dl_fw_reporters_destroy(bp, false); + bnxt_dl_fw_reporters_create(bp); bnxt_fw_init_one_p3(bp); return 0; } @@ -10751,8 +10782,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); return; case BNXT_FW_RESET_STATE_ENABLE_DEV: - if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && - bp->fw_health) { + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { u32 val; val = bnxt_fw_health_readl(bp, @@ -10801,6 +10831,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) smp_mb__before_atomic(); clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); bnxt_ulp_start(bp, rc); + bnxt_dl_health_recovery_done(bp); bnxt_dl_health_status_update(bp, true); rtnl_unlock(); break; @@ -11044,11 +11075,23 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, struct flow_keys *keys1 = &f1->fkeys; struct flow_keys *keys2 = &f2->fkeys; - if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src && - keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst && - keys1->ports.ports == keys2->ports.ports && - keys1->basic.ip_proto == keys2->basic.ip_proto && - keys1->basic.n_proto == keys2->basic.n_proto && + if (keys1->basic.n_proto != keys2->basic.n_proto || + keys1->basic.ip_proto != keys2->basic.ip_proto) + return false; + + if (keys1->basic.n_proto == htons(ETH_P_IP)) { + if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || + keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst) + return false; + } else { + if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src, + sizeof(keys1->addrs.v6addrs.src)) || + memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst, + sizeof(keys1->addrs.v6addrs.dst))) + return false; + } + + if (keys1->ports.ports == keys2->ports.ports && keys1->control.flags == keys2->control.flags && ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) @@ -11066,6 +11109,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); int rc = 0, idx, bit_id, l2_idx = 0; struct hlist_head *head; + u32 flags; if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) { struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; @@ -11105,8 +11149,9 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, rc = -EPROTONOSUPPORT; goto err_free; } - if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) && - bp->hwrm_spec_code < 0x10601) { + flags = fkeys->control.flags; + if (((flags & FLOW_DIS_ENCAPSULATION) && + bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { rc = -EPROTONOSUPPORT; goto err_free; } @@ -11340,11 +11385,11 @@ int bnxt_get_port_parent_id(struct net_device *dev, return -EOPNOTSUPP; /* The PF and it's VF-reps only support the switchdev framework */ - if (!BNXT_PF(bp)) + if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID)) return -EOPNOTSUPP; - ppid->id_len = sizeof(bp->switch_id); - memcpy(ppid->id, bp->switch_id, ppid->id_len); + ppid->id_len = sizeof(bp->dsn); + memcpy(ppid->id, bp->dsn, ppid->id_len); return 0; } @@ -11396,13 +11441,13 @@ static void bnxt_remove_one(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); struct bnxt *bp = netdev_priv(dev); - if (BNXT_PF(bp)) { + if (BNXT_PF(bp)) bnxt_sriov_disable(bp); - bnxt_dl_unregister(bp); - } + bnxt_dl_fw_reporters_destroy(bp, true); pci_disable_pcie_error_reporting(pdev); unregister_netdev(dev); + bnxt_dl_unregister(bp); bnxt_shutdown_tc(bp); 
bnxt_cancel_sp_work(bp); bp->sp_event = 0; @@ -11415,6 +11460,8 @@ static void bnxt_remove_one(struct pci_dev *pdev) bnxt_dcb_free(bp); kfree(bp->edev); bp->edev = NULL; + kfree(bp->fw_health); + bp->fw_health = NULL; bnxt_cleanup_pci(bp); bnxt_free_ctx_mem(bp); kfree(bp->ctx); @@ -11434,6 +11481,9 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) rc); return rc; } + if (!fw_dflt) + return 0; + rc = bnxt_update_link(bp, false); if (rc) { netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", @@ -11447,9 +11497,6 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) if (link_info->auto_link_speeds && !link_info->support_auto_speeds) link_info->support_auto_speeds = link_info->support_speeds; - if (!fw_dflt) - return 0; - bnxt_init_ethtool_link_settings(bp); return 0; } @@ -11711,6 +11758,7 @@ static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) put_unaligned_le32(dw, &dsn[0]); pci_read_config_dword(pdev, pos + 4, &dw); put_unaligned_le32(dw, &dsn[4]); + bp->flags |= BNXT_FLAG_DSN_VALID; return 0; } @@ -11822,9 +11870,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (BNXT_PF(bp)) { /* Read the adapter's DSN to use as the eswitch switch_id */ - rc = bnxt_pcie_dsn_get(bp, bp->switch_id); - if (rc) - goto init_err_pci_clean; + rc = bnxt_pcie_dsn_get(bp, bp->dsn); } /* MTU range: 60 - FW defined max */ @@ -11871,12 +11917,15 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bnxt_init_tc(bp); } + bnxt_dl_register(bp); + rc = register_netdev(dev); if (rc) - goto init_err_cleanup_tc; + goto init_err_cleanup; if (BNXT_PF(bp)) - bnxt_dl_register(bp); + devlink_port_type_eth_set(&bp->dl_port, bp->dev); + bnxt_dl_fw_reporters_create(bp); netdev_info(dev, "%s found at mem %lx, node addr %pM\n", board_info[ent->driver_data].name, @@ -11885,7 +11934,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; -init_err_cleanup_tc: +init_err_cleanup: + bnxt_dl_unregister(bp); bnxt_shutdown_tc(bp); bnxt_clear_int_mode(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 505af5cfb1bd..cabef0b4f5fb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1457,6 +1457,8 @@ struct bnxt { #define CHIP_NUM_58804 0xd804 #define CHIP_NUM_58808 0xd808 + u8 chip_rev; + #define BNXT_CHIP_NUM_5730X(chip_num) \ ((chip_num) >= CHIP_NUM_57301 && \ (chip_num) <= CHIP_NUM_57304) @@ -1532,6 +1534,7 @@ struct bnxt { #define BNXT_FLAG_NO_AGG_RINGS 0x20000 #define BNXT_FLAG_RX_PAGE_MODE 0x40000 #define BNXT_FLAG_MULTI_HOST 0x100000 + #define BNXT_FLAG_DSN_VALID 0x200000 #define BNXT_FLAG_DOUBLE_DB 0x400000 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 #define BNXT_FLAG_DIM 0x2000000 @@ -1845,7 +1848,7 @@ struct bnxt { enum devlink_eswitch_mode eswitch_mode; struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */ u16 *cfa_code_map; /* cfa_code -> vf_idx map */ - u8 switch_id[8]; + u8 dsn[8]; struct bnxt_tc_info *tc_info; struct list_head tc_indr_block_list; struct notifier_block tc_netdev_nb; @@ -1936,9 +1939,6 @@ static inline bool bnxt_cfa_hwrm_message(u16 req_type) case HWRM_CFA_ENCAP_RECORD_FREE: case HWRM_CFA_DECAP_FILTER_ALLOC: case HWRM_CFA_DECAP_FILTER_FREE: - case HWRM_CFA_NTUPLE_FILTER_ALLOC: - case HWRM_CFA_NTUPLE_FILTER_FREE: - case HWRM_CFA_NTUPLE_FILTER_CFG: case HWRM_CFA_EM_FLOW_ALLOC: case HWRM_CFA_EM_FLOW_FREE: case HWRM_CFA_EM_FLOW_CFG: diff --git 
a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index acb2dd64c023..eec0168330b7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -21,6 +21,7 @@ bnxt_dl_flash_update(struct devlink *dl, const char *filename, const char *region, struct netlink_ext_ack *extack) { struct bnxt *bp = bnxt_get_bp_from_dl(dl); + int rc; if (region) return -EOPNOTSUPP; @@ -31,7 +32,18 @@ bnxt_dl_flash_update(struct devlink *dl, const char *filename, return -EPERM; } - return bnxt_flash_package_from_file(bp->dev, filename, 0); + devlink_flash_update_begin_notify(dl); + devlink_flash_update_status_notify(dl, "Preparing to flash", region, 0, + 0); + rc = bnxt_flash_package_from_file(bp->dev, filename, 0); + if (!rc) + devlink_flash_update_status_notify(dl, "Flashing done", region, + 0, 0); + else + devlink_flash_update_status_notify(dl, "Flashing failed", + region, 0, 0); + devlink_flash_update_end_notify(dl); + return rc; } static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter, @@ -39,11 +51,10 @@ static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter, struct netlink_ext_ack *extack) { struct bnxt *bp = devlink_health_reporter_priv(reporter); - struct bnxt_fw_health *health = bp->fw_health; u32 val, health_status; int rc; - if (!health || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) return 0; val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); @@ -90,7 +101,7 @@ static int bnxt_fw_reset_recover(struct devlink_health_reporter *reporter, return -EOPNOTSUPP; bnxt_fw_reset(bp); - return 0; + return -EINPROGRESS; } static const @@ -117,7 +128,7 @@ static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter, else if (event == BNXT_FW_EXCEPTION_SP_EVENT) bnxt_fw_exception(bp); - return 0; + return -EINPROGRESS; } static const @@ -126,21 +137,15 @@ struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = { .recover = bnxt_fw_fatal_recover, }; -static void bnxt_dl_fw_reporters_create(struct bnxt *bp) +void bnxt_dl_fw_reporters_create(struct bnxt *bp) { struct bnxt_fw_health *health = bp->fw_health; - if (!health) + if (!bp->dl || !health) return; - health->fw_reporter = - devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops, - 0, false, bp); - if (IS_ERR(health->fw_reporter)) { - netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n", - PTR_ERR(health->fw_reporter)); - health->fw_reporter = NULL; - } + if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) || health->fw_reset_reporter) + goto err_recovery; health->fw_reset_reporter = devlink_health_reporter_create(bp->dl, @@ -150,8 +155,30 @@ static void bnxt_dl_fw_reporters_create(struct bnxt *bp) netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n", PTR_ERR(health->fw_reset_reporter)); health->fw_reset_reporter = NULL; + bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; + } + +err_recovery: + if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) + return; + + if (!health->fw_reporter) { + health->fw_reporter = + devlink_health_reporter_create(bp->dl, + &bnxt_dl_fw_reporter_ops, + 0, false, bp); + if (IS_ERR(health->fw_reporter)) { + netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n", + PTR_ERR(health->fw_reporter)); + health->fw_reporter = NULL; + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; + return; + } } + if (health->fw_fatal_reporter) + return; + health->fw_fatal_reporter = 
devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_fatal_reporter_ops, @@ -160,24 +187,35 @@ static void bnxt_dl_fw_reporters_create(struct bnxt *bp) netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n", PTR_ERR(health->fw_fatal_reporter)); health->fw_fatal_reporter = NULL; + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; } } -static void bnxt_dl_fw_reporters_destroy(struct bnxt *bp) +void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all) { struct bnxt_fw_health *health = bp->fw_health; - if (!health) + if (!bp->dl || !health) return; - if (health->fw_reporter) - devlink_health_reporter_destroy(health->fw_reporter); - - if (health->fw_reset_reporter) + if ((all || !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) && + health->fw_reset_reporter) { devlink_health_reporter_destroy(health->fw_reset_reporter); + health->fw_reset_reporter = NULL; + } - if (health->fw_fatal_reporter) + if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all) + return; + + if (health->fw_reporter) { + devlink_health_reporter_destroy(health->fw_reporter); + health->fw_reporter = NULL; + } + + if (health->fw_fatal_reporter) { devlink_health_reporter_destroy(health->fw_fatal_reporter); + health->fw_fatal_reporter = NULL; + } } void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event) @@ -185,9 +223,6 @@ void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event) struct bnxt_fw_health *fw_health = bp->fw_health; struct bnxt_fw_reporter_ctx fw_reporter_ctx; - if (!fw_health) - return; - fw_reporter_ctx.sp_event = event; switch (event) { case BNXT_FW_RESET_NOTIFY_SP_EVENT: @@ -239,14 +274,30 @@ void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy) health->fatal = false; } +void bnxt_dl_health_recovery_done(struct bnxt *bp) +{ + struct bnxt_fw_health *hlth = bp->fw_health; + + if (hlth->fatal) + devlink_health_reporter_recovery_done(hlth->fw_fatal_reporter); + else + devlink_health_reporter_recovery_done(hlth->fw_reset_reporter); +} + +static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req, + struct netlink_ext_ack *extack); + static const struct devlink_ops bnxt_dl_ops = { #ifdef CONFIG_BNXT_SRIOV .eswitch_mode_set = bnxt_dl_eswitch_mode_set, .eswitch_mode_get = bnxt_dl_eswitch_mode_get, #endif /* CONFIG_BNXT_SRIOV */ + .info_get = bnxt_dl_info_get, .flash_update = bnxt_dl_flash_update, }; +static const struct devlink_ops bnxt_vf_dl_ops; + enum bnxt_dl_param_id { BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, @@ -308,6 +359,136 @@ static void bnxt_copy_from_nvm_data(union devlink_param_value *dst, dst->vu8 = (u8)val32; } +static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp, + union devlink_param_value *nvm_cfg_ver) +{ + struct hwrm_nvm_get_variable_input req = {0}; + union bnxt_nvm_data *data; + dma_addr_t data_dma_addr; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1); + data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data), + &data_dma_addr, GFP_KERNEL); + if (!data) + return -ENOMEM; + + req.dest_data_addr = cpu_to_le64(data_dma_addr); + req.data_len = cpu_to_le16(BNXT_NVM_CFG_VER_BITS); + req.option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER); + + rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + bnxt_copy_from_nvm_data(nvm_cfg_ver, data, + BNXT_NVM_CFG_VER_BITS, + BNXT_NVM_CFG_VER_BYTES); + + dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr); + return rc; +} + +static int bnxt_dl_info_get(struct devlink *dl, struct 
devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + union devlink_param_value nvm_cfg_ver; + struct hwrm_ver_get_output *ver_resp; + char mgmt_ver[FW_VER_STR_LEN]; + char roce_ver[FW_VER_STR_LEN]; + char fw_ver[FW_VER_STR_LEN]; + char buf[32]; + int rc; + + rc = devlink_info_driver_name_put(req, DRV_MODULE_NAME); + if (rc) + return rc; + + sprintf(buf, "%X", bp->chip_num); + rc = devlink_info_version_fixed_put(req, + DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf); + if (rc) + return rc; + + ver_resp = &bp->ver_resp; + sprintf(buf, "%X", ver_resp->chip_rev); + rc = devlink_info_version_fixed_put(req, + DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf); + if (rc) + return rc; + + if (BNXT_PF(bp)) { + sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X", + bp->dsn[7], bp->dsn[6], bp->dsn[5], bp->dsn[4], + bp->dsn[3], bp->dsn[2], bp->dsn[1], bp->dsn[0]); + rc = devlink_info_serial_number_put(req, buf); + if (rc) + return rc; + } + + if (strlen(ver_resp->active_pkg_name)) { + rc = + devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW, + ver_resp->active_pkg_name); + if (rc) + return rc; + } + + if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &nvm_cfg_ver)) { + u32 ver = nvm_cfg_ver.vu32; + + sprintf(buf, "%X.%X.%X", (ver >> 16) & 0xF, (ver >> 8) & 0xF, + ver & 0xF); + rc = devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW_PSID, buf); + if (rc) + return rc; + } + + if (ver_resp->flags & VER_GET_RESP_FLAGS_EXT_VER_AVAIL) { + snprintf(fw_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->hwrm_fw_major, ver_resp->hwrm_fw_minor, + ver_resp->hwrm_fw_build, ver_resp->hwrm_fw_patch); + + snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->mgmt_fw_major, ver_resp->mgmt_fw_minor, + ver_resp->mgmt_fw_build, ver_resp->mgmt_fw_patch); + + snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->roce_fw_major, ver_resp->roce_fw_minor, + ver_resp->roce_fw_build, ver_resp->roce_fw_patch); + } else { + snprintf(fw_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->hwrm_fw_maj_8b, ver_resp->hwrm_fw_min_8b, + ver_resp->hwrm_fw_bld_8b, ver_resp->hwrm_fw_rsvd_8b); + + snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->mgmt_fw_maj_8b, ver_resp->mgmt_fw_min_8b, + ver_resp->mgmt_fw_bld_8b, ver_resp->mgmt_fw_rsvd_8b); + + snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->roce_fw_maj_8b, ver_resp->roce_fw_min_8b, + ver_resp->roce_fw_bld_8b, ver_resp->roce_fw_rsvd_8b); + } + rc = devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW_APP, fw_ver); + if (rc) + return rc; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) { + rc = devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver); + if (rc) + return rc; + + rc = devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver); + if (rc) + return rc; + } + return 0; +} + static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, int msg_len, union devlink_param_value *val) { @@ -450,17 +631,53 @@ static const struct devlink_param bnxt_dl_params[] = { static const struct devlink_param bnxt_dl_port_params[] = { }; -int bnxt_dl_register(struct bnxt *bp) +static int bnxt_dl_params_register(struct bnxt *bp) { - struct devlink *dl; int rc; - if (bp->hwrm_spec_code < 0x10600) { - netdev_warn(bp->dev, "Firmware does not support NVM params"); - return -ENOTSUPP; + if (bp->hwrm_spec_code < 0x10600) + return 0; + + rc = devlink_params_register(bp->dl, bnxt_dl_params, + 
ARRAY_SIZE(bnxt_dl_params)); + if (rc) { + netdev_warn(bp->dev, "devlink_params_register failed. rc=%d", + rc); + return rc; + } + rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params, + ARRAY_SIZE(bnxt_dl_port_params)); + if (rc) { + netdev_err(bp->dev, "devlink_port_params_register failed"); + devlink_params_unregister(bp->dl, bnxt_dl_params, + ARRAY_SIZE(bnxt_dl_params)); + return rc; } + devlink_params_publish(bp->dl); - dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl)); + return 0; +} + +static void bnxt_dl_params_unregister(struct bnxt *bp) +{ + if (bp->hwrm_spec_code < 0x10600) + return; + + devlink_params_unregister(bp->dl, bnxt_dl_params, + ARRAY_SIZE(bnxt_dl_params)); + devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params, + ARRAY_SIZE(bnxt_dl_port_params)); +} + +int bnxt_dl_register(struct bnxt *bp) +{ + struct devlink *dl; + int rc; + + if (BNXT_PF(bp)) + dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl)); + else + dl = devlink_alloc(&bnxt_vf_dl_ops, sizeof(struct bnxt_dl)); if (!dl) { netdev_warn(bp->dev, "devlink_alloc failed"); return -ENOMEM; @@ -479,42 +696,26 @@ int bnxt_dl_register(struct bnxt *bp) goto err_dl_free; } - rc = devlink_params_register(dl, bnxt_dl_params, - ARRAY_SIZE(bnxt_dl_params)); - if (rc) { - netdev_warn(bp->dev, "devlink_params_register failed. rc=%d", - rc); - goto err_dl_unreg; - } + if (!BNXT_PF(bp)) + return 0; devlink_port_attrs_set(&bp->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, - bp->pf.port_id, false, 0, - bp->switch_id, sizeof(bp->switch_id)); + bp->pf.port_id, false, 0, bp->dsn, + sizeof(bp->dsn)); rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id); if (rc) { netdev_err(bp->dev, "devlink_port_register failed"); - goto err_dl_param_unreg; + goto err_dl_unreg; } - devlink_port_type_eth_set(&bp->dl_port, bp->dev); - rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params, - ARRAY_SIZE(bnxt_dl_port_params)); - if (rc) { - netdev_err(bp->dev, "devlink_port_params_register failed"); + rc = bnxt_dl_params_register(bp); + if (rc) goto err_dl_port_unreg; - } - - devlink_params_publish(dl); - - bnxt_dl_fw_reporters_create(bp); return 0; err_dl_port_unreg: devlink_port_unregister(&bp->dl_port); -err_dl_param_unreg: - devlink_params_unregister(dl, bnxt_dl_params, - ARRAY_SIZE(bnxt_dl_params)); err_dl_unreg: devlink_unregister(dl); err_dl_free: @@ -530,12 +731,10 @@ void bnxt_dl_unregister(struct bnxt *bp) if (!dl) return; - bnxt_dl_fw_reporters_destroy(bp); - devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params, - ARRAY_SIZE(bnxt_dl_port_params)); - devlink_port_unregister(&bp->dl_port); - devlink_params_unregister(dl, bnxt_dl_params, - ARRAY_SIZE(bnxt_dl_params)); + if (BNXT_PF(bp)) { + bnxt_dl_params_unregister(bp); + devlink_port_unregister(&bp->dl_port); + } devlink_unregister(dl); devlink_free(dl); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h index 665d4bdcd8c0..95f893f2a74d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -38,6 +38,10 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl) #define NVM_OFF_IGNORE_ARI 164 #define NVM_OFF_DIS_GRE_VER_CHECK 171 #define NVM_OFF_ENABLE_SRIOV 401 +#define NVM_OFF_NVM_CFG_VER 602 + +#define BNXT_NVM_CFG_VER_BITS 24 +#define BNXT_NVM_CFG_VER_BYTES 4 #define BNXT_MSIX_VEC_MAX 1280 #define BNXT_MSIX_VEC_MIN_MAX 128 @@ -58,6 +62,9 @@ struct bnxt_dl_nvm_param { void 
bnxt_devlink_health_report(struct bnxt *bp, unsigned long event); void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy); +void bnxt_dl_health_recovery_done(struct bnxt *bp); +void bnxt_dl_fw_reporters_create(struct bnxt *bp); +void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all); int bnxt_dl_register(struct bnxt *bp); void bnxt_dl_unregister(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 2ccf79cdcb1e..6171fa8b3677 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1462,15 +1462,15 @@ static int bnxt_get_link_ksettings(struct net_device *dev, ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising, Autoneg); base->autoneg = AUTONEG_ENABLE; - if (link_info->phy_link_status == BNXT_LINK_LINK) + base->duplex = DUPLEX_UNKNOWN; + if (link_info->phy_link_status == BNXT_LINK_LINK) { bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings); + if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) + base->duplex = DUPLEX_FULL; + else + base->duplex = DUPLEX_HALF; + } ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); - if (!netif_carrier_ok(dev)) - base->duplex = DUPLEX_UNKNOWN; - else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) - base->duplex = DUPLEX_FULL; - else - base->duplex = DUPLEX_HALF; } else { base->autoneg = AUTONEG_DISABLE; ethtool_speed = @@ -2707,7 +2707,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp, return rc; fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB; - if (netif_carrier_ok(bp->dev)) + if (bp->link_info.link_up) fw_speed = bp->link_info.link_speed; else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB) fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; @@ -3071,8 +3071,15 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len, } } - if (info->dest_buf) - memcpy(info->dest_buf + off, dma_buf, len); + if (info->dest_buf) { + if ((info->seg_start + off + len) <= + BNXT_COREDUMP_BUF_LEN(info->buf_len)) { + memcpy(info->dest_buf + off, dma_buf, len); + } else { + rc = -ENOBUFS; + break; + } + } if (cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE)) @@ -3126,7 +3133,7 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id, static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, u16 segment_id, u32 *seg_len, - void *buf, u32 offset) + void *buf, u32 buf_len, u32 offset) { struct hwrm_dbg_coredump_retrieve_input req = {0}; struct bnxt_hwrm_dbg_dma_info info = {NULL}; @@ -3141,8 +3148,11 @@ static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, seq_no); info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output, data_len); - if (buf) + if (buf) { info.dest_buf = buf + offset; + info.buf_len = buf_len; + info.seg_start = offset; + } rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info); if (!rc) @@ -3232,14 +3242,17 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) { u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output); + u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0; struct coredump_segment_record *seg_record = NULL; - u32 offset = 0, seg_hdr_len, seg_record_len; struct bnxt_coredump_segment_hdr seg_hdr; struct bnxt_coredump coredump = {NULL}; time64_t start_time; u16 start_utc; int rc = 0, i; + if (buf) + buf_len = *dump_len; + start_time = 
ktime_get_real_seconds(); start_utc = sys_tz.tz_minuteswest * 60; seg_hdr_len = sizeof(seg_hdr); @@ -3272,6 +3285,12 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) u32 duration = 0, seg_len = 0; unsigned long start, end; + if (buf && ((offset + seg_hdr_len) > + BNXT_COREDUMP_BUF_LEN(buf_len))) { + rc = -ENOBUFS; + goto err; + } + start = jiffies; rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id); @@ -3284,9 +3303,11 @@ static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) /* Write segment data into the buffer */ rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id, - &seg_len, buf, + &seg_len, buf, buf_len, offset + seg_hdr_len); - if (rc) + if (rc && rc == -ENOBUFS) + goto err; + else if (rc) netdev_err(bp->dev, "Failed to retrieve coredump for seg = %d\n", seg_record->segment_id); @@ -3316,7 +3337,8 @@ err: rc); kfree(coredump.data); *dump_len += sizeof(struct bnxt_coredump_record); - + if (rc == -ENOBUFS) + netdev_err(bp->dev, "Firmware returned large coredump buffer"); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index 4428d0abcbc1..3576d951727b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -31,6 +31,8 @@ struct bnxt_coredump { u16 total_segs; }; +#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record)) + struct bnxt_hwrm_dbg_dma_info { void *dest_buf; int dest_buf_size; @@ -38,6 +40,8 @@ struct bnxt_hwrm_dbg_dma_info { u16 seq_off; u16 data_len_off; u16 segs; + u32 seg_start; + u32 buf_len; }; struct hwrm_dbg_cmn_input { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index c601ff7b8f61..4a316c4b3fa8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -113,8 +113,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, { struct net_device *dev = edev->net; struct bnxt *bp = netdev_priv(dev); + struct bnxt_hw_resc *hw_resc; int max_idx, max_cp_rings; int avail_msix, idx; + int total_vecs; int rc = 0; ASSERT_RTNL(); @@ -142,7 +144,10 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, } edev->ulp_tbl[ulp_id].msix_base = idx; edev->ulp_tbl[ulp_id].msix_requested = avail_msix; - if (bp->total_irqs < (idx + avail_msix)) { + hw_resc = &bp->hw_resc; + total_vecs = idx + avail_msix; + if (bp->total_irqs < total_vecs || + (BNXT_NEW_RM(bp) && hw_resc->resv_irqs < total_vecs)) { if (netif_running(dev)) { bnxt_close_nic(bp, true, false); rc = bnxt_open_nic(bp, true, false); @@ -156,7 +161,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, } if (BNXT_NEW_RM(bp)) { - struct bnxt_hw_resc *hw_resc = &bp->hw_resc; int resv_msix; resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index f9bf7d7250ab..b010b34cdaf8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -398,6 +398,9 @@ static int bnxt_vf_reps_create(struct bnxt *bp) struct net_device *dev; int rc, i; + if (!(bp->flags & BNXT_FLAG_DSN_VALID)) + return -ENODEV; + bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL); if (!bp->vf_reps) return -ENOMEM; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c 
index 120fa05a39ff..e50a15397e11 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2,7 +2,7 @@ /* * Broadcom GENET (Gigabit Ethernet) controller driver * - * Copyright (c) 2014-2017 Broadcom + * Copyright (c) 2014-2019 Broadcom */ #define pr_fmt(fmt) "bcmgenet: " fmt @@ -508,8 +508,8 @@ static int bcmgenet_set_link_ksettings(struct net_device *dev, return phy_ethtool_ksettings_set(dev->phydev, cmd); } -static int bcmgenet_set_rx_csum(struct net_device *dev, - netdev_features_t wanted) +static void bcmgenet_set_rx_csum(struct net_device *dev, + netdev_features_t wanted) { struct bcmgenet_priv *priv = netdev_priv(dev); u32 rbuf_chk_ctrl; @@ -521,7 +521,7 @@ static int bcmgenet_set_rx_csum(struct net_device *dev, /* enable rx checksumming */ if (rx_csum_en) - rbuf_chk_ctrl |= RBUF_RXCHK_EN; + rbuf_chk_ctrl |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS; else rbuf_chk_ctrl &= ~RBUF_RXCHK_EN; priv->desc_rxchk_en = rx_csum_en; @@ -535,12 +535,10 @@ static int bcmgenet_set_rx_csum(struct net_device *dev, rbuf_chk_ctrl &= ~RBUF_SKIP_FCS; bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL); - - return 0; } -static int bcmgenet_set_tx_csum(struct net_device *dev, - netdev_features_t wanted) +static void bcmgenet_set_tx_csum(struct net_device *dev, + netdev_features_t wanted) { struct bcmgenet_priv *priv = netdev_priv(dev); bool desc_64b_en; @@ -549,7 +547,7 @@ static int bcmgenet_set_tx_csum(struct net_device *dev, tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv); rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL); - desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); + desc_64b_en = !!(wanted & NETIF_F_HW_CSUM); /* enable 64 bytes descriptor in both directions (RBUF and TBUF) */ if (desc_64b_en) { @@ -563,21 +561,27 @@ static int bcmgenet_set_tx_csum(struct net_device *dev, bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl); bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL); - - return 0; } static int bcmgenet_set_features(struct net_device *dev, netdev_features_t features) { - netdev_features_t changed = features ^ dev->features; - netdev_features_t wanted = dev->wanted_features; - int ret = 0; + struct bcmgenet_priv *priv = netdev_priv(dev); + u32 reg; + int ret; + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; - if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) - ret = bcmgenet_set_tx_csum(dev, wanted); - if (changed & (NETIF_F_RXCSUM)) - ret = bcmgenet_set_rx_csum(dev, wanted); + /* Make sure we reflect the value of CRC_CMD_FWD */ + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); + + bcmgenet_set_tx_csum(dev, features); + bcmgenet_set_rx_csum(dev, features); + + clk_disable_unprepare(priv->clk); return ret; } @@ -857,6 +861,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed), STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed), + STAT_GENET_SOFT_MIB("tx_realloc_tsb", mib.tx_realloc_tsb), + STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed", + mib.tx_realloc_tsb_failed), /* Per TX queues */ STAT_GENET_Q(0), STAT_GENET_Q(1), @@ -1218,18 +1225,6 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv, } } -/* ioctl handle special commands that are not present in ethtool. 
*/ -static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - if (!netif_running(dev)) - return -EINVAL; - - if (!dev->phydev) - return -ENODEV; - - return phy_mii_ioctl(dev->phydev, rq, cmd); -} - static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, struct bcmgenet_tx_ring *ring) { @@ -1483,6 +1478,7 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev) static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb) { + struct bcmgenet_priv *priv = netdev_priv(dev); struct status_64 *status = NULL; struct sk_buff *new_skb; u16 offset; @@ -1495,12 +1491,15 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev, * enough headroom for us to insert 64B status block. */ new_skb = skb_realloc_headroom(skb, sizeof(*status)); - dev_kfree_skb(skb); if (!new_skb) { + dev_kfree_skb_any(skb); + priv->mib.tx_realloc_tsb_failed++; dev->stats.tx_dropped++; return NULL; } + dev_consume_skb_any(skb); skb = new_skb; + priv->mib.tx_realloc_tsb++; } skb_push(skb, sizeof(*status)); @@ -1516,24 +1515,19 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev, ip_proto = ipv6_hdr(skb)->nexthdr; break; default: - return skb; + /* don't use UDP flag */ + ip_proto = 0; + break; } offset = skb_checksum_start_offset(skb) - sizeof(*status); tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) | - (offset + skb->csum_offset); + (offset + skb->csum_offset) | + STATUS_TX_CSUM_LV; - /* Set the length valid bit for TCP and UDP and just set - * the special UDP flag for IPv4, else just set to 0. - */ - if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) { - tx_csum_info |= STATUS_TX_CSUM_LV; - if (ip_proto == IPPROTO_UDP && - ip_ver == htons(ETH_P_IP)) - tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP; - } else { - tx_csum_info = 0; - } + /* Set the special UDP flag for UDP */ + if (ip_proto == IPPROTO_UDP) + tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP; status->tx_csum_info = tx_csum_info; } @@ -1744,7 +1738,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, unsigned int bytes_processed = 0; unsigned int p_index, mask; unsigned int discards; - unsigned int chksum_ok = 0; /* Clear status before servicing to reduce spurious interrupts */ if (ring->index == DESC_INDEX) { @@ -1795,9 +1788,15 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, dmadesc_get_length_status(priv, cb->bd_addr); } else { struct status_64 *status; + __be16 rx_csum; status = (struct status_64 *)skb->data; dma_length_status = status->length_status; + rx_csum = (__force __be16)(status->rx_csum & 0xffff); + if (priv->desc_rxchk_en) { + skb->csum = (__force __wsum)ntohs(rx_csum); + skb->ip_summed = CHECKSUM_COMPLETE; + } } /* DMA flags and length are still valid no matter how @@ -1840,18 +1839,12 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, goto next; } /* error packet */ - chksum_ok = (dma_flag & priv->dma_rx_chk_bit) && - priv->desc_rxchk_en; - skb_put(skb, len); if (priv->desc_64b_en) { skb_pull(skb, 64); len -= 64; } - if (likely(chksum_ok)) - skb->ip_summed = CHECKSUM_UNNECESSARY; - /* remove hardware 2bytes added for IP alignment */ skb_pull(skb, 2); len -= 2; @@ -2164,8 +2157,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, DMA_END_ADDR); /* Initialize Tx NAPI */ - netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, - NAPI_POLL_WEIGHT); + netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, + NAPI_POLL_WEIGHT); } /* Initialize a RDMA ring */ @@ -2886,9 
+2879,10 @@ static int bcmgenet_open(struct net_device *dev) init_umac(priv); - /* Make sure we reflect the value of CRC_CMD_FWD */ - reg = bcmgenet_umac_readl(priv, UMAC_CMD); - priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); + /* Apply features again in case we changed them while interface was + * down + */ + bcmgenet_set_features(dev, dev->features); bcmgenet_set_hw_addr(priv, dev->dev_addr); @@ -3055,7 +3049,7 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) ring->cb_ptr, ring->end_ptr); } -static void bcmgenet_timeout(struct net_device *dev) +static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue) { struct bcmgenet_priv *priv = netdev_priv(dev); u32 int0_enable = 0; @@ -3216,7 +3210,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = { .ndo_tx_timeout = bcmgenet_timeout, .ndo_set_rx_mode = bcmgenet_set_rx_mode, .ndo_set_mac_address = bcmgenet_set_mac_addr, - .ndo_do_ioctl = bcmgenet_ioctl, + .ndo_do_ioctl = phy_do_ioctl_running, .ndo_set_features = bcmgenet_set_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = bcmgenet_poll_controller, @@ -3327,19 +3321,15 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) { bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; genet_dma_ring_regs = genet_dma_ring_regs_v4; - priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; } else if (GENET_IS_V3(priv)) { bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; genet_dma_ring_regs = genet_dma_ring_regs_v123; - priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS; } else if (GENET_IS_V2(priv)) { bcmgenet_dma_regs = bcmgenet_dma_regs_v2; genet_dma_ring_regs = genet_dma_ring_regs_v123; - priv->dma_rx_chk_bit = DMA_RX_CHK_V12; } else if (GENET_IS_V1(priv)) { bcmgenet_dma_regs = bcmgenet_dma_regs_v1; genet_dma_ring_regs = genet_dma_ring_regs_v123; - priv->dma_rx_chk_bit = DMA_RX_CHK_V12; } /* enum genet_version starts at 1 */ @@ -3535,9 +3525,11 @@ static int bcmgenet_probe(struct platform_device *pdev) priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT); - /* Set hardware features */ - dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; + /* Set default features */ + dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | + NETIF_F_RXCSUM; + dev->hw_features |= dev->features; + dev->vlan_features |= dev->features; /* Request the WOL interrupt and advertise suspend if available */ priv->wol_irq_disabled = true; @@ -3574,6 +3566,14 @@ static int bcmgenet_probe(struct platform_device *pdev) bcmgenet_set_hw_params(priv); + err = -EIO; + if (priv->hw_params->flags & GENET_HAS_40BITS) + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + if (err) + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) + goto err; + /* Mii wait queue */ init_waitqueue_head(&priv->wq); /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */ @@ -3689,6 +3689,9 @@ static int bcmgenet_resume(struct device *d) genphy_config_aneg(dev->phydev); bcmgenet_mii_config(priv->dev, false); + /* Restore enabled features */ + bcmgenet_set_features(dev, dev->features); + bcmgenet_set_hw_addr(priv, dev->dev_addr); if (priv->internal_phy) { diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index a5659197598f..61a6fe9f4cec 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -144,6 +144,8 @@ struct bcmgenet_mib_counters { u32 alloc_rx_buff_failed; u32 
rx_dma_failed; u32 tx_dma_failed; + u32 tx_realloc_tsb; + u32 tx_realloc_tsb_failed; }; #define UMAC_HD_BKP_CTRL 0x004 @@ -251,6 +253,7 @@ struct bcmgenet_mib_counters { #define RBUF_CHK_CTRL 0x14 #define RBUF_RXCHK_EN (1 << 0) #define RBUF_SKIP_FCS (1 << 4) +#define RBUF_L3_PARSE_DIS (1 << 5) #define RBUF_ENERGY_CTRL 0x9c #define RBUF_EEE_EN (1 << 0) @@ -663,7 +666,6 @@ struct bcmgenet_priv { bool desc_rxchk_en; bool crc_fwd_en; - unsigned int dma_rx_chk_bit; u32 dma_max_burst_length; u32 msg_enable; diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 1604ad32e920..5b4568c2ad1c 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -294,7 +294,7 @@ static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex, enum sbmac_fc fc); static int sbmac_open(struct net_device *dev); -static void sbmac_tx_timeout (struct net_device *dev); +static void sbmac_tx_timeout (struct net_device *dev, unsigned int txqueue); static void sbmac_set_rx_mode(struct net_device *dev); static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static int sbmac_close(struct net_device *dev); @@ -2419,7 +2419,7 @@ static void sbmac_mii_poll(struct net_device *dev) } -static void sbmac_tx_timeout (struct net_device *dev) +static void sbmac_tx_timeout (struct net_device *dev, unsigned int txqueue) { struct sbmac_softc *sc = netdev_priv(dev); unsigned long flags; @@ -2537,7 +2537,7 @@ static int sbmac_probe(struct platform_device *pldev) res = platform_get_resource(pldev, IORESOURCE_MEM, 0); BUG_ON(!res); - sbm_base = ioremap_nocache(res->start, resource_size(res)); + sbm_base = ioremap(res->start, resource_size(res)); if (!sbm_base) { printk(KERN_ERR "%s: unable to map device registers\n", dev_name(&pldev->dev)); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ca3aa1250dd1..88466255bf66 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -7645,7 +7645,7 @@ static void tg3_poll_controller(struct net_device *dev) } #endif -static void tg3_tx_timeout(struct net_device *dev) +static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct tg3 *tp = netdev_priv(dev); @@ -7874,8 +7874,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi, struct netdev_queue *txq, struct sk_buff *skb) { - struct sk_buff *segs, *nskb; u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; + struct sk_buff *segs, *seg, *next; /* Estimate the number of fragments in the worst case */ if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) { @@ -7898,12 +7898,10 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi, if (IS_ERR(segs) || !segs) goto tg3_tso_bug_end; - do { - nskb = segs; - segs = segs->next; - nskb->next = NULL; - tg3_start_xmit(nskb, tp->dev); - } while (segs); + skb_list_walk_safe(segs, seg, next) { + skb_mark_not_on_list(seg); + tg3_start_xmit(seg, tp->dev); + } tg3_tso_bug_end: dev_consume_skb_any(skb); |
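Most of the one-line churn across b44, bcmsysport, bnx2, bnx2x, bnxt, bcmgenet, sb1250-mac and tg3 above comes from the netdev core change that adds a txqueue argument to .ndo_tx_timeout. A minimal sketch of a converted handler, assuming a hypothetical mydrv driver with a reset work item (only the prototype is taken from the diff):

```c
#include <linux/netdevice.h>
#include <linux/workqueue.h>

/* Hypothetical private state, for illustration only. */
struct mydrv_priv {
	struct work_struct reset_work;
};

/* The stalled queue index is now passed in, so the handler can report
 * (or eventually reset) just that queue instead of the whole device.
 */
static void mydrv_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	netdev_warn(dev, "transmit timeout on queue %u\n", txqueue);
	schedule_work(&priv->reset_work);	/* recover from process context */
}

static const struct net_device_ops mydrv_netdev_ops = {
	.ndo_tx_timeout	= mydrv_tx_timeout,
	/* remaining callbacks elided */
};
```

The drivers converted in this diff keep their existing recovery logic and simply take (and for now ignore) the new argument.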
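The b44_magic_pattern() hunks replace set_bit() calls that cast the u8 pattern mask to unsigned long *; set_bit() operates on long-sized, long-aligned words, so it is not safe to apply to an arbitrary byte buffer. The open-coded replacement is equivalent to this hypothetical helper (the name is made up, only the arithmetic follows the diff):

```c
#include <linux/bits.h>
#include <linux/types.h>

/* Set bit 'nr' in a plain byte array, matching the byte/bit arithmetic
 * now used in b44_magic_pattern() instead of set_bit() on a cast pointer.
 */
static inline void set_mask_bit(u8 *mask, unsigned int nr)
{
	mask[nr >> 3] |= BIT(nr & 7);
}
```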
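bcm_sysport_probe() and bcmgenet_probe() both gain explicit DMA mask setup, preferring 40-bit addressing and falling back to 32-bit. The same pattern, factored into a hypothetical helper purely for illustration:

```c
#include <linux/dma-mapping.h>

/* Prefer the 40-bit descriptor addressing the hardware supports and
 * fall back to 32-bit when the platform cannot provide it.
 */
static int set_dma_masks(struct device *dev)
{
	int ret;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
	if (ret)
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		dev_err(dev, "unable to set DMA mask: %d\n", ret);
	return ret;
}
```

In bcmgenet the 40-bit attempt is additionally gated on the GENET_HAS_40BITS hardware flag, as the hunk above shows.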
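The tg3_tso_bug() change drops the hand-rolled walk over the GSO segment list in favour of the skb_list_walk_safe() iterator. Roughly the same pattern in isolation, with dev_queue_xmit() standing in for tg3's own tg3_start_xmit() call:

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Walk a GSO segment chain safely (the iterator caches the next pointer),
 * unlink each skb and hand it off for transmission.
 */
static void xmit_segments(struct sk_buff *segs)
{
	struct sk_buff *seg, *next;

	skb_list_walk_safe(segs, seg, next) {
		skb_mark_not_on_list(seg);	/* clear seg->next */
		dev_queue_xmit(seg);
	}
}
```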