Diffstat (limited to 'drivers/net/ethernet/broadcom/bnxt/bnxt.c')
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt.c | 202
1 file changed, 125 insertions, 77 deletions
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index d5495762c945..5578ddcb465d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -58,8 +58,8 @@
 #include <net/netdev_queues.h>
 #include <net/netdev_rx_queue.h>
 #include <linux/pci-tph.h>
+#include <linux/bnxt/hsi.h>
-#include "bnxt_hsi.h"
 #include "bnxt.h"
 #include "bnxt_hwrm.h"
 #include "bnxt_ulp.h"
@@ -477,6 +477,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct bnxt_tx_ring_info *txr;
        struct bnxt_sw_tx_bd *tx_buf;
        __le32 lflags = 0;
+       skb_frag_t *frag;

        i = skb_get_queue_mapping(skb);
        if (unlikely(i >= bp->tx_nr_rings)) {
@@ -563,7 +564,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
                lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);

        if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
-           !lflags) {
+           skb_frags_readable(skb) && !lflags) {
                struct tx_push_buffer *tx_push_buf = txr->tx_push;
                struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
                struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
@@ -598,9 +599,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
                skb_copy_from_linear_data(skb, pdata, len);
                pdata += len;
                for (j = 0; j < last_frag; j++) {
-                       skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
                        void *fptr;

+                       frag = &skb_shinfo(skb)->frags[j];
                        fptr = skb_frag_address_safe(frag);
                        if (!fptr)
                                goto normal_tx;
@@ -708,8 +709,7 @@ normal_tx:
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
        txbd0 = txbd;
        for (i = 0; i < last_frag; i++) {
-               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
+               frag = &skb_shinfo(skb)->frags[i];
                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];

@@ -721,7 +721,8 @@ normal_tx:
                        goto tx_dma_error;

                tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
-               dma_unmap_addr_set(tx_buf, mapping, mapping);
+               netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
+                                         mapping, mapping);

                txbd->tx_bd_haddr = cpu_to_le64(mapping);

@@ -778,9 +779,11 @@ tx_dma_error:
        for (i = 0; i < last_frag; i++) {
                prod = NEXT_TX(prod);
                tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
-               dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
-                              skb_frag_size(&skb_shinfo(skb)->frags[i]),
-                              DMA_TO_DEVICE);
+               frag = &skb_shinfo(skb)->frags[i];
+               netmem_dma_unmap_page_attrs(&pdev->dev,
+                                           dma_unmap_addr(tx_buf, mapping),
+                                           skb_frag_size(frag),
+                                           DMA_TO_DEVICE, 0);
        }

tx_free:
@@ -809,6 +812,7 @@ static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
        u16 hw_cons = txr->tx_hw_cons;
        unsigned int tx_bytes = 0;
        u16 cons = txr->tx_cons;
+       skb_frag_t *frag;
        int tx_pkts = 0;
        bool rc = false;

@@ -848,13 +852,14 @@ static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,

                last = tx_buf->nr_frags;
                for (j = 0; j < last; j++) {
+                       frag = &skb_shinfo(skb)->frags[j];
                        cons = NEXT_TX(cons);
                        tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
-                       dma_unmap_page(
-                               &pdev->dev,
-                               dma_unmap_addr(tx_buf, mapping),
-                               skb_frag_size(&skb_shinfo(skb)->frags[j]),
-                               DMA_TO_DEVICE);
+                       netmem_dma_unmap_page_attrs(&pdev->dev,
+                                                   dma_unmap_addr(tx_buf,
+                                                                  mapping),
+                                                   skb_frag_size(frag),
+                                                   DMA_TO_DEVICE, 0);
                }
                if (unlikely(is_ts_pkt)) {
                        if (BNXT_CHIP_P5(bp)) {
@@ -1810,7 +1815,7 @@ static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
 {
        struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

-       /* if vf-rep dev is NULL, the must belongs to the PF */
+       /* if vf-rep dev is NULL, it must belong to the PF */
        return dev ? dev : bp->dev;
 }
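
The tx-path hunks above wire bnxt up for netmem (unreadable, e.g. device-memory) frags: push mode is skipped when any frag is unreadable, and frag unmapping goes through the netmem helpers, which skip frags whose DMA mapping is owned by the page-pool provider rather than the driver. A minimal illustrative sketch of that unmap pattern, assuming only the helpers in <net/netmem.h>; sketch_tx_buf and sketch_unmap_tx_frag are hypothetical, not driver code:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <net/netmem.h>

/* Illustrative only: record and release a frag's DMA mapping the netmem way.
 * For net_iov-backed frags the provider owns the mapping, so the set helper
 * records a zero address and the unmap below becomes a no-op; ordinary page
 * frags behave exactly as with dma_unmap_page_attrs().
 */
struct sketch_tx_buf {
        DEFINE_DMA_UNMAP_ADDR(mapping);
};

static void sketch_unmap_tx_frag(struct device *dev,
                                 struct sketch_tx_buf *tx_buf,
                                 const skb_frag_t *frag)
{
        netmem_dma_unmap_page_attrs(dev, dma_unmap_addr(tx_buf, mapping),
                                    skb_frag_size(frag), DMA_TO_DEVICE, 0);
}
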
@@ -2989,6 +2994,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 {
        struct bnxt_napi *bnapi = cpr->bnapi;
        u32 raw_cons = cpr->cp_raw_cons;
+       bool flush_xdp = false;
        u32 cons;
        int rx_pkts = 0;
        u8 event = 0;
@@ -3042,6 +3048,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                        else
                                rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
                                                           &event);
+                       if (event & BNXT_REDIRECT_EVENT)
+                               flush_xdp = true;
                        if (likely(rc >= 0))
                                rx_pkts += rc;
                        /* Increment rx_pkts when rc is -ENOMEM to count towards
@@ -3066,7 +3074,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                }
        }

-       if (event & BNXT_REDIRECT_EVENT) {
+       if (flush_xdp) {
                xdp_do_flush();
                event &= ~BNXT_REDIRECT_EVENT;
        }
@@ -3422,9 +3430,11 @@ static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[j];

                        tx_buf = &txr->tx_buf_ring[ring_idx];
-                       dma_unmap_page(&pdev->dev,
-                                      dma_unmap_addr(tx_buf, mapping),
-                                      skb_frag_size(frag), DMA_TO_DEVICE);
+                       netmem_dma_unmap_page_attrs(&pdev->dev,
+                                                   dma_unmap_addr(tx_buf,
+                                                                  mapping),
+                                                   skb_frag_size(frag),
+                                                   DMA_TO_DEVICE, 0);
                }
                dev_kfree_skb(skb);
        }
@@ -3800,12 +3810,14 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
                                   struct bnxt_rx_ring_info *rxr,
                                   int numa_node)
 {
+       const unsigned int agg_size_fac = PAGE_SIZE / BNXT_RX_PAGE_SIZE;
+       const unsigned int rx_size_fac = PAGE_SIZE / SZ_4K;
        struct page_pool_params pp = { 0 };
        struct page_pool *pool;

-       pp.pool_size = bp->rx_agg_ring_size;
+       pp.pool_size = bp->rx_agg_ring_size / agg_size_fac;
        if (BNXT_RX_PAGE_MODE(bp))
-               pp.pool_size += bp->rx_ring_size;
+               pp.pool_size += bp->rx_ring_size / rx_size_fac;
        pp.nid = numa_node;
        pp.napi = &rxr->bnapi->napi;
        pp.netdev = bp->dev;
@@ -3823,7 +3835,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
        rxr->need_head_pool = page_pool_is_unreadable(pool);
        if (bnxt_separate_head_pool(rxr)) {
-               pp.pool_size = max(bp->rx_ring_size, 1024);
+               pp.pool_size = min(bp->rx_ring_size / rx_size_fac, 1024);
                pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
                pool = page_pool_create(&pp);
                if (IS_ERR(pool))
@@ -7116,7 +7128,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
        default:
                netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
                           ring_type);
-               return -1;
+               return -EINVAL;
        }

        resp = hwrm_req_hold(bp, req);
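
The bnxt_alloc_rx_page_pool() hunks above size the pool in whole host pages rather than ring entries: one PAGE_SIZE page backs PAGE_SIZE/BNXT_RX_PAGE_SIZE aggregation buffers, and roughly PAGE_SIZE/SZ_4K rx buffers, so on large-page systems the old code over-provisioned by that factor. A worked sketch of the arithmetic, with assumed example values (64K pages and a 32K BNXT_RX_PAGE_SIZE cap; the numbers are illustrative, not taken from the patch):

#include <linux/sizes.h>

/* Assumed example values, for illustration only. */
#define SKETCH_PAGE_SIZE        SZ_64K  /* host page size */
#define SKETCH_RX_PAGE_SIZE     SZ_32K  /* BNXT_RX_PAGE_SIZE cap */

static unsigned int sketch_pool_size(unsigned int rx_agg_ring_size,
                                     unsigned int rx_ring_size,
                                     bool page_mode)
{
        /* One 64K page backs two 32K aggregation buffers ... */
        const unsigned int agg_size_fac = SKETCH_PAGE_SIZE / SKETCH_RX_PAGE_SIZE;
        /* ... and roughly sixteen 4K rx buffers. */
        const unsigned int rx_size_fac = SKETCH_PAGE_SIZE / SZ_4K;
        unsigned int pool_size = rx_agg_ring_size / agg_size_fac;

        if (page_mode)
                pool_size += rx_ring_size / rx_size_fac;

        return pool_size;       /* e.g. 4096 / 2 + 2048 / 16 = 2176 pages */
}
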
@@ -10780,6 +10792,72 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
        bp->num_rss_ctx--;
 }

+static bool bnxt_vnic_has_rx_ring(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+                                 int rxr_id)
+{
+       u16 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+       int i, vnic_rx;
+
+       /* Ntuple VNIC always has all the rx rings. Any change of ring id
+        * must be updated because a future filter may use it.
+        */
+       if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
+               return true;
+
+       for (i = 0; i < tbl_size; i++) {
+               if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
+                       vnic_rx = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
+               else
+                       vnic_rx = bp->rss_indir_tbl[i];
+
+               if (rxr_id == vnic_rx)
+                       return true;
+       }
+
+       return false;
+}
+
+static int bnxt_set_vnic_mru_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+                               u16 mru, int rxr_id)
+{
+       int rc;
+
+       if (!bnxt_vnic_has_rx_ring(bp, vnic, rxr_id))
+               return 0;
+
+       if (mru) {
+               rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
+               if (rc) {
+                       netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
+                                  vnic->vnic_id, rc);
+                       return rc;
+               }
+       }
+       vnic->mru = mru;
+       bnxt_hwrm_vnic_update(bp, vnic,
+                             VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+
+       return 0;
+}
+
+static int bnxt_set_rss_ctx_vnic_mru(struct bnxt *bp, u16 mru, int rxr_id)
+{
+       struct ethtool_rxfh_context *ctx;
+       unsigned long context;
+       int rc;
+
+       xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+               struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
+               struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+
+               rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, rxr_id);
+               if (rc)
+                       return rc;
+       }
+
+       return 0;
+}
+
 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
 {
        bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
@@ -11538,11 +11616,9 @@ static void bnxt_free_irq(struct bnxt *bp)

 static int bnxt_request_irq(struct bnxt *bp)
 {
+       struct cpu_rmap *rmap = NULL;
        int i, j, rc = 0;
        unsigned long flags = 0;
-#ifdef CONFIG_RFS_ACCEL
-       struct cpu_rmap *rmap;
-#endif

        rc = bnxt_setup_int_mode(bp);
        if (rc) {
@@ -11563,15 +11639,15 @@ static int bnxt_request_irq(struct bnxt *bp)
                int map_idx = bnxt_cp_num_to_irq_num(bp, i);
                struct bnxt_irq *irq = &bp->irq_tbl[map_idx];

-#ifdef CONFIG_RFS_ACCEL
-               if (rmap && bp->bnapi[i]->rx_ring) {
+               if (IS_ENABLED(CONFIG_RFS_ACCEL) &&
+                   rmap && bp->bnapi[i]->rx_ring) {
                        rc = irq_cpu_rmap_add(rmap, irq->vector);
                        if (rc)
                                netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
                                            j);
                        j++;
                }
-#endif
+
                rc = request_irq(irq->vector, irq->handler, flags, irq->name,
                                 bp->bnapi[i]);
                if (rc)
@@ -13995,7 +14071,7 @@ fw_reset:

 static void bnxt_timer(struct timer_list *t)
 {
-       struct bnxt *bp = from_timer(bp, t, timer);
+       struct bnxt *bp = timer_container_of(bp, t, timer);
        struct net_device *dev = bp->dev;

        if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
@@ -14055,28 +14131,13 @@ static void bnxt_unlock_sp(struct bnxt *bp)
        netdev_unlock(bp->dev);
 }

-/* Same as bnxt_lock_sp() with additional rtnl_lock */
-static void bnxt_rtnl_lock_sp(struct bnxt *bp)
-{
-       clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
-       rtnl_lock();
-       netdev_lock(bp->dev);
-}
-
-static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
-{
-       set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
-       netdev_unlock(bp->dev);
-       rtnl_unlock();
-}
-
 /* Only called from bnxt_sp_task() */
 static void bnxt_reset(struct bnxt *bp, bool silent)
 {
-       bnxt_rtnl_lock_sp(bp);
+       bnxt_lock_sp(bp);
        if (test_bit(BNXT_STATE_OPEN, &bp->state))
                bnxt_reset_task(bp, silent);
-       bnxt_rtnl_unlock_sp(bp);
+       bnxt_unlock_sp(bp);
 }

 /* Only called from bnxt_sp_task() */
@@ -14084,9 +14145,9 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
 {
        int i;

-       bnxt_rtnl_lock_sp(bp);
+       bnxt_lock_sp(bp);
        if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
-               bnxt_rtnl_unlock_sp(bp);
+               bnxt_unlock_sp(bp);
                return;
        }
        /* Disable and flush TPA before resetting the RX ring */
@@ -14125,7 +14186,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
        }
        if (bp->flags & BNXT_FLAG_TPA)
                bnxt_set_tpa(bp, true);
-       bnxt_rtnl_unlock_sp(bp);
+       bnxt_unlock_sp(bp);
 }

 static void bnxt_fw_fatal_close(struct bnxt *bp)
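
In bnxt_request_irq() above, the #ifdef CONFIG_RFS_ACCEL block becomes an IS_ENABLED() check: the branch is now parsed and type-checked in every configuration and simply folds to dead code when the option is off, which is why rmap must be declared unconditionally (initialized to NULL). A generic sketch of the idiom; sketch_rmap_add is hypothetical:

#include <linux/cpu_rmap.h>
#include <linux/kconfig.h>

/* IS_ENABLED(CONFIG_FOO) expands to 1 for =y/=m and 0 otherwise, so the
 * compiler still sees (and checks) the whole branch but eliminates it
 * entirely when CONFIG_RFS_ACCEL=n; no preprocessor block is needed.
 */
static int sketch_rmap_add(struct cpu_rmap *rmap, int vector)
{
        if (IS_ENABLED(CONFIG_RFS_ACCEL) && rmap)
                return irq_cpu_rmap_add(rmap, vector);

        return 0;
}
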
@@ -15017,17 +15078,15 @@ static void bnxt_fw_reset_task(struct work_struct *work)
                bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
                fallthrough;
        case BNXT_FW_RESET_STATE_OPENING:
-               while (!rtnl_trylock()) {
+               while (!netdev_trylock(bp->dev)) {
                        bnxt_queue_fw_reset_work(bp, HZ / 10);
                        return;
                }
-               netdev_lock(bp->dev);
                rc = bnxt_open(bp->dev);
                if (rc) {
                        netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
                        bnxt_fw_reset_abort(bp, rc);
                        netdev_unlock(bp->dev);
-                       rtnl_unlock();
                        goto ulp_start;
                }
@@ -15047,7 +15106,6 @@ static void bnxt_fw_reset_task(struct work_struct *work)
                        bnxt_dl_health_fw_status_update(bp, true);
                }
                netdev_unlock(bp->dev);
-               rtnl_unlock();
                bnxt_ulp_start(bp, 0);
                bnxt_reenable_sriov(bp);
                netdev_lock(bp->dev);
@@ -15573,8 +15631,7 @@ static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int ta
 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
        .set_port       = bnxt_udp_tunnel_set_port,
        .unset_port     = bnxt_udp_tunnel_unset_port,
-       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
-                         UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+       .flags          = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
        .tables         = {
                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
@@ -15582,8 +15639,7 @@ static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
 }, bnxt_udp_tunnels_p7 = {
        .set_port       = bnxt_udp_tunnel_set_port,
        .unset_port     = bnxt_udp_tunnel_unset_port,
-       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
-                         UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+       .flags          = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
        .tables         = {
                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
@@ -15927,6 +15983,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
        struct bnxt_vnic_info *vnic;
        struct bnxt_napi *bnapi;
        int i, rc;
+       u16 mru;

        rxr = &bp->rx_ring[idx];
        clone = qmem;
@@ -15977,28 +16034,22 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
        napi_enable_locked(&bnapi->napi);
        bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);

+       mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
        for (i = 0; i < bp->nr_vnics; i++) {
                vnic = &bp->vnic_info[i];

-               rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
-               if (rc) {
-                       netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
-                                  vnic->vnic_id, rc);
+               rc = bnxt_set_vnic_mru_p5(bp, vnic, mru, idx);
+               if (rc)
                        return rc;
-               }
-               vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
-               bnxt_hwrm_vnic_update(bp, vnic,
-                                     VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
        }
-
-       return 0;
+       return bnxt_set_rss_ctx_vnic_mru(bp, mru, idx);

 err_reset:
        netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n",
                   rc);
        napi_enable_locked(&bnapi->napi);
        bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
-       netif_close(dev);
+       bnxt_reset_task(bp, true);
        return rc;
 }
@@ -16013,10 +16064,10 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)

        for (i = 0; i < bp->nr_vnics; i++) {
                vnic = &bp->vnic_info[i];
-               vnic->mru = 0;
-               bnxt_hwrm_vnic_update(bp, vnic,
-                                     VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
+
+               bnxt_set_vnic_mru_p5(bp, vnic, 0, idx);
        }
+       bnxt_set_rss_ctx_vnic_mru(bp, 0, idx);
        /* Make sure NAPI sees that the VNIC is disabled */
        synchronize_net();
        rxr = &bp->rx_ring[idx];
@@ -16713,6 +16764,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (BNXT_SUPPORTS_QUEUE_API(bp))
                dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
        dev->request_ops_lock = true;
+       dev->netmem_tx = true;

        rc = register_netdev(dev);
        if (rc)
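
The BNXT_FW_RESET_STATE_OPENING hunk above swaps rtnl_trylock() for netdev_trylock(): the fw-reset worker now contends only for the per-device instance lock, and when it loses it requeues itself instead of blocking. A sketch of that lock-or-requeue shape, reusing the driver's bnxt_queue_fw_reset_work() delayed-work helper; sketch_fw_reset_step is hypothetical:

#include <linux/netdevice.h>

/* Sketch: take the netdev instance lock opportunistically from a worker.
 * If the lock is contended, reschedule the work (here: in 100 ms) and
 * return, rather than stalling the workqueue by sleeping on the lock.
 */
static void sketch_fw_reset_step(struct bnxt *bp)
{
        if (!netdev_trylock(bp->dev)) {
                bnxt_queue_fw_reset_work(bp, HZ / 10);
                return;
        }

        /* ... continue the reset sequence under the instance lock ... */
        netdev_unlock(bp->dev);
}
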
@@ -16814,7 +16866,6 @@ static int bnxt_resume(struct device *device)
        struct bnxt *bp = netdev_priv(dev);
        int rc = 0;

-       rtnl_lock();
        netdev_lock(dev);
        rc = pci_enable_device(bp->pdev);
        if (rc) {
@@ -16859,7 +16910,6 @@ static int bnxt_resume(struct device *device)

 resume_exit:
        netdev_unlock(bp->dev);
-       rtnl_unlock();
        bnxt_ulp_start(bp, rc);
        if (!rc)
                bnxt_reenable_sriov(bp);
@@ -16924,7 +16974,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
                bnxt_free_ctx_mem(bp, false);
        netdev_unlock(netdev);

-       /* Request a slot slot reset. */
+       /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
 }
@@ -17025,7 +17075,6 @@ static void bnxt_io_resume(struct pci_dev *pdev)
        int err;

        netdev_info(bp->dev, "PCI Slot Resume\n");
-       rtnl_lock();
        netdev_lock(netdev);

        err = bnxt_hwrm_func_qcaps(bp);
@@ -17043,7 +17092,6 @@ static void bnxt_io_resume(struct pci_dev *pdev)

        netif_device_attach(netdev);
        netdev_unlock(netdev);
-       rtnl_unlock();
        bnxt_ulp_start(bp, err);
        if (!err)
                bnxt_reenable_sriov(bp);
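
The suspend/resume and AER hunks drop their rtnl_lock()/rtnl_unlock() pairs entirely: with dev->request_ops_lock set (see bnxt_init_one() above), the core already holds the per-netdev instance lock around the driver's ops, so these driver-initiated paths need only netdev_lock()/netdev_unlock() themselves. A sketch of the resulting locking shape, assuming the instance-lock API in <linux/netdevice.h>; sketch_pm_resume_path is hypothetical:

#include <linux/netdevice.h>

/* Sketch: a driver-initiated reconfiguration path after the conversion.
 * Previously this nested rtnl_lock() outside netdev_lock(); now the
 * instance lock alone serializes it against ops invoked by the core.
 */
static void sketch_pm_resume_path(struct net_device *dev)
{
        netdev_lock(dev);
        /* ... re-init hardware state, netif_device_attach(dev), etc. ... */
        netdev_unlock(dev);
}
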