| author | David S. Miller <davem@davemloft.net> | 2020-05-28 11:17:20 -0700 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2020-05-28 11:17:20 -0700 |
| commit | 62c027883c736d56bbde559f755b1221263a43f2 (patch) | |
| tree | eda8ca8878844b9fc8b4f43cd7ae7680738f6047 /drivers/net/ethernet/intel/ice/ice_base.c | |
| parent | 1e372dbd683dbecbaa7b0c0ac392d13bf07c7aad (diff) | |
| parent | 3f0d97cdfe6e900a5c71817b0dfb77247afee36d (diff) | |
Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue
Jeff Kirsher says:
====================
100GbE Intel Wired LAN Driver Updates 2020-05-27
This series contains updates to the ice driver only.
Jesse fixes a number of issues, starting with the remaining signed versus
unsigned comparison issues. He also cleans up an unused code define and
simplifies the implementation of the manage MAC write command by using a
simple array to represent the MAC address when writing it.
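For context on the first fix: comparing a signed index against an unsigned count silently converts the signed value to unsigned, which is how these bugs typically bite. A minimal standalone illustration of the pitfall (invented names, not ice code):

```c
#include <stdio.h>

int main(void)
{
	unsigned int q_count = 3;   /* e.g. a queue count */
	int i = -1;                 /* signed index */

	/* 'i' is implicitly converted to unsigned int, so -1 becomes
	 * UINT_MAX and the comparison is false even though -1 < 3
	 * mathematically. */
	if (i < q_count)
		printf("taken (what you might expect)\n");
	else
		printf("not taken: -1 was converted to %u\n", (unsigned int)i);

	/* Declaring the index unsigned, as the ice_base.c hunks below do
	 * for the queue-walking loops, keeps both operands in one domain. */
	unsigned int j;
	for (j = 0; j < q_count; j++)
		printf("queue %u\n", j);

	return 0;
}
```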
Paul fixes the setting of the VF default LAN address by removing a check
that assumed the address had already been deleted and zeroed.
Surabhi prevents memory leaks on filter management initialization failures,
as well as during queue initialization and buffer allocation failures.
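A common way kernel init paths avoid this class of leak is the goto-based unwind ladder, where each failure path releases only what was already set up. A hedged, standalone sketch of the pattern (hypothetical names; this is not the actual ice filter-management code):

```c
#include <stdlib.h>

struct filter_ctx {
	int *rule_table;
	int *stats;
};

/* Sketch of the goto-unwind pattern: on any failure, free exactly what
 * was allocated so far and report the error to the caller. */
static int filter_ctx_init(struct filter_ctx *ctx, size_t n)
{
	ctx->rule_table = malloc(n * sizeof(*ctx->rule_table));
	if (!ctx->rule_table)
		return -1;

	ctx->stats = malloc(n * sizeof(*ctx->stats));
	if (!ctx->stats)
		goto err_free_rules;

	return 0;

err_free_rules:
	free(ctx->rule_table);
	ctx->rule_table = NULL;
	return -1;
}

int main(void)
{
	struct filter_ctx ctx;

	if (filter_ctx_init(&ctx, 16))
		return 1;

	free(ctx.stats);
	free(ctx.rule_table);
	return 0;
}
```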
Brett adds additional receive error counters that are reported by ethtool,
and fixes the enabling and disabling of VLAN stripping when the PVID has
been set.
Evan fixes a race condition between the firmware and software, which can
occur between the admin queue setup and the first command sent.
Marta fixes the driver's handling of XDP transmit ring destruction, making
sure the XDP transmit queues are destroyed as well. She also updates the
statistics when XDP transmit programs are loaded and packets are sent, and
changes the number of XDP transmit queues to match the number of receive
queues instead of the number of transmit queues.
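One way to read the queue-count change: XDP_TX transmits out of the ring that received the frame, so the natural sizing is one XDP Tx ring per Rx ring rather than per regular Tx ring (that rationale is my inference, not stated above). A tiny sketch with invented field names:

```c
#include <stdio.h>

/* Hypothetical queue accounting, not the real ice_vsi layout. */
struct vsi_cfg {
	unsigned int num_txq;
	unsigned int num_rxq;
	unsigned int num_xdp_txq;
};

int main(void)
{
	struct vsi_cfg cfg = { .num_txq = 8, .num_rxq = 4 };

	/* Before: XDP Tx rings mirrored the regular Tx ring count. */
	cfg.num_xdp_txq = cfg.num_txq;
	printf("old sizing: %u XDP Tx rings\n", cfg.num_xdp_txq);

	/* After: one XDP Tx ring per Rx ring, since XDP_TX sends from
	 * the ring that received the frame. */
	cfg.num_xdp_txq = cfg.num_rxq;
	printf("new sizing: %u XDP Tx rings\n", cfg.num_xdp_txq);

	return 0;
}
```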
Bruce avoids undefined behavior by not writing the 8-bit element
init_q_state using the width of the associated internal-to-hardware field,
which is 122 bits.
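The hazard being avoided: a generic context-packing routine that trusts the hardware field width (122 bits, i.e. 16 bytes) would read well past the end of a 1-byte struct member, which is an out-of-bounds read and undefined behavior. A standalone sketch of the size mismatch (invented struct and names, not the driver's context-packing code):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented stand-in for a Tx queue context: the struct element is one
 * byte, but the corresponding hardware field is 122 bits (16 bytes). */
struct fake_tlan_ctx {
	uint8_t init_q_state;
	uint8_t other;
};

int main(void)
{
	struct fake_tlan_ctx ctx = { .init_q_state = 0x5a };
	uint8_t dest[16] = { 0 };
	size_t hw_field_bytes = (122 + 7) / 8;	/* 16 bytes */

	/* A packer that copies 'hw_field_bytes' starting at
	 * &ctx.init_q_state would read 15 bytes beyond the 1-byte member:
	 * an out-of-bounds access and undefined behavior. */
	printf("would copy %zu bytes from a %zu-byte element\n",
	       hw_field_bytes, sizeof(ctx.init_q_state));

	/* Safe alternative: copy only what the source element holds. */
	memcpy(dest, &ctx.init_q_state, sizeof(ctx.init_q_state));

	return 0;
}
```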
Anirudh (Ani) refactors the receive checksum checks.
Krzysztof notifies the user when the fill queue is not long enough to
prepare all buffers before packet processing starts; in that case the
buffers are allocated during the NAPI poll.
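That check is visible in the ice_base.c hunk below: ice_setup_rx_ctx() now asks xsk_buff_can_alloc() whether the UMEM can supply num_bufs addresses, warns if it cannot, and leaves the allocation to the NAPI poll path. As a generic, standalone illustration of the probe-capacity-then-warn pattern (plain C stand-ins, not the kernel API):

```c
#include <stdbool.h>
#include <stdio.h>

/* Generic stand-ins: a fill queue with a fixed number of addresses. */
struct fill_queue {
	unsigned int entries;
};

static bool fill_queue_can_alloc(const struct fill_queue *fq,
				 unsigned int count)
{
	return fq->entries >= count;
}

int main(void)
{
	struct fill_queue fq = { .entries = 512 };
	unsigned int num_bufs = 2047;	/* descriptors the Rx ring wants filled */

	if (!fill_queue_can_alloc(&fq, num_bufs)) {
		fprintf(stderr,
			"fill queue too short for %u buffers; change ring/fill queue size\n",
			num_bufs);
		/* Bring-up still succeeds; buffers are allocated later
		 * (in the driver's case, from the NAPI poll loop). */
		return 0;
	}

	printf("filling %u buffers up front\n", num_bufs);
	return 0;
}
```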
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_base.c')
| -rw-r--r-- | drivers/net/ethernet/intel/ice/ice_base.c | 39 |
1 file changed, 26 insertions, 13 deletions
```diff
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 94d833b4e745..a174911d8994 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -13,7 +13,7 @@
  */
 static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
 {
-	int offset, i;
+	unsigned int offset, i;
 
 	mutex_lock(qs_cfg->qs_mutex);
 	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
@@ -39,7 +39,7 @@ static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
  */
 static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
 {
-	int i, index = 0;
+	unsigned int i, index = 0;
 
 	mutex_lock(qs_cfg->qs_mutex);
 	for (i = 0; i < qs_cfg->q_count; i++) {
@@ -281,7 +281,9 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
  */
 int ice_setup_rx_ctx(struct ice_ring *ring)
 {
+	struct device *dev = ice_pf_to_dev(ring->vsi->back);
 	int chain_len = ICE_MAX_CHAINED_RX_BUFS;
+	u16 num_bufs = ICE_DESC_UNUSED(ring);
 	struct ice_vsi *vsi = ring->vsi;
 	u32 rxdid = ICE_RXDID_FLEX_NIC;
 	struct ice_rlan_ctx rlan_ctx;
@@ -324,7 +326,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 			return err;
 		xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
 
-		dev_info(ice_pf_to_dev(vsi->back), "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
+		dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
 			 ring->q_index);
 	} else {
 		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
@@ -408,7 +410,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 	/* Absolute queue number out of 2K needs to be passed */
 	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
 	if (err) {
-		dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+		dev_err(dev, "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
 			pf_q, err);
 		return -EIO;
 	}
@@ -426,13 +428,23 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
 	writel(0, ring->tail);
 
-	err = ring->xsk_umem ?
-	      ice_alloc_rx_bufs_zc(ring, ICE_DESC_UNUSED(ring)) :
-	      ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
-	if (err)
-		dev_info(ice_pf_to_dev(vsi->back), "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
-			 ring->xsk_umem ? "UMEM enabled " : "",
-			 ring->q_index, pf_q);
+	if (ring->xsk_umem) {
+		if (!xsk_buff_can_alloc(ring->xsk_umem, num_bufs)) {
+			dev_warn(dev, "UMEM does not provide enough addresses to fill %d buffers on Rx ring %d\n",
+				 num_bufs, ring->q_index);
+			dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
+
+			return 0;
+		}
+
+		err = ice_alloc_rx_bufs_zc(ring, num_bufs);
+		if (err)
+			dev_info(dev, "Failed to allocate some buffers on UMEM enabled Rx ring %d (pf_q %d)\n",
+				 ring->q_index, pf_q);
+		return 0;
+	}
+
+	ice_alloc_rx_bufs(ring, num_bufs);
 
 	return 0;
 }
@@ -638,6 +650,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
 	struct ice_aqc_add_txqs_perq *txq;
 	struct ice_pf *pf = vsi->back;
 	u8 buf_len = sizeof(*qg_buf);
+	struct ice_hw *hw = &pf->hw;
 	enum ice_status status;
 	u16 pf_q;
 	u8 tc;
@@ -646,13 +659,13 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
 	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
 	/* copy context contents into the qg_buf */
 	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
-	ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+	ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
 		    ice_tlan_ctx_info);
 
 	/* init queue specific tail reg. It is referred as
 	 * transmit comm scheduler queue doorbell.
 	 */
-	ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+	ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);
 
 	if (IS_ENABLED(CONFIG_DCB))
 		tc = ring->dcb_tc;
```
