Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c')
| Mode | Path | Lines changed |
| -rw-r--r-- | drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 212 |
1 file changed, 114 insertions(+), 98 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index be9d2a8da515..7b941505a9d0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -8,8 +8,8 @@
 #include "ixgbe.h"
 #include "ixgbe_txrx_common.h"
 
-struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
-                                struct ixgbe_ring *ring)
+struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
+                                     struct ixgbe_ring *ring)
 {
         bool xdp_on = READ_ONCE(adapter->xdp_prog);
         int qid = ring->ring_idx;
@@ -17,11 +17,11 @@ struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
         if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
                 return NULL;
 
-        return xdp_get_umem_from_qid(adapter->netdev, qid);
+        return xsk_get_pool_from_qid(adapter->netdev, qid);
 }
 
-static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
-                                 struct xdp_umem *umem,
+static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
+                                 struct xsk_buff_pool *pool,
                                  u16 qid)
 {
         struct net_device *netdev = adapter->netdev;
@@ -35,7 +35,7 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
             qid >= netdev->real_num_tx_queues)
                 return -EINVAL;
 
-        err = xsk_buff_dma_map(umem, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
+        err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
         if (err)
                 return err;
 
@@ -52,20 +52,23 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
 
                 /* Kick start the NAPI context so that receiving will start */
                 err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
-                if (err)
+                if (err) {
+                        clear_bit(qid, adapter->af_xdp_zc_qps);
+                        xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
                         return err;
+                }
         }
 
         return 0;
 }
 
-static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
+static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid)
 {
-        struct xdp_umem *umem;
+        struct xsk_buff_pool *pool;
         bool if_running;
 
-        umem = xdp_get_umem_from_qid(adapter->netdev, qid);
-        if (!umem)
+        pool = xsk_get_pool_from_qid(adapter->netdev, qid);
+        if (!pool)
                 return -EINVAL;
 
         if_running = netif_running(adapter->netdev) &&
@@ -75,7 +78,7 @@ static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
                 ixgbe_txrx_ring_disable(adapter, qid);
 
         clear_bit(qid, adapter->af_xdp_zc_qps);
-        xsk_buff_dma_unmap(umem, IXGBE_RX_DMA_ATTR);
+        xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
 
         if (if_running)
                 ixgbe_txrx_ring_enable(adapter, qid);
@@ -83,11 +86,12 @@ static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
         return 0;
 }
 
-int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
+int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
+                         struct xsk_buff_pool *pool,
                          u16 qid)
 {
-        return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) :
-                ixgbe_xsk_umem_disable(adapter, qid);
+        return pool ? ixgbe_xsk_pool_enable(adapter, pool, qid) :
+                ixgbe_xsk_pool_disable(adapter, qid);
 }
 
 static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
@@ -96,39 +100,51 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
 {
         int err, result = IXGBE_XDP_PASS;
         struct bpf_prog *xdp_prog;
+        struct ixgbe_ring *ring;
         struct xdp_frame *xdpf;
         u32 act;
 
-        rcu_read_lock();
         xdp_prog = READ_ONCE(rx_ring->xdp_prog);
         act = bpf_prog_run_xdp(xdp_prog, xdp);
 
+        if (likely(act == XDP_REDIRECT)) {
+                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+                if (!err)
+                        return IXGBE_XDP_REDIR;
+                if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+                        result = IXGBE_XDP_EXIT;
+                else
+                        result = IXGBE_XDP_CONSUMED;
+                goto out_failure;
+        }
+
         switch (act) {
         case XDP_PASS:
                 break;
         case XDP_TX:
                 xdpf = xdp_convert_buff_to_frame(xdp);
-                if (unlikely(!xdpf)) {
-                        result = IXGBE_XDP_CONSUMED;
-                        break;
-                }
-                result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+                if (unlikely(!xdpf))
+                        goto out_failure;
+                ring = ixgbe_determine_xdp_ring(adapter);
+                if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+                        spin_lock(&ring->tx_lock);
+                result = ixgbe_xmit_xdp_ring(ring, xdpf);
+                if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+                        spin_unlock(&ring->tx_lock);
+                if (result == IXGBE_XDP_CONSUMED)
+                        goto out_failure;
                 break;
-        case XDP_REDIRECT:
-                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-                result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
+        case XDP_DROP:
+                result = IXGBE_XDP_CONSUMED;
                 break;
         default:
-                bpf_warn_invalid_xdp_action(act);
-                /* fallthrough */
+                bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
+                fallthrough;
         case XDP_ABORTED:
-                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
-                /* fallthrough -- handle aborts by dropping packet */
-        case XDP_DROP:
                 result = IXGBE_XDP_CONSUMED;
-                break;
+out_failure:
+                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
         }
-        rcu_read_unlock();
         return result;
 }
 
@@ -149,7 +165,7 @@ bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
         i -= rx_ring->count;
 
         do {
-                bi->xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+                bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
                 if (!bi->xdp) {
                         ok = false;
                         break;
@@ -195,26 +211,27 @@ bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
 }
 
 static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
-                                              struct ixgbe_rx_buffer *bi)
+                                              const struct xdp_buff *xdp)
 {
-        unsigned int metasize = bi->xdp->data - bi->xdp->data_meta;
-        unsigned int datasize = bi->xdp->data_end - bi->xdp->data;
+        unsigned int totalsize = xdp->data_end - xdp->data_meta;
+        unsigned int metasize = xdp->data - xdp->data_meta;
         struct sk_buff *skb;
 
+        net_prefetch(xdp->data_meta);
+
         /* allocate a skb to store the frags */
-        skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
-                               bi->xdp->data_end - bi->xdp->data_hard_start,
-                               GFP_ATOMIC | __GFP_NOWARN);
+        skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
         if (unlikely(!skb))
                 return NULL;
 
-        skb_reserve(skb, bi->xdp->data - bi->xdp->data_hard_start);
-        memcpy(__skb_put(skb, datasize), bi->xdp->data, datasize);
-        if (metasize)
+        memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+               ALIGN(totalsize, sizeof(long)));
+
+        if (metasize) {
                 skb_metadata_set(skb, metasize);
+                __skb_pull(skb, metasize);
+        }
 
-        xsk_buff_free(bi->xdp);
-        bi->xdp = NULL;
         return skb;
 }
 
@@ -289,28 +306,36 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
                 xsk_buff_dma_sync_for_cpu(bi->xdp);
                 xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);
 
-                if (xdp_res) {
-                        if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))
-                                xdp_xmit |= xdp_res;
-                        else
-                                xsk_buff_free(bi->xdp);
+                if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) {
+                        xdp_xmit |= xdp_res;
+                } else if (xdp_res == IXGBE_XDP_EXIT) {
+                        failure = true;
+                        break;
+                } else if (xdp_res == IXGBE_XDP_CONSUMED) {
+                        xsk_buff_free(bi->xdp);
+                } else if (xdp_res == IXGBE_XDP_PASS) {
+                        goto construct_skb;
+                }
 
-                        bi->xdp = NULL;
-                        total_rx_packets++;
-                        total_rx_bytes += size;
+                bi->xdp = NULL;
+                total_rx_packets++;
+                total_rx_bytes += size;
 
-                        cleaned_count++;
-                        ixgbe_inc_ntc(rx_ring);
-                        continue;
-                }
+                cleaned_count++;
+                ixgbe_inc_ntc(rx_ring);
+                continue;
 
+construct_skb:
                 /* XDP_PASS path */
-                skb = ixgbe_construct_skb_zc(rx_ring, bi);
+                skb = ixgbe_construct_skb_zc(rx_ring, bi->xdp);
                 if (!skb) {
                         rx_ring->rx_stats.alloc_rx_buff_failed++;
                         break;
                 }
 
+                xsk_buff_free(bi->xdp);
+                bi->xdp = NULL;
+
                 cleaned_count++;
                 ixgbe_inc_ntc(rx_ring);
 
@@ -325,30 +350,22 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
         }
 
         if (xdp_xmit & IXGBE_XDP_REDIR)
-                xdp_do_flush_map();
+                xdp_do_flush();
 
         if (xdp_xmit & IXGBE_XDP_TX) {
-                struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+                struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
 
-                /* Force memory writes to complete before letting h/w
-                 * know there are new descriptors to fetch.
-                 */
-                wmb();
-                writel(ring->next_to_use, ring->tail);
+                ixgbe_xdp_ring_update_tail_locked(ring);
         }
 
-        u64_stats_update_begin(&rx_ring->syncp);
-        rx_ring->stats.packets += total_rx_packets;
-        rx_ring->stats.bytes += total_rx_bytes;
-        u64_stats_update_end(&rx_ring->syncp);
-        q_vector->rx.total_packets += total_rx_packets;
-        q_vector->rx.total_bytes += total_rx_bytes;
+        ixgbe_update_rx_ring_stats(rx_ring, q_vector, total_rx_packets,
+                                   total_rx_bytes);
 
-        if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+        if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
                 if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
-                        xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+                        xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
                 else
-                        xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+                        xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
 
                 return (int)total_rx_packets;
         }
@@ -373,6 +390,7 @@ void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
 
 static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
 {
+        struct xsk_buff_pool *pool = xdp_ring->xsk_pool;
         union ixgbe_adv_tx_desc *tx_desc = NULL;
         struct ixgbe_tx_buffer *tx_bi;
         bool work_done = true;
@@ -380,19 +398,20 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
         dma_addr_t dma;
         u32 cmd_type;
 
-        while (budget-- > 0) {
-                if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
-                    !netif_carrier_ok(xdp_ring->netdev)) {
+        while (likely(budget)) {
+                if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
                         work_done = false;
                         break;
                 }
 
-                if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
+                if (!netif_carrier_ok(xdp_ring->netdev))
+                        break;
+
+                if (!xsk_tx_peek_desc(pool, &desc))
                         break;
 
-                dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
-                xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
-                                                 desc.len);
+                dma = xsk_buff_raw_get_dma(pool, desc.addr);
+                xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
 
                 tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
                 tx_bi->bytecount = desc.len;
@@ -414,11 +433,13 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
                 xdp_ring->next_to_use++;
                 if (xdp_ring->next_to_use == xdp_ring->count)
                         xdp_ring->next_to_use = 0;
+
+                budget--;
         }
 
         if (tx_desc) {
                 ixgbe_xdp_ring_update_tail(xdp_ring);
-                xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+                xsk_tx_release(pool);
         }
 
         return !!budget && work_done;
@@ -439,7 +460,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
 {
         u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
         unsigned int total_packets = 0, total_bytes = 0;
-        struct xdp_umem *umem = tx_ring->xsk_umem;
+        struct xsk_buff_pool *pool = tx_ring->xsk_pool;
         union ixgbe_adv_tx_desc *tx_desc;
         struct ixgbe_tx_buffer *tx_bi;
         u32 xsk_frames = 0;
@@ -475,44 +496,39 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
         }
 
         tx_ring->next_to_clean = ntc;
-
-        u64_stats_update_begin(&tx_ring->syncp);
-        tx_ring->stats.bytes += total_bytes;
-        tx_ring->stats.packets += total_packets;
-        u64_stats_update_end(&tx_ring->syncp);
-        q_vector->tx.total_bytes += total_bytes;
-        q_vector->tx.total_packets += total_packets;
+        ixgbe_update_tx_ring_stats(tx_ring, q_vector, total_packets,
+                                   total_bytes);
 
         if (xsk_frames)
-                xsk_umem_complete_tx(umem, xsk_frames);
+                xsk_tx_completed(pool, xsk_frames);
 
-        if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
-                xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
+        if (xsk_uses_need_wakeup(pool))
+                xsk_set_tx_need_wakeup(pool);
 
         return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
 }
 
 int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
 {
-        struct ixgbe_adapter *adapter = netdev_priv(dev);
+        struct ixgbe_adapter *adapter = ixgbe_from_netdev(dev);
         struct ixgbe_ring *ring;
 
         if (test_bit(__IXGBE_DOWN, &adapter->state))
                 return -ENETDOWN;
 
         if (!READ_ONCE(adapter->xdp_prog))
-                return -ENXIO;
+                return -EINVAL;
 
         if (qid >= adapter->num_xdp_queues)
-                return -ENXIO;
+                return -EINVAL;
 
         ring = adapter->xdp_ring[qid];
 
         if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
                 return -ENETDOWN;
 
-        if (!ring->xsk_umem)
-                return -ENXIO;
+        if (!ring->xsk_pool)
+                return -EINVAL;
 
         if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
                 u64 eics = BIT_ULL(ring->q_vector->v_idx);
@@ -526,7 +542,7 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
 void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
 {
         u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
-        struct xdp_umem *umem = tx_ring->xsk_umem;
+        struct xsk_buff_pool *pool = tx_ring->xsk_pool;
         struct ixgbe_tx_buffer *tx_bi;
         u32 xsk_frames = 0;
 
@@ -546,5 +562,5 @@ void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
         }
 
         if (xsk_frames)
-                xsk_umem_complete_tx(umem, xsk_frames);
+                xsk_tx_completed(pool, xsk_frames);
 }
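
A note for readers following the conversion above: the driver moves from the per-UMEM helpers (xdp_get_umem_from_qid(), xsk_buff_dma_map(), xsk_umem_consume_tx(), xsk_umem_complete_tx()) to the xsk_buff_pool API from <net/xdp_sock_drv.h> (xsk_get_pool_from_qid(), xsk_pool_dma_map(), xsk_tx_peek_desc(), xsk_tx_completed()), introduced around v5.10. The sketch below is illustrative only and is not ixgbe code: struct my_ring and the my_ring_enable_zc()/my_ring_tx_one() helpers are hypothetical stand-ins, while the xsk_* calls are the real kernel helpers the diff switches to.

/* Minimal, hypothetical sketch of the xsk_buff_pool driver-side pattern.
 * Only the xsk_* helpers are real; my_ring and the two functions are
 * illustrative stand-ins, not part of the ixgbe driver.
 */
#include <linux/netdevice.h>
#include <net/xdp_sock_drv.h>

struct my_ring {
        struct net_device *netdev;
        struct device *dev;                /* device used for DMA mapping */
        struct xsk_buff_pool *xsk_pool;    /* replaces the old xdp_umem pointer */
        u16 qid;
};

/* Old flow: xdp_get_umem_from_qid() + xsk_buff_dma_map(umem, ...).
 * New flow: look up the pool bound to the queue and DMA-map it once
 * when zero-copy is enabled on that queue.
 */
static int my_ring_enable_zc(struct my_ring *ring)
{
        struct xsk_buff_pool *pool;
        int err;

        pool = xsk_get_pool_from_qid(ring->netdev, ring->qid);
        if (!pool)
                return -EINVAL;

        err = xsk_pool_dma_map(pool, ring->dev, 0 /* DMA attrs */);
        if (err)
                return err;

        ring->xsk_pool = pool;
        return 0;
}

/* Tx side: peek one descriptor from the pool, sync it for the device,
 * and release the Tx ring after posting (mirrors ixgbe_xmit_zc above).
 */
static bool my_ring_tx_one(struct my_ring *ring)
{
        struct xdp_desc desc;
        dma_addr_t dma;

        if (!xsk_tx_peek_desc(ring->xsk_pool, &desc))
                return false;

        dma = xsk_buff_raw_get_dma(ring->xsk_pool, desc.addr);
        xsk_buff_raw_dma_sync_for_device(ring->xsk_pool, dma, desc.len);
        /* ... post dma/desc.len to the hardware Tx ring here ... */

        xsk_tx_release(ring->xsk_pool);
        return true;
}

The same rename pattern carries through the rest of the diff: xsk_umem_complete_tx() becomes xsk_tx_completed(), and the need_wakeup helpers drop the umem prefix (xsk_uses_need_wakeup(), xsk_set_tx_need_wakeup()).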
