author		Krzysztof Kazimierczak <krzysztof.kazimierczak@intel.com>	2019-11-04 09:38:56 -0800
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2019-11-04 12:01:55 -0800
commit		2d4238f5569722197612656163d824098208519c (patch)
tree		76b2eaa0a20472069e1a4c7e9d1e79fa5f68b530 /drivers/net/ethernet/intel/ice/ice_txrx.h
parent		0891d6d4b1fe1becf1a77353b03449955820084b (diff)
ice: Add support for AF_XDP
Add zero copy AF_XDP support. This patch adds zero copy support for
Tx and Rx; code for zero copy is added to ice_xsk.h and ice_xsk.c.
For Tx, implement ndo_xsk_wakeup. As with other drivers, reuse
existing XDP Tx queues for this task, since XDP_REDIRECT guarantees
mutual exclusion between different NAPI contexts based on CPU ID. In
turn, a netdev can XDP_REDIRECT to another netdev with a different
NAPI context, since the operation is bound to a specific core and each
core has its own hardware ring.
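For illustration, a minimal sketch of the shape such a wakeup handler takes. The lookups and the ice_trigger_sw_intr() helper are assumptions in the driver's naming style, not code quoted from this patch; only napi_if_scheduled_mark_missed() is established kernel API:

static int ice_xsk_wakeup_sketch(struct net_device *netdev, u32 queue_id,
				 u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_q_vector *q_vector;

	/* Refuse queues without an AF_XDP UMEM bound to them. */
	if (queue_id >= vsi->num_txq || !vsi->xdp_rings[queue_id]->xsk_umem)
		return -ENXIO;

	/* The hook only kicks the queue's NAPI context; the per-CPU XDP
	 * Tx ring then transmits from softirq, which is what makes the
	 * mutual-exclusion argument above hold.
	 */
	q_vector = vsi->xdp_rings[queue_id]->q_vector;
	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
		ice_trigger_sw_intr(&vsi->back->hw, q_vector); /* assumed helper */

	return 0;
}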
For Rx, allocate frames as MEM_TYPE_ZERO_COPY on queues on which AF_XDP
is enabled.
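A hedged sketch of the Rx side: when a UMEM is attached, the ring's xdp_rxq_info is registered with the zero-copy memory model. xdp_rxq_info_reg_mem_model() and MEM_TYPE_ZERO_COPY are real kernel API of this era; the helper name and the ice_zca_free callback are assumptions:

#include <net/xdp.h>

/* Hypothetical callback that recycles a UMEM frame back to the ring. */
static void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle);

static int ice_xsk_rxq_reg(struct ice_ring *rx_ring)
{
	/* xsk_umem and zca are the struct ice_ring fields added below. */
	rx_ring->zca.free = ice_zca_free;
	return xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq,
					  MEM_TYPE_ZERO_COPY,
					  &rx_ring->zca);
}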
Signed-off-by: Krzysztof Kazimierczak <krzysztof.kazimierczak@intel.com>
Co-developed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_txrx.h')
-rw-r--r--	drivers/net/ethernet/intel/ice/ice_txrx.h	20
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index a07101b13226..d5d243b8e69f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -4,6 +4,8 @@
 #ifndef _ICE_TXRX_H_
 #define _ICE_TXRX_H_
 
+#include "ice_type.h"
+
 #define ICE_DFLT_IRQ_WORK	256
 #define ICE_RXBUF_2048		2048
 #define ICE_MAX_CHAINED_RX_BUFS	5
@@ -88,9 +90,17 @@ struct ice_tx_offload_params {
 struct ice_rx_buf {
 	struct sk_buff *skb;
 	dma_addr_t dma;
-	struct page *page;
-	unsigned int page_offset;
-	u16 pagecnt_bias;
+	union {
+		struct {
+			struct page *page;
+			unsigned int page_offset;
+			u16 pagecnt_bias;
+		};
+		struct {
+			void *addr;
+			u64 handle;
+		};
+	};
 };
 
 struct ice_q_stats {
@@ -211,6 +221,8 @@ struct ice_ring {
 
 	struct rcu_head rcu;		/* to avoid race on free */
 	struct bpf_prog *xdp_prog;
+	struct xdp_umem *xsk_umem;
+	struct zero_copy_allocator zca;
 	/* CL3 - 3rd cacheline starts here */
 	struct xdp_rxq_info xdp_rxq;
 	/* CLX - the below items are only accessed infrequently and should be
@@ -250,6 +262,8 @@ struct ice_ring_container {
 #define ice_for_each_ring(pos, head) \
 	for (pos = (head).ring; pos; pos = pos->next)
 
+union ice_32b_rx_flex_desc;
+
 bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
 void ice_clean_tx_ring(struct ice_ring *tx_ring);
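The union added to struct ice_rx_buf lets one buffer slot describe either a page-backed frame (copy path) or a UMEM frame (zero-copy path). A hypothetical helper, not from the patch, showing how a consumer would pick the valid view:

static void *ice_rx_buf_va(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	/* A ring with xsk_umem set stores UMEM addresses in rx_buf->addr;
	 * otherwise the page/page_offset pair of the union is the live one.
	 */
	if (rx_ring->xsk_umem)
		return rx_buf->addr;
	return page_address(rx_buf->page) + rx_buf->page_offset;
}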