Diffstat (limited to 'drivers/net/ethernet/intel/igb/igb.h')
-rw-r--r-- | drivers/net/ethernet/intel/igb/igb.h | 173
1 file changed, 155 insertions, 18 deletions
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index ca54e268d157..0fff1df81b7b 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -18,6 +18,10 @@
 #include <linux/i2c-algo-bit.h>
 #include <linux/pci.h>
 #include <linux/mdio.h>
+#include <linux/lockdep.h>
+
+#include <net/xdp.h>
+#include <net/xdp_sock_drv.h>
 
 struct igb_adapter;
 
@@ -32,11 +36,11 @@ struct igb_adapter;
 /* TX/RX descriptor defines */
 #define IGB_DEFAULT_TXD 256
 #define IGB_DEFAULT_TX_WORK 128
-#define IGB_MIN_TXD 80
+#define IGB_MIN_TXD 64
 #define IGB_MAX_TXD 4096
 
 #define IGB_DEFAULT_RXD 256
-#define IGB_MIN_RXD 80
+#define IGB_MIN_RXD 64
 #define IGB_MAX_RXD 4096
 
 #define IGB_DEFAULT_ITR 3 /* dynamic */
@@ -79,6 +83,13 @@ struct igb_adapter;
 #define IGB_I210_RX_LATENCY_100 2213
 #define IGB_I210_RX_LATENCY_1000 448
 
+/* XDP */
+#define IGB_XDP_PASS 0
+#define IGB_XDP_CONSUMED BIT(0)
+#define IGB_XDP_TX BIT(1)
+#define IGB_XDP_REDIR BIT(2)
+#define IGB_XDP_EXIT BIT(3)
+
 struct vf_data_storage {
 	unsigned char vf_mac_addresses[ETH_ALEN];
 	u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
@@ -130,19 +141,66 @@ struct vf_mac_filter {
 /* this is the size past which hardware will drop packets when setting LPE=0 */
 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
 
+#define IGB_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+
 /* Supported Rx Buffer Sizes */
 #define IGB_RXBUFFER_256 256
+#define IGB_RXBUFFER_1536 1536
 #define IGB_RXBUFFER_2048 2048
 #define IGB_RXBUFFER_3072 3072
 #define IGB_RX_HDR_LEN IGB_RXBUFFER_256
 #define IGB_TS_HDR_LEN 16
 
-#define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+/* Attempt to maximize the headroom available for incoming frames. We
+ * use a 2K buffer for receives and need 1536/1534 to store the data for
+ * the frame. This leaves us with 512 bytes of room. From that we need
+ * to deduct the space needed for the shared info and the padding needed
+ * to IP align the frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ *       up negative. In these cases we should fall back to the 3K
+ *       buffers.
+ */
 #if (PAGE_SIZE < 8192)
-#define IGB_MAX_FRAME_BUILD_SKB \
-	(SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048) - IGB_SKB_PAD - IGB_TS_HDR_LEN)
+#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_1536 - NET_IP_ALIGN)
+#define IGB_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + IGB_TS_HDR_LEN + IGB_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048))
+
+static inline int igb_compute_pad(int rx_buf_len)
+{
+	int page_size, pad_size;
+
+	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+	return pad_size;
+}
+
+static inline int igb_skb_pad(void)
+{
+	int rx_buf_len;
+
+	/* If a 2K buffer cannot handle a standard Ethernet frame then
+	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
+	 *
+	 * For a 3K buffer we need to add enough padding to allow for
+	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
+	 * cache-line alignment.
+	 */
+	if (IGB_2K_TOO_SMALL_WITH_PADDING)
+		rx_buf_len = IGB_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+	else
+		rx_buf_len = IGB_RXBUFFER_1536;
+
+	/* if needed make room for NET_IP_ALIGN */
+	rx_buf_len -= NET_IP_ALIGN;
+
+	return igb_compute_pad(rx_buf_len);
+}
+
+#define IGB_SKB_PAD igb_skb_pad()
 #else
-#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_2048 - IGB_TS_HDR_LEN)
+#define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
 #endif
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -159,7 +217,7 @@ struct vf_mac_filter {
 #define IGB_MASTER_SLAVE e1000_ms_hw_default
 #endif
 
-#define IGB_MNG_VLAN_NONE -1
+#define IGB_MNG_VLAN_NONE 0xFFFF
 
 enum igb_tx_flags {
 	/* cmd_type flags */
@@ -194,13 +252,26 @@ enum igb_tx_flags {
 #define IGB_SFF_ADDRESSING_MODE 0x4
 #define IGB_SFF_8472_UNSUP 0x00
 
+/* TX resources are shared between XDP and netstack
+ * and we need to tag the buffer type to distinguish them
+ */
+enum igb_tx_buf_type {
+	IGB_TYPE_SKB = 0,
+	IGB_TYPE_XDP,
+	IGB_TYPE_XSK
+};
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer
  */
 struct igb_tx_buffer {
 	union e1000_adv_tx_desc *next_to_watch;
 	unsigned long time_stamp;
-	struct sk_buff *skb;
+	enum igb_tx_buf_type type;
+	union {
+		struct sk_buff *skb;
+		struct xdp_frame *xdpf;
+	};
 	unsigned int bytecount;
 	u16 gso_segs;
 	__be16 protocol;
@@ -248,10 +319,12 @@ struct igb_ring_container {
 struct igb_ring {
 	struct igb_q_vector *q_vector;	/* backlink to q_vector */
 	struct net_device *netdev;	/* back pointer to net_device */
+	struct bpf_prog *xdp_prog;
 	struct device *dev;		/* device pointer for dma mapping */
 	union {				/* array of buffer info structs */
 		struct igb_tx_buffer *tx_buffer_info;
 		struct igb_rx_buffer *rx_buffer_info;
+		struct xdp_buff **rx_buffer_info_zc;
	};
 	void *desc;			/* descriptor ring memory */
 	unsigned long flags;		/* ring specific flags */
@@ -288,6 +361,8 @@ struct igb_ring {
 			struct u64_stats_sync rx_syncp;
 		};
 	};
+	struct xdp_rxq_info xdp_rxq;
+	struct xsk_buff_pool *xsk_pool;
 } ____cacheline_internodealigned_in_smp;
 
 struct igb_q_vector {
@@ -306,7 +381,7 @@ struct igb_q_vector {
 	char name[IFNAMSIZ + 9];
 
 	/* for dynamic allocation of rings associated with this q_vector */
-	struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
+	struct igb_ring ring[] ____cacheline_internodealigned_in_smp;
 };
 
 enum e1000_ring_flags_t {
@@ -315,7 +390,9 @@ enum e1000_ring_flags_t {
 	IGB_RING_FLAG_RX_SCTP_CSUM,
 	IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
 	IGB_RING_FLAG_TX_CTX_IDX,
-	IGB_RING_FLAG_TX_DETECT_HANG
+	IGB_RING_FLAG_TX_DETECT_HANG,
+	IGB_RING_FLAG_TX_DISABLED,
+	IGB_RING_FLAG_RX_ALLOC_FAILED,
 };
 
 #define ring_uses_large_buffer(ring) \
@@ -339,7 +416,7 @@ static inline unsigned int igb_rx_bufsz(struct igb_ring *ring)
 		return IGB_RXBUFFER_3072;
 
 	if (ring_uses_build_skb(ring))
-		return IGB_MAX_FRAME_BUILD_SKB + IGB_TS_HDR_LEN;
+		return IGB_MAX_FRAME_BUILD_SKB;
 #endif
 	return IGB_RXBUFFER_2048;
 }
@@ -467,6 +544,7 @@ struct igb_adapter {
 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 
 	struct net_device *netdev;
+	struct bpf_prog *xdp_prog;
 
 	unsigned long state;
 	unsigned int flags;
@@ -548,7 +626,7 @@ struct igb_adapter {
 	struct delayed_work ptp_overflow_work;
 	struct work_struct ptp_tx_work;
 	struct sk_buff *ptp_tx_skb;
-	struct hwtstamp_config tstamp_config;
+	struct kernel_hwtstamp_config tstamp_config;
 	unsigned long ptp_tx_start;
 	unsigned long last_rx_ptp_check;
 	unsigned long last_rx_timestamp;
@@ -567,7 +645,7 @@ struct igb_adapter {
 		struct timespec64 period;
 	} perout[IGB_N_PEROUT];
 
-	char fw_version[32];
+	char fw_version[48];
 #ifdef CONFIG_IGB_HWMON
 	struct hwmon_buff *igb_hwmon_buff;
 	bool ets;
@@ -594,6 +672,8 @@ struct igb_adapter {
 	struct igb_mac_addr *mac_table;
 	struct vf_mac_filter vf_macs;
 	struct vf_mac_filter *vf_mac_list;
+	/* lock for VF resources */
+	spinlock_t vfs_lock;
 };
 
 /* flags controlling PTP/1588 function */
@@ -642,8 +722,12 @@ enum igb_boards {
 };
 
 extern char igb_driver_name[];
-extern char igb_driver_version[];
+void igb_set_queue_napi(struct igb_adapter *adapter, int q_idx,
+			struct napi_struct *napi);
+int igb_xmit_xdp_ring(struct igb_adapter *adapter,
+		      struct igb_ring *ring,
+		      struct xdp_frame *xdpf);
 
 int igb_open(struct net_device *netdev);
 int igb_close(struct net_device *netdev);
 int igb_up(struct igb_adapter *);
@@ -657,11 +741,21 @@ int igb_setup_tx_resources(struct igb_ring *);
 int igb_setup_rx_resources(struct igb_ring *);
 void igb_free_tx_resources(struct igb_ring *);
 void igb_free_rx_resources(struct igb_ring *);
+void igb_clean_tx_ring(struct igb_ring *tx_ring);
+void igb_clean_rx_ring(struct igb_ring *rx_ring);
 void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
 void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_finalize_xdp(struct igb_adapter *adapter, unsigned int status);
+void igb_update_rx_stats(struct igb_q_vector *q_vector, unsigned int packets,
+			 unsigned int bytes);
 void igb_setup_tctl(struct igb_adapter *);
 void igb_setup_rctl(struct igb_adapter *);
+void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *);
 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
+int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp);
+void igb_process_skb_fields(struct igb_ring *rx_ring,
+			    union e1000_adv_rx_desc *rx_desc,
+			    struct sk_buff *skb);
 void igb_alloc_rx_buffers(struct igb_ring *, u16);
 void igb_update_stats(struct igb_adapter *);
 bool igb_has_link(struct igb_adapter *adapter);
@@ -675,10 +769,13 @@ void igb_ptp_suspend(struct igb_adapter *adapter);
 void igb_ptp_rx_hang(struct igb_adapter *adapter);
 void igb_ptp_tx_hang(struct igb_adapter *adapter);
 void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
-			 struct sk_buff *skb);
-int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
-int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
+			ktime_t *timestamp);
+int igb_ptp_hwtstamp_get(struct net_device *netdev,
+			 struct kernel_hwtstamp_config *config);
+int igb_ptp_hwtstamp_set(struct net_device *netdev,
+			 struct kernel_hwtstamp_config *config,
+			 struct netlink_ext_ack *extack);
 void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
 unsigned int igb_get_max_rss_queues(struct igb_adapter *);
 #ifdef CONFIG_IGB_HWMON
@@ -722,6 +819,33 @@ static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
 	return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
 }
 
+/* This function assumes __netif_tx_lock is held by the caller. */
+static inline void igb_xdp_ring_update_tail(struct igb_ring *ring)
+{
+	lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
+
+	/* Force memory writes to complete before letting h/w know there
+	 * are new descriptors to fetch.
+	 */
+	wmb();
+	writel(ring->next_to_use, ring->tail);
+}
+
+static inline struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
+{
+	unsigned int r_idx = smp_processor_id();
+
+	if (r_idx >= adapter->num_tx_queues)
+		r_idx = r_idx % adapter->num_tx_queues;
+
+	return adapter->tx_ring[r_idx];
+}
+
+static inline bool igb_xdp_is_enabled(struct igb_adapter *adapter)
+{
+	return !!READ_ONCE(adapter->xdp_prog);
+}
+
 int igb_add_filter(struct igb_adapter *adapter,
 		   struct igb_nfc_filter *input);
 int igb_erase_filter(struct igb_adapter *adapter,
@@ -732,4 +856,17 @@ int igb_add_mac_steering_filter(struct igb_adapter *adapter,
 int igb_del_mac_steering_filter(struct igb_adapter *adapter,
 				const u8 *addr, u8 queue, u8 flags);
 
+struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter,
+				   struct igb_ring *ring);
+int igb_xsk_pool_setup(struct igb_adapter *adapter,
+		       struct xsk_buff_pool *pool,
+		       u16 qid);
+bool igb_alloc_rx_buffers_zc(struct igb_ring *rx_ring,
+			     struct xsk_buff_pool *xsk_pool, u16 count);
+void igb_clean_rx_ring_zc(struct igb_ring *rx_ring);
+int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector,
+			struct xsk_buff_pool *xsk_pool, const int budget);
+bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool);
+int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
+
 #endif /* _IGB_H_ */
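The headroom math introduced by the IGB_SKB_PAD / igb_compute_pad() hunk above is easiest to see with concrete numbers. The user-space sketch below (not part of the patch) replays that computation under assumed x86_64 values: PAGE_SIZE 4096, a 64-byte cache line, NET_IP_ALIGN 0, and sizeof(struct skb_shared_info) taken as 320 bytes. The real values depend on the architecture and kernel configuration, so treat the printed result as illustrative only.

/* Standalone sketch (not kernel code): works through the IGB_SKB_PAD math
 * with assumed values -- PAGE_SIZE 4096, SMP_CACHE_BYTES 64, NET_IP_ALIGN 0,
 * and sizeof(struct skb_shared_info) approximated as 320 bytes.
 */
#include <stdio.h>

#define PAGE_SIZE		4096
#define SMP_CACHE_BYTES		64
#define NET_IP_ALIGN		0
#define NET_SKB_PAD		SMP_CACHE_BYTES
#define SKB_SHINFO_SIZE		320	/* assumed sizeof(struct skb_shared_info) */

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define SKB_DATA_ALIGN(x)	ALIGN(x, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(x)	((x) - SKB_DATA_ALIGN(SKB_SHINFO_SIZE))

#define IGB_TS_HDR_LEN		16
#define IGB_RXBUFFER_1536	1536
#define IGB_RXBUFFER_2048	2048
#define IGB_RXBUFFER_3072	3072

#define IGB_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + IGB_TS_HDR_LEN + IGB_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048))

static int igb_compute_pad(int rx_buf_len)
{
	/* round the buffer up to a half page, then see how much is left
	 * after the shared info overhead is subtracted
	 */
	int page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);

	return SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
}

static int igb_skb_pad(void)
{
	int rx_buf_len;

	if (IGB_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = IGB_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = IGB_RXBUFFER_1536;

	rx_buf_len -= NET_IP_ALIGN;

	return igb_compute_pad(rx_buf_len);
}

int main(void)
{
	/* 64 + 16 + 1536 = 1616 <= 2048 - 320 = 1728, so the 2K path is
	 * taken and the headroom works out to 2048 - 320 - 1536 = 192 bytes.
	 */
	printf("IGB_SKB_PAD = %d\n", igb_skb_pad());
	return 0;
}

With these assumptions the 2K check passes and 192 bytes of headroom remain; with a 256-byte cache line the same check fails (256 + 16 + 1536 exceeds 2048 minus the aligned shared info) and the driver falls back to 3K buffers, as the Note in the patch comment warns.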
