Diffstat (limited to 'drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h')
-rw-r--r--	drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h	154
1 file changed, 138 insertions, 16 deletions
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index cdb623d5f2c1..834cba8c3a41 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
 /* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2020 NXP
+ * Copyright 2016-2022 NXP
  */
 
 #ifndef __DPAA2_ETH_H
@@ -12,6 +12,7 @@
 #include <linux/fsl/mc.h>
 #include <linux/net_tstamp.h>
 #include <net/devlink.h>
+#include <net/xdp.h>
 
 #include <soc/fsl/dpaa2-io.h>
 #include <soc/fsl/dpaa2-fd.h>
@@ -53,6 +54,12 @@
  */
 #define DPAA2_ETH_TXCONF_PER_NAPI	256
 
+/* Maximum number of Tx frames to be processed in a single NAPI
+ * call when AF_XDP is running. Bind it to DPAA2_ETH_TXCONF_PER_NAPI
+ * to maximize the throughput.
+ */
+#define DPAA2_ETH_TX_ZC_PER_NAPI	DPAA2_ETH_TXCONF_PER_NAPI
+
 /* Buffer qouta per channel. We want to keep in check number of ingress frames
  * in flight: for small sized frames, congestion group taildrop may kick in
  * first; for large sizes, Rx FQ taildrop threshold will ensure only a
@@ -109,6 +116,14 @@
 #define DPAA2_ETH_RX_BUF_ALIGN_REV1	256
 #define DPAA2_ETH_RX_BUF_ALIGN		64
 
+/* The firmware allows assigning multiple buffer pools to a single DPNI -
+ * maximum 8 DPBP objects. By default, only the first DPBP (idx 0) is used for
+ * all queues. Thus, when enabling AF_XDP we must accommodate up to 9 DPBPs
+ * object: the default and 8 other distinct buffer pools, one for each queue.
+ */
+#define DPAA2_ETH_DEFAULT_BP_IDX	0
+#define DPAA2_ETH_MAX_BPS		9
+
 /* We are accommodating a skb backpointer and some S/G info
  * in the frame's software annotation. The hardware
  * options are either 0 or 64, so we choose the latter.
@@ -122,6 +137,8 @@ enum dpaa2_eth_swa_type {
 	DPAA2_ETH_SWA_SINGLE,
 	DPAA2_ETH_SWA_SG,
 	DPAA2_ETH_SWA_XDP,
+	DPAA2_ETH_SWA_XSK,
+	DPAA2_ETH_SWA_SW_TSO,
 };
 
 /* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
@@ -142,6 +159,16 @@ struct dpaa2_eth_swa {
 			int dma_size;
 			struct xdp_frame *xdpf;
 		} xdp;
+		struct {
+			struct xdp_buff *xdp_buff;
+			int sgt_size;
+		} xsk;
+		struct {
+			struct sk_buff *skb;
+			int num_sg;
+			int sgt_size;
+			int is_last_fd;
+		} tso;
 	};
 };
 
@@ -354,6 +381,8 @@ struct dpaa2_eth_drv_stats {
 	__u64 tx_conf_bytes;
 	__u64 tx_sg_frames;
 	__u64 tx_sg_bytes;
+	__u64 tx_tso_frames;
+	__u64 tx_tso_bytes;
 	__u64 rx_sg_frames;
 	__u64 rx_sg_bytes;
 	/* Linear skbs sent as a S/G FD due to insufficient headroom */
@@ -384,8 +413,12 @@ struct dpaa2_eth_ch_stats {
 	__u64 xdp_redirect;
 	/* Must be last, does not show up in ethtool stats */
 	__u64 frames;
+	__u64 frames_per_cdan;
+	__u64 bytes_per_cdan;
 };
 
+#define DPAA2_ETH_CH_STATS	7
+
 /* Maximum number of queues associated with a DPNI */
 #define DPAA2_ETH_MAX_TCS		8
 #define DPAA2_ETH_MAX_RX_QUEUES_PER_TC	16
@@ -408,12 +441,19 @@ enum dpaa2_eth_fq_type {
 };
 
 struct dpaa2_eth_priv;
+struct dpaa2_eth_channel;
+struct dpaa2_eth_fq;
 
 struct dpaa2_eth_xdp_fds {
 	struct dpaa2_fd fds[DEV_MAP_BULK_SIZE];
 	ssize_t num;
 };
 
+typedef void dpaa2_eth_consume_cb_t(struct dpaa2_eth_priv *priv,
+				    struct dpaa2_eth_channel *ch,
+				    const struct dpaa2_fd *fd,
+				    struct dpaa2_eth_fq *fq);
+
 struct dpaa2_eth_fq {
 	u32 fqid;
 	u32 tx_qdbin;
@@ -426,10 +466,7 @@ struct dpaa2_eth_fq {
 	struct dpaa2_eth_channel *channel;
 	enum dpaa2_eth_fq_type type;
 
-	void (*consume)(struct dpaa2_eth_priv *priv,
-			struct dpaa2_eth_channel *ch,
-			const struct dpaa2_fd *fd,
-			struct dpaa2_eth_fq *fq);
+	dpaa2_eth_consume_cb_t *consume;
 	struct dpaa2_eth_fq_stats stats;
 
 	struct dpaa2_eth_xdp_fds xdp_redirect_fds;
@@ -441,6 +478,11 @@ struct dpaa2_eth_ch_xdp {
 	unsigned int res;
 };
 
+struct dpaa2_eth_bp {
+	struct fsl_mc_device *dev;
+	int bpid;
+};
+
 struct dpaa2_eth_channel {
 	struct dpaa2_io_notification_ctx nctx;
 	struct fsl_mc_device *dpcon;
@@ -459,6 +501,11 @@ struct dpaa2_eth_channel {
 	/* Buffers to be recycled back in the buffer pool */
 	u64 recycled_bufs[DPAA2_ETH_BUFS_PER_CMD];
 	int recycled_bufs_cnt;
+
+	bool xsk_zc;
+	int xsk_tx_pkts_sent;
+	struct xsk_buff_pool *xsk_pool;
+	struct dpaa2_eth_bp *bp;
 };
 
 struct dpaa2_eth_dist_fields {
@@ -489,8 +536,15 @@ struct dpaa2_eth_trap_data {
 	struct dpaa2_eth_priv *priv;
 };
 
+#define DPAA2_ETH_SG_ENTRIES_MAX	(PAGE_SIZE / sizeof(struct scatterlist))
+
 #define DPAA2_ETH_DEFAULT_COPYBREAK	512
 
+#define DPAA2_ETH_ENQUEUE_MAX_FDS	256
+struct dpaa2_eth_fds {
+	struct dpaa2_fd array[DPAA2_ETH_ENQUEUE_MAX_FDS];
+};
+
 /* Driver private data */
 struct dpaa2_eth_priv {
 	struct net_device *net_dev;
@@ -506,20 +560,25 @@ struct dpaa2_eth_priv {
 	u8 num_channels;
 	struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
 	struct dpaa2_eth_sgt_cache __percpu *sgt_cache;
-
+	unsigned long features;
 	struct dpni_attr dpni_attrs;
 	u16 dpni_ver_major;
 	u16 dpni_ver_minor;
 	u16 tx_data_offset;
-
-	struct fsl_mc_device *dpbp_dev;
+	void __iomem *onestep_reg_base;
+	u8 ptp_correction_off;
+	void (*dpaa2_set_onestep_params_cb)(struct dpaa2_eth_priv *priv,
+					    u32 offset, u8 udp);
 	u16 rx_buf_size;
-	u16 bpid;
 	struct iommu_domain *iommu_domain;
 
 	enum hwtstamp_tx_types tx_tstamp_type;	/* Tx timestamping type */
 	bool rx_tstamp;				/* Rx timestamping enabled */
 
+	/* Buffer pool management */
+	struct dpaa2_eth_bp *bp[DPAA2_ETH_MAX_BPS];
+	int num_bps;
+
 	u16 tx_qdid;
 	struct fsl_mc_io *mc_io;
 	/* Cores which have an affine DPIO/DPCON.
@@ -557,6 +616,8 @@
 #endif
 	struct dpaa2_mac *mac;
+	/* Serializes changes to priv->mac */
+	struct mutex mac_lock;
 	struct workqueue_struct *dpaa2_ptp_wq;
 	struct work_struct tx_onestep_tstamp;
 	struct sk_buff_head tx_skbs;
@@ -573,6 +634,8 @@ struct dpaa2_eth_priv {
 	struct devlink_port devlink_port;
 
 	u32 rx_copybreak;
+
+	struct dpaa2_eth_fds __percpu *fd;
 };
 
 struct dpaa2_eth_devlink_priv {
@@ -651,6 +714,13 @@ enum dpaa2_eth_rx_dist {
 #define DPAA2_ETH_DIST_L4DST		BIT(8)
 #define DPAA2_ETH_DIST_ALL		(~0ULL)
 
+#define DPNI_PTP_ONESTEP_VER_MAJOR 8
+#define DPNI_PTP_ONESTEP_VER_MINOR 2
+#define DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT BIT(0)
+#define DPAA2_PTP_SINGLE_STEP_ENABLE	BIT(31)
+#define DPAA2_PTP_SINGLE_STEP_CH	BIT(7)
+#define DPAA2_PTP_SINGLE_CORRECTION_OFF(v) ((v) << 8)
+
 #define DPNI_PAUSE_VER_MAJOR		7
 #define DPNI_PAUSE_VER_MINOR		13
 #define dpaa2_eth_has_pause_support(priv) \
@@ -670,7 +740,7 @@ static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
 
 static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb)
 {
-	unsigned int headroom = DPAA2_ETH_SWA_SIZE;
+	unsigned int headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
 
 	/* If we don't have an skb (e.g. XDP buffer), we only need space for
 	 * the software annotation area
@@ -701,16 +771,15 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
 
 static inline bool dpaa2_eth_is_type_phy(struct dpaa2_eth_priv *priv)
 {
-	if (priv->mac &&
-	    (priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
-	     priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE))
-		return true;
+	lockdep_assert_held(&priv->mac_lock);
 
-	return false;
+	return dpaa2_mac_is_type_phy(priv->mac);
 }
 
 static inline bool dpaa2_eth_has_mac(struct dpaa2_eth_priv *priv)
 {
+	lockdep_assert_held(&priv->mac_lock);
+
 	return priv->mac ? true : false;
 }
 
@@ -725,7 +794,10 @@ void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
 
 extern const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops;
 
-int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv);
+int dpaa2_eth_dl_alloc(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_dl_free(struct dpaa2_eth_priv *priv);
+
+void dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv);
 void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv);
 
 int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv);
@@ -736,4 +808,54 @@ void dpaa2_eth_dl_traps_unregister(struct dpaa2_eth_priv *priv);
 struct dpaa2_eth_trap_item *dpaa2_eth_dl_get_trap(struct dpaa2_eth_priv *priv,
 						  struct dpaa2_fapr *fapr);
 
+
+struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp);
+
+struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
+				    struct dpaa2_eth_channel *ch,
+				    const struct dpaa2_fd *fd, u32 fd_length,
+				    void *fd_vaddr);
+
+void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv,
+			   struct dpaa2_eth_channel *ch,
+			   const struct dpaa2_fd *fd, void *vaddr,
+			   struct dpaa2_eth_fq *fq,
+			   struct rtnl_link_stats64 *percpu_stats,
+			   struct sk_buff *skb);
+
+void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+		  struct dpaa2_eth_channel *ch,
+		  const struct dpaa2_fd *fd,
+		  struct dpaa2_eth_fq *fq);
+
+struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv,
+			 struct dpaa2_eth_bp *bp);
+
+void *dpaa2_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr);
+void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
+			   struct dpaa2_eth_channel *ch,
+			   dma_addr_t addr);
+
+void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
+			   struct dpaa2_eth_channel *ch,
+			   struct dpaa2_fd *fd,
+			   void *buf_start, u16 queue_id);
+
+int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
+int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid);
+
+void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
+			  struct dpaa2_eth_channel *ch,
+			  struct dpaa2_eth_fq *fq,
+			  const struct dpaa2_fd *fd, bool in_napi);
+bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv,
+		  struct dpaa2_eth_channel *ch);
+
+/* SGT (Scatter-Gather Table) cache management */
+void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv);
+
+void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf);
+
 #endif	/* __DPAA2_H */
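
Editor's note: the new DPAA2_ETH_DEFAULT_BP_IDX / DPAA2_ETH_MAX_BPS defines, the dpaa2_eth_bp structure and the dpaa2_eth_allocate_dpbp()/dpaa2_eth_free_dpbp() helpers allow a channel entering AF_XDP zero-copy mode to own a distinct DPBP. A minimal sketch of how a channel might be switched to its own pool; the helper name, call site and the assumption that dpaa2_eth_allocate_dpbp() returns an ERR_PTR-encoded pointer on failure are illustrative, not taken from the patch:

#include <linux/err.h>
#include <linux/errno.h>
#include "dpaa2-eth.h"

/* Illustrative only: give a zero-copy channel its own buffer pool,
 * otherwise fall back to the default DPBP (index 0).
 */
static int dpaa2_eth_assign_channel_bp(struct dpaa2_eth_priv *priv,
				       struct dpaa2_eth_channel *ch,
				       bool zero_copy)
{
	struct dpaa2_eth_bp *bp;

	if (!zero_copy) {
		ch->bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX];
		return 0;
	}

	/* One default pool plus at most one pool per queue */
	if (priv->num_bps >= DPAA2_ETH_MAX_BPS)
		return -EINVAL;

	bp = dpaa2_eth_allocate_dpbp(priv);
	if (IS_ERR(bp))		/* error convention assumed, see lead-in */
		return PTR_ERR(bp);

	priv->bp[priv->num_bps++] = bp;
	ch->bp = bp;
	ch->xsk_zc = true;

	return 0;
}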
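Editor's note: the open-coded consume function pointer in struct dpaa2_eth_fq is now expressed through the dpaa2_eth_consume_cb_t typedef, and dpaa2_eth_rx() is exported with a matching signature. A sketch of how a frame queue could be wired up and dispatched; both helper names below are hypothetical:

#include "dpaa2-eth.h"

/* Hypothetical setup: point an Rx frame queue at the generic Rx consumer.
 * dpaa2_eth_rx() matches dpaa2_eth_consume_cb_t, so no cast is needed.
 */
static void dpaa2_eth_fq_set_consume(struct dpaa2_eth_fq *fq)
{
	fq->consume = dpaa2_eth_rx;
}

/* Hypothetical dispatch: the dequeue loop only sees the typedef'ed pointer. */
static void dpaa2_eth_dispatch_fd(struct dpaa2_eth_priv *priv,
				  struct dpaa2_eth_channel *ch,
				  const struct dpaa2_fd *fd,
				  struct dpaa2_eth_fq *fq)
{
	fq->consume(priv, ch, fd, fq);
}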
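Editor's note: dpaa2_eth_is_type_phy() and dpaa2_eth_has_mac() now assert via lockdep that the new priv->mac_lock mutex is held, so callers are expected to take it around any access to priv->mac. A hedged example of such a caller; the function name is hypothetical:

#include <linux/mutex.h>
#include "dpaa2-eth.h"

/* Hypothetical caller: serialize the priv->mac check against
 * DPMAC connect/disconnect, which also takes priv->mac_lock.
 */
static bool dpaa2_eth_mac_present(struct dpaa2_eth_priv *priv)
{
	bool has_mac;

	mutex_lock(&priv->mac_lock);
	has_mac = dpaa2_eth_has_mac(priv);
	mutex_unlock(&priv->mac_lock);

	return has_mac;
}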
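Editor's note: the DPAA2_PTP_SINGLE_STEP_* macros describe a bit layout for direct one-step timestamp configuration (enable in bit 31, UDP checksum update in bit 7, correction offset starting at bit 8), written through the new onestep_reg_base mapping. A sketch of how that value might be composed from priv->ptp_correction_off; the helper below is an assumption, the real write sequence lives in the driver's .c file:

#include <linux/types.h>
#include "dpaa2-eth.h"

/* Hypothetical helper: build the one-step timestamping control word. */
static u32 dpaa2_eth_onestep_reg_value(struct dpaa2_eth_priv *priv, bool udp)
{
	u32 val = DPAA2_PTP_SINGLE_STEP_ENABLE |
		  DPAA2_PTP_SINGLE_CORRECTION_OFF(priv->ptp_correction_off);

	if (udp)
		val |= DPAA2_PTP_SINGLE_STEP_CH;	/* update UDP checksum */

	return val;
}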
