Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h')
-rw-r--r-- | drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 152
1 file changed, 109 insertions, 43 deletions
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 7e16a341ec58..ca0e6ab12ceb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -21,6 +21,7 @@
 #include <linux/time64.h>
 #include <linux/dim.h>
 #include <uapi/linux/if_macsec.h>
+#include <net/page_pool/helpers.h>
 
 #include <mbox.h>
 #include <npc.h>
@@ -29,6 +30,8 @@
 #include "otx2_devlink.h"
 #include <rvu_trace.h>
 #include "qos.h"
+#include "rep.h"
+#include "cn10k_ipsec.h"
 
 /* IPv4 flag more fragment bit */
 #define IPV4_FLAG_MORE 0x20
@@ -39,8 +42,11 @@
 #define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
 
 #define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
+#define PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF 0xB900
 #define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00
 
+#define PCI_DEVID_OCTEONTX2_SDP_REP 0xA0F7
+
 /* PCI BAR nos */
 #define PCI_CFG_REG_BAR_NUM 2
 #define PCI_MBOX_BAR_NUM 4
@@ -52,6 +58,9 @@
 #define NIX_PF_PFC_PRIO_MAX 8
 #endif
 
+/* Number of segments per SG structure */
+#define MAX_SEGS_PER_SG 3
+
 enum arua_mapped_qtypes {
 	AURA_NIX_RQ,
 	AURA_NIX_SQ,
@@ -120,31 +129,10 @@
 	ERRCODE_IL4_CSUM = 0x22,
 };
 
-/* NIX TX stats */
-enum nix_stat_lf_tx {
-	TX_UCAST = 0x0,
-	TX_BCAST = 0x1,
-	TX_MCAST = 0x2,
-	TX_DROP = 0x3,
-	TX_OCTS = 0x4,
-	TX_STATS_ENUM_LAST,
-};
-
-/* NIX RX stats */
-enum nix_stat_lf_rx {
-	RX_OCTS = 0x0,
-	RX_UCAST = 0x1,
-	RX_BCAST = 0x2,
-	RX_MCAST = 0x3,
-	RX_DROP = 0x4,
-	RX_DROP_OCTS = 0x5,
-	RX_FCS = 0x6,
-	RX_ERR = 0x7,
-	RX_DRP_BCAST = 0x8,
-	RX_DRP_MCAST = 0x9,
-	RX_DRP_L3BCAST = 0xa,
-	RX_DRP_L3MCAST = 0xb,
-	RX_STATS_ENUM_LAST,
+enum otx2_xdp_action {
+	OTX2_XDP_TX = BIT(0),
+	OTX2_XDP_REDIRECT = BIT(1),
+	OTX2_AF_XDP_FRAME = BIT(2),
 };
 
 struct otx2_dev_stats {
@@ -224,15 +212,19 @@
 
 	/* NIX */
 	u8 txschq_link_cfg_lvl;
+	u8 txschq_cnt[NIX_TXSCH_LVL_CNT];
 	u8 txschq_aggr_lvl_rr_prio;
 	u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
 	u16 matchall_ipolicer;
 	u32 dwrr_mtu;
+	u32 max_mtu;
 	u8 smq_link_type;
 
 	/* HW settings, coalescing etc */
 	u16 rx_chan_base;
 	u16 tx_chan_base;
+	u8 rx_chan_cnt;
+	u8 tx_chan_cnt;
 	u16 cq_qcount_wait;
 	u16 cq_ecount_wait;
 	u16 rq_skid;
@@ -346,12 +338,9 @@
 	u16 *def_ent;
 	u16 nr_flows;
#define OTX2_DEFAULT_FLOWCOUNT 16
-#define OTX2_MAX_UNICAST_FLOWS 8
+#define OTX2_DEFAULT_UNICAST_FLOWS 4
#define OTX2_MAX_VLAN_FLOWS 1
#define OTX2_MAX_TC_FLOWS OTX2_DEFAULT_FLOWCOUNT
-#define OTX2_MCAM_COUNT (OTX2_DEFAULT_FLOWCOUNT + \
-			 OTX2_MAX_UNICAST_FLOWS + \
-			 OTX2_MAX_VLAN_FLOWS)
 	u16 unicast_offset;
 	u16 rx_vlan_offset;
 	u16 vf_vlan_offset;
@@ -363,12 +352,16 @@
 	struct list_head flow_list;
 	u32 dmacflt_max_flows;
 	u16 max_flows;
+	refcount_t mark_flows;
 	struct list_head flow_list_tc;
+	u8 ucast_flt_cnt;
 	bool ntuple;
+	u16 ntuple_cnt;
 };
 
 struct dev_hw_ops {
-	int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
+	int (*sq_aq_init)(void *dev, u16 qidx, u8 chan_offset,
+			  u16 sqb_aura);
 	void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
			  int size, int qidx);
 	int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
@@ -465,6 +458,10 @@
#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
#define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15)
#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16)
+#define OTX2_FLAG_TC_MARK_ENABLED BIT_ULL(17)
+#define OTX2_FLAG_REP_MODE_ENABLED BIT_ULL(18)
+#define OTX2_FLAG_PORT_UP BIT_ULL(19)
+#define OTX2_FLAG_IPSEC_OFFLOAD_ENABLED BIT_ULL(20)
 	u64 flags;
 	u64 *cq_op_addr;
 
@@ -516,9 +513,9 @@
 
 	/* Devlink */
 	struct otx2_devlink *dl;
-#ifdef CONFIG_DCB
 	/* PFC */
 	u8 pfc_en;
+#ifdef CONFIG_DCB
 	u8 *queue_to_pfc_map;
 	u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
 	bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
@@ -532,11 +529,24 @@
#if IS_ENABLED(CONFIG_MACSEC)
 	struct cn10k_mcs_cfg *macsec_cfg;
#endif
+
+#if IS_ENABLED(CONFIG_RVU_ESWITCH)
+	struct rep_dev **reps;
+	int rep_cnt;
+	u16 rep_pf_map[RVU_MAX_REP];
+	u16 esw_mode;
+#endif
+
+	/* Inline ipsec */
+	struct cn10k_ipsec ipsec;
+	/* af_xdp zero-copy */
+	unsigned long *af_xdp_zc_qidx;
 };
 
 static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
 {
-	return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;
+	return (pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF) ||
+	       (pdev->device == PCI_DEVID_RVU_REP);
 }
 
 static inline bool is_96xx_A0(struct pci_dev *pdev)
@@ -551,6 +561,11 @@ static inline bool is_96xx_B0(struct pci_dev *pdev)
 		(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
 }
 
+static inline bool is_otx2_sdp_rep(struct pci_dev *pdev)
+{
+	return pdev->device == PCI_DEVID_OCTEONTX2_SDP_REP;
+}
+
 /* REVID for PCIe devices.
  * Bits 0..1: minor pass, bit 3..2: major pass
  * bits 7..4: midr id
@@ -576,6 +591,15 @@ static inline bool is_dev_cn10kb(struct pci_dev *pdev)
 	return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
 }
 
+static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
+{
+	if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF &&
+	    (pdev->revision & 0xFF) == 0x54)
+		return true;
+
+	return false;
+}
+
 static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
 {
 	struct otx2_hw *hw = &pfvf->hw;
@@ -608,9 +632,6 @@
 		__set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
 		__set_bit(QOS_CIR_PIR_SUPPORT, &hw->cap_flag);
 	}
-
-	if (is_dev_cn10kb(pfvf->pdev))
-		__set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
 }
 
 /* Register read/write APIs */
@@ -625,6 +646,9 @@ static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset)
 	case BLKTYPE_NPA:
 		blkaddr = BLKADDR_NPA;
 		break;
+	case BLKTYPE_CPT:
+		blkaddr = BLKADDR_CPT0;
+		break;
 	default:
 		blkaddr = BLKADDR_RVUM;
 		break;
@@ -845,6 +869,7 @@ static struct _req_type __maybe_unused \
 *otx2_mbox_alloc_msg_ ## _fn_name(struct mbox *mbox) \
 { \
 	struct _req_type *req; \
+	u16 pcifunc = mbox->pfvf->pcifunc; \
 	\
 	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
 		&mbox->mbox, 0, sizeof(struct _req_type), \
@@ -853,7 +878,8 @@ static struct _req_type __maybe_unused \
 		return NULL; \
 	req->hdr.sig = OTX2_MBOX_REQ_SIG; \
 	req->hdr.id = _id; \
-	trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req)); \
+	req->hdr.pcifunc = pcifunc; \
+	trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req), pcifunc); \
 	return req; \
 }
 
@@ -913,15 +939,19 @@ static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
 static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
 {
 	u16 smq;
+	int idx;
+
#ifdef CONFIG_DCB
 	if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])
 		return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
#endif
 	/* check if qidx falls under QOS queues */
-	if (qidx >= pfvf->hw.non_qos_queues)
+	if (qidx >= pfvf->hw.non_qos_queues) {
 		smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues];
-	else
-		smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+	} else {
+		idx = qidx % pfvf->hw.txschq_cnt[NIX_TXSCH_LVL_SMQ];
+		smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][idx];
+	}
 
 	return smq;
 }
@@ -961,6 +991,7 @@ void otx2_get_mac_from_af(struct net_device *netdev);
 void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
 int otx2_config_pause_frm(struct otx2_nic *pfvf);
 void otx2_setup_segmentation(struct otx2_nic *pfvf);
+int otx2_reset_mac_stats(struct otx2_nic *pfvf);
 
 /* RVU block related APIs */
 int otx2_attach_npa_nix(struct otx2_nic *pfvf);
@@ -980,21 +1011,39 @@ void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
 void otx2_free_pending_sqe(struct otx2_nic *pfvf);
 void otx2_sqb_flush(struct otx2_nic *pfvf);
 int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
-		    dma_addr_t *dma);
+		    dma_addr_t *dma, int qidx, int idx);
 int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
 void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
 int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
+int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable);
 void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
			  int qidx);
 void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
 int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura);
-int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
-int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
+int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
+int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura);
 int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
		      dma_addr_t *dma);
 int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
		   int stack_pages, int numptrs, int buf_size, int type);
 int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
		   int pool_id, int numptrs);
+int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf);
+void otx2_free_queue_mem(struct otx2_qset *qset);
+int otx2_alloc_queue_mem(struct otx2_nic *pf);
+int otx2_init_hw_resources(struct otx2_nic *pfvf);
+void otx2_free_hw_resources(struct otx2_nic *pf);
+int otx2_wq_init(struct otx2_nic *pf);
+int otx2_check_pf_usable(struct otx2_nic *pf);
+int otx2_pfaf_mbox_init(struct otx2_nic *pf);
+int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af);
+int otx2_realloc_msix_vectors(struct otx2_nic *pf);
+void otx2_pfaf_mbox_destroy(struct otx2_nic *pf);
+void otx2_disable_mbox_intr(struct otx2_nic *pf);
+void otx2_disable_napi(struct otx2_nic *pf);
+irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq);
+int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura);
+int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx);
+int otx2_set_hw_capabilities(struct otx2_nic *pfvf);
 
 /* RSS configuration APIs*/
 int otx2_rss_init(struct otx2_nic *pfvf);
@@ -1057,13 +1106,17 @@ int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
 int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
 int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
 int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
-bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf,
+			    u64 iova, int len, u16 qidx, u16 flags);
+void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, struct xdp_frame *xdpf,
+			 u64 dma_addr, int len, int *offset, u16 flags);
 u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
 int otx2_handle_ntuple_tc_features(struct net_device *netdev,
				   netdev_features_t features);
 int otx2_smq_flush(struct otx2_nic *pfvf, int smq);
 void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    u64 iova, int size);
+int otx2_mcam_entry_init(struct otx2_nic *pfvf);
 
 /* tc support */
 int otx2_init_tc(struct otx2_nic *nic);
@@ -1125,4 +1178,17 @@ u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
 int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid);
 void otx2_qos_config_txschq(struct otx2_nic *pfvf);
 void otx2_clean_qos_queues(struct otx2_nic *pfvf);
+int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info);
+int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
+			     struct flow_cls_offload *cls_flower);
+
+static inline int mcam_entry_cmp(const void *a, const void *b)
+{
+	return *(u16 *)a - *(u16 *)b;
+}
+
+dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf,
+				 struct sk_buff *skb, int seg, int *len);
+void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg);
+int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx);
 #endif /* OTX2_COMMON_H */
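Note: the most behavior-visible change in this diff is in otx2_get_smq_idx(): instead of always returning the first SMQ from txschq_list, non-QoS send queues are now spread across the allocated SMQs with qidx % txschq_cnt[NIX_TXSCH_LVL_SMQ]. The following is a minimal stand-alone user-space sketch of that mapping, not driver code; txschq_smq_list, txschq_smq_cnt and pick_smq are hypothetical stand-ins with made-up values for the driver's hw.txschq_* state.

```c
/* Sketch of the round-robin SMQ selection added to otx2_get_smq_idx():
 * non-QoS queues index into the list of allocated SMQs via qidx % count
 * instead of always using entry 0. Values below are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_TXSCHQ_PER_FUNC 8

static uint16_t txschq_smq_list[MAX_TXSCHQ_PER_FUNC] = { 32, 33, 34 };
static uint8_t txschq_smq_cnt = 3;	/* stand-in for hw.txschq_cnt[NIX_TXSCH_LVL_SMQ] */

static uint16_t pick_smq(uint16_t qidx)
{
	/* mirrors the "} else {" branch introduced by the patch */
	return txschq_smq_list[qidx % txschq_smq_cnt];
}

int main(void)
{
	for (uint16_t qidx = 0; qidx < 6; qidx++)
		printf("txq %u -> SMQ %u\n", (unsigned)qidx, (unsigned)pick_smq(qidx));
	return 0;	/* prints SMQs 32 33 34 32 33 34 */
}
```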