Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2')
50 files changed, 6221 insertions, 873 deletions
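Among other things, the diff below adds new AF mailbox messages to mbox.h (NDC_SYNC_OP, GET_REP_CNT, ESW_CFG, REP_EVENT_NOTIFY). As a rough illustration of how a PF driver would issue one of them — a minimal sketch only, not code from this series, assuming the otx2_mbox_alloc_msg_ndc_sync_op() helper that the driver's M() macro table conventionally generates, the PF driver's otx2_sync_mbox_msg() wait helper, and a hypothetical caller name:

static int otx2_flush_lf_caches(struct otx2_nic *pfvf)
{
	struct ndc_sync_op *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	/* Ask AF to write back NDC-cached NIX/NPA LF context to LLC/DRAM */
	req = otx2_mbox_alloc_msg_ndc_sync_op(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->nix_lf_tx_sync = 1;
	req->nix_lf_rx_sync = 1;
	req->npa_lf_sync = 1;

	/* Rings the down-mailbox doorbell and waits for AF's msg_rsp */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

AF-side handling of this request is rvu_mbox_handler_ndc_sync_op() in rvu.c further down in this diff.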
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig index a32d85d6f599..35c4f5f64f58 100644 --- a/drivers/net/ethernet/marvell/octeontx2/Kconfig +++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig @@ -46,3 +46,11 @@ config OCTEONTX2_VF depends on OCTEONTX2_PF help This driver supports Marvell's OcteonTX2 NIC virtual function. + +config RVU_ESWITCH + tristate "Marvell RVU E-Switch support" + depends on OCTEONTX2_PF + default m + help + This driver supports Marvell's RVU E-Switch that + provides internal SRIOV packet steering and switching. diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile index 3cf4c8285c90..ccea37847df8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -11,4 +11,5 @@ rvu_mbox-y := mbox.o rvu_trace.o rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \ - rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o + rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o \ + rvu_rep.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 6c70c8498690..8216f843a7cd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -24,6 +24,8 @@ #define DRV_NAME "Marvell-CGX/RPM" #define DRV_STRING "Marvell CGX/RPM Driver" +#define CGX_RX_STAT_GLOBAL_INDEX 9 + static LIST_HEAD(cgx_list); /* Convert firmware speed encoding to user format(Mbps) */ @@ -110,6 +112,11 @@ struct mac_ops *get_mac_ops(void *cgxd) return ((struct cgx *)cgxd)->mac_ops; } +u32 cgx_get_fifo_len(void *cgxd) +{ + return ((struct cgx *)cgxd)->fifo_len; +} + void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val) { writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) + @@ -207,6 +214,24 @@ u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id) return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT; } +static u8 cgx_get_nix_resetbit(struct cgx *cgx) +{ + int first_lmac; + u8 p2x; + + /* non 98XX silicons supports only NIX0 block */ + if (cgx->pdev->subsystem_device != PCI_SUBSYS_DEVID_98XX) + return CGX_NIX0_RESET; + + first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac); + p2x = cgx_lmac_get_p2x(cgx->cgx_id, first_lmac); + + if (p2x == CMR_P2X_SEL_NIX1) + return CGX_NIX1_RESET; + else + return CGX_NIX0_RESET; +} + /* Ensure the required lock for event queue(where asynchronous events are * posted) is acquired before calling this API. 
Else an asynchronous event(with * latest link status) can reach the destination before this function returns @@ -499,7 +524,7 @@ static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id) u8 num_lmacs; u32 fifo_len; - fifo_len = cgx->mac_ops->fifo_len; + fifo_len = cgx->fifo_len; num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx); switch (num_lmacs) { @@ -701,6 +726,30 @@ u64 cgx_features_get(void *cgxd) return ((struct cgx *)cgxd)->hw_features; } +int cgx_stats_reset(void *cgxd, int lmac_id) +{ + struct cgx *cgx = cgxd; + int stat_id; + + if (!is_lmac_valid(cgx, lmac_id)) + return -ENODEV; + + for (stat_id = 0 ; stat_id < CGX_RX_STATS_COUNT; stat_id++) { + if (stat_id >= CGX_RX_STAT_GLOBAL_INDEX) + /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */ + cgx_write(cgx, 0, + (CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0); + else + cgx_write(cgx, lmac_id, + (CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0); + } + + for (stat_id = 0 ; stat_id < CGX_TX_STATS_COUNT; stat_id++) + cgx_write(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (stat_id * 8), 0); + + return 0; +} + static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo) { if (!linfo->fec) @@ -808,6 +857,11 @@ static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id, if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; + cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); + cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK; + cfg |= rx_pause ? CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK : 0x0; + cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg); + cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK; cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0; @@ -1338,7 +1392,7 @@ static irqreturn_t cgx_fwi_event_handler(int irq, void *data) /* Release thread waiting for completion */ lmac->cmd_pend = false; - wake_up_interruptible(&lmac->wq_cmd_cmplt); + wake_up(&lmac->wq_cmd_cmplt); break; case CGX_EVT_ASYNC: if (cgx_event_is_linkevent(event)) @@ -1688,6 +1742,8 @@ static int cgx_lmac_init(struct cgx *cgx) lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id); } + /* Start X2P reset on given MAC block */ + cgx->mac_ops->mac_x2p_reset(cgx, true); return cgx_lmac_verify_fwi_version(cgx); err_bitmap_free: @@ -1733,7 +1789,7 @@ static void cgx_populate_features(struct cgx *cgx) u64 cfg; cfg = cgx_read(cgx, 0, CGX_CONST); - cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg); + cgx->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg); cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg); if (is_dev_rpm(cgx)) @@ -1753,6 +1809,45 @@ static u8 cgx_get_rxid_mapoffset(struct cgx *cgx) return 0x60; } +static void cgx_x2p_reset(void *cgxd, bool enable) +{ + struct cgx *cgx = cgxd; + int lmac_id; + u64 cfg; + + if (enable) { + for_each_set_bit(lmac_id, &cgx->lmac_bmap, cgx->max_lmac_per_mac) + cgx->mac_ops->mac_enadis_rx(cgx, lmac_id, false); + + usleep_range(1000, 2000); + + cfg = cgx_read(cgx, 0, CGXX_CMR_GLOBAL_CONFIG); + cfg |= cgx_get_nix_resetbit(cgx) | CGX_NSCI_DROP; + cgx_write(cgx, 0, CGXX_CMR_GLOBAL_CONFIG, cfg); + } else { + cfg = cgx_read(cgx, 0, CGXX_CMR_GLOBAL_CONFIG); + cfg &= ~(cgx_get_nix_resetbit(cgx) | CGX_NSCI_DROP); + cgx_write(cgx, 0, CGXX_CMR_GLOBAL_CONFIG, cfg); + } +} + +static int cgx_enadis_rx(void *cgxd, int lmac_id, bool enable) +{ + struct cgx *cgx = cgxd; + u64 cfg; + + if (!is_lmac_valid(cgx, lmac_id)) + return -ENODEV; + + cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); + if (enable) + cfg |= DATA_PKT_RX_EN; + else + cfg &= ~DATA_PKT_RX_EN; + cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg); + return 0; +} + 
static struct mac_ops cgx_mac_ops = { .name = "cgx", .csr_offset = 0, @@ -1783,6 +1878,9 @@ static struct mac_ops cgx_mac_ops = { .pfc_config = cgx_lmac_pfc_config, .mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg, .mac_reset = cgx_lmac_reset, + .mac_stats_reset = cgx_stats_reset, + .mac_x2p_reset = cgx_x2p_reset, + .mac_enadis_rx = cgx_enadis_rx, }; static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h index 6f7d1dee5830..1cf12e5c7da8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -32,6 +32,10 @@ #define CGX_LMAC_TYPE_MASK 0xF #define CGXX_CMRX_INT 0x040 #define FW_CGX_INT BIT_ULL(1) +#define CGXX_CMR_GLOBAL_CONFIG 0x08 +#define CGX_NIX0_RESET BIT_ULL(2) +#define CGX_NIX1_RESET BIT_ULL(3) +#define CGX_NSCI_DROP BIT_ULL(9) #define CGXX_CMRX_INT_ENA_W1S 0x058 #define CGXX_CMRX_RX_ID_MAP 0x060 #define CGXX_CMRX_RX_STAT0 0x070 @@ -141,6 +145,7 @@ int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id); int cgx_lmac_evh_unregister(void *cgxd, int lmac_id); int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat); int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat); +int cgx_stats_reset(void *cgxd, int lmac_id); int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable); int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable); int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr); @@ -184,4 +189,5 @@ int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause, int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause, int pfvf_idx); int cgx_lmac_reset(void *cgxd, int lmac_id, u8 pf_req_flr); +u32 cgx_get_fifo_len(void *cgxd); #endif /* CGX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index 2436c1ff9ba4..406c59100a35 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -156,8 +156,10 @@ enum nix_scheduler { #define NIC_HW_MIN_FRS 40 #define NIC_HW_MAX_FRS 9212 #define SDP_HW_MAX_FRS 65535 +#define SDP_HW_MIN_FRS 16 #define CN10K_LMAC_LINK_MAX_FRS 16380 /* 16k - FCS */ #define CN10K_LBK_LINK_MAX_FRS 65535 /* 64k */ +#define SDP_LINK_CREDIT 0x320202 /* NIX RX action operation*/ #define NIX_RX_ACTIONOP_DROP (0x0ull) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h index 0b4cba03f2e8..6180e68e1765 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h @@ -72,7 +72,6 @@ struct mac_ops { u8 irq_offset; u8 int_ena_bit; u8 lmac_fwi; - u32 fifo_len; bool non_contiguous_serdes_lane; /* RPM & CGX differs in number of Receive/transmit stats */ u8 rx_stats_cnt; @@ -132,6 +131,9 @@ struct mac_ops { /* FEC stats */ int (*get_fec_stats)(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp); + int (*mac_stats_reset)(void *cgxd, int lmac_id); + void (*mac_x2p_reset)(void *cgxd, bool enable); + int (*mac_enadis_rx)(void *cgxd, int lmac_id, bool enable); }; struct cgx { @@ -141,6 +143,10 @@ struct cgx { u8 lmac_count; /* number of LMACs per MAC could be 4 or 8 */ u8 max_lmac_per_mac; + /* length of fifo varies depending on the number + * of LMACS + */ + u32 fifo_len; #define MAX_LMAC_COUNT 8 struct lmac *lmac_idmap[MAX_LMAC_COUNT]; 
struct work_struct cgx_cmd_work; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c index b92264d0a77e..1e5aa5397504 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c @@ -214,11 +214,12 @@ int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid) } EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp); -void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid) +static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data) { struct otx2_mbox_dev *mdev = &mbox->dev[devid]; struct mbox_hdr *tx_hdr, *rx_hdr; void *hw_mbase = mdev->hwbase; + u64 intr_val; tx_hdr = hw_mbase + mbox->tx_start; rx_hdr = hw_mbase + mbox->rx_start; @@ -254,14 +255,52 @@ void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid) spin_unlock(&mdev->mbox_lock); + /* Check if interrupt pending */ + intr_val = readq((void __iomem *)mbox->reg_base + + (mbox->trigger | (devid << mbox->tr_shift))); + + intr_val |= data; /* The interrupt should be fired after num_msgs is written * to the shared memory */ - writeq(1, (void __iomem *)mbox->reg_base + + writeq(intr_val, (void __iomem *)mbox->reg_base + (mbox->trigger | (devid << mbox->tr_shift))); } + +void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid) +{ + otx2_mbox_msg_send_data(mbox, devid, MBOX_DOWN_MSG); +} EXPORT_SYMBOL(otx2_mbox_msg_send); +void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid) +{ + otx2_mbox_msg_send_data(mbox, devid, MBOX_UP_MSG); +} +EXPORT_SYMBOL(otx2_mbox_msg_send_up); + +bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid) +{ + u64 data; + + data = readq((void __iomem *)mbox->reg_base + + (mbox->trigger | (devid << mbox->tr_shift))); + + /* If data is non-zero wait for ~1ms and return to caller + * whether data has changed to zero or not after the wait. 
+ */ + if (!data) + return true; + + usleep_range(950, 1000); + + data = readq((void __iomem *)mbox->reg_base + + (mbox->trigger | (devid << mbox->tr_shift))); + + return data == 0; +} +EXPORT_SYMBOL(otx2_mbox_wait_for_zero); + struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, int size, int size_rsp) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index edeb0f737312..005ca8a056c0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -16,6 +16,9 @@ #define MBOX_SIZE SZ_64K +#define MBOX_DOWN_MSG 1 +#define MBOX_UP_MSG 2 + /* AF/PF: PF initiated, PF/VF VF initiated */ #define MBOX_DOWN_RX_START 0 #define MBOX_DOWN_RX_SIZE (46 * SZ_1K) @@ -101,6 +104,7 @@ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase, struct pci_dev *pdev, void __force *reg_base, int direction, int ndevs, unsigned long *bmap); void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid); +void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid); int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid); int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid); struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid, @@ -118,6 +122,8 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox, return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0); } +bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid); + /* Mailbox message types */ #define MBOX_MSG_MASK 0xFFFF #define MBOX_MSG_INVALID 0xFFFE @@ -133,10 +139,14 @@ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \ M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \ M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \ M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \ +M(NDC_SYNC_OP, 0x009, ndc_sync_op, ndc_sync_op, msg_rsp) \ M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \ msg_rsp) \ M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \ M(PTP_GET_CAP, 0x00c, ptp_get_cap, msg_req, ptp_get_cap_rsp) \ +M(GET_REP_CNT, 0x00d, get_rep_cnt, msg_req, get_rep_cnt_rsp) \ +M(ESW_CFG, 0x00e, esw_cfg, esw_cfg_req, msg_rsp) \ +M(REP_EVENT_NOTIFY, 0x00f, rep_event_notify, rep_event, msg_rsp) \ /* CGX mbox IDs (range 0x200 - 0x3FF) */ \ M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \ M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \ @@ -168,6 +178,7 @@ M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \ M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,\ cgx_set_link_mode_rsp) \ M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \ +M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp) \ M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \ cgx_features_info_msg) \ M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \ @@ -302,6 +313,10 @@ M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \ msg_rsp) \ M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \ nix_bandprof_get_hwinfo_rsp) \ +M(NIX_CPT_BP_ENABLE, 0x8020, nix_cpt_bp_enable, nix_bp_cfg_req, \ + nix_bp_cfg_rsp) \ +M(NIX_CPT_BP_DISABLE, 0x8021, nix_cpt_bp_disable, nix_bp_cfg_req, \ + msg_rsp) \ M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \ msg_req, nix_inline_ipsec_cfg) \ M(NIX_MCAST_GRP_CREATE, 0x802b, nix_mcast_grp_create, nix_mcast_grp_create_req, \ @@ -311,6 +326,7 @@ M(NIX_MCAST_GRP_DESTROY, 0x802c, 
nix_mcast_grp_destroy, nix_mcast_grp_destroy_re M(NIX_MCAST_GRP_UPDATE, 0x802d, nix_mcast_grp_update, \ nix_mcast_grp_update_req, \ nix_mcast_grp_update_rsp) \ +M(NIX_LF_STATS, 0x802e, nix_lf_stats, nix_stats_req, nix_stats_rsp) \ /* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \ M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \ mcs_alloc_rsrc_rsp) \ @@ -372,12 +388,16 @@ M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp) #define MBOX_UP_MCS_MESSAGES \ M(MCS_INTR_NOTIFY, 0xE00, mcs_intr_notify, mcs_intr_info, msg_rsp) +#define MBOX_UP_REP_MESSAGES \ +M(REP_EVENT_UP_NOTIFY, 0xEF0, rep_event_up_notify, rep_event, msg_rsp) \ + enum { #define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id, MBOX_MESSAGES MBOX_UP_CGX_MESSAGES MBOX_UP_CPT_MESSAGES MBOX_UP_MCS_MESSAGES +MBOX_UP_REP_MESSAGES #undef M }; @@ -837,6 +857,8 @@ enum nix_af_status { NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429, NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430, NIX_AF_ERR_LINK_CREDITS = -431, + NIX_AF_ERR_INVALID_BPID = -434, + NIX_AF_ERR_INVALID_BPID_REQ = -435, NIX_AF_ERR_INVALID_MCAST_GRP = -436, NIX_AF_ERR_INVALID_MCAST_DEL_REQ = -437, NIX_AF_ERR_NON_CONTIG_MCE_LIST = -438, @@ -1114,6 +1136,7 @@ struct nix_rss_flowkey_cfg { #define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15) #define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16) #define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17) +#define NIX_FLOW_KEY_TYPE_CUSTOM0 BIT(19) #define NIX_FLOW_KEY_TYPE_VLAN BIT(20) #define NIX_FLOW_KEY_TYPE_IPV4_PROTO BIT(21) #define NIX_FLOW_KEY_TYPE_AH BIT(22) @@ -1204,10 +1227,8 @@ struct nix_bp_cfg_req { /* bpid_per_chan = 1 assigns separate bp id for each channel */ }; -/* PF can be mapped to either CGX or LBK interface, - * so maximum 64 channels are possible. - */ -#define NIX_MAX_BPID_CHAN 64 +/* Maximum channels any single NIX interface can have */ +#define NIX_MAX_BPID_CHAN 256 struct nix_bp_cfg_rsp { struct mbox_msghdr hdr; u16 chan_bpid[NIX_MAX_BPID_CHAN]; /* Channel and bpid mapping */ @@ -1355,6 +1376,37 @@ struct nix_bandprof_get_hwinfo_rsp { u32 policer_timeunit; }; +struct nix_stats_req { + struct mbox_msghdr hdr; + u8 reset; + u16 pcifunc; + u64 rsvd; +}; + +struct nix_stats_rsp { + struct mbox_msghdr hdr; + u16 pcifunc; + struct { + u64 octs; + u64 ucast; + u64 bcast; + u64 mcast; + u64 drop; + u64 drop_octs; + u64 drop_mcast; + u64 drop_bcast; + u64 err; + u64 rsvd[5]; + } rx; + struct { + u64 ucast; + u64 bcast; + u64 mcast; + u64 drop; + u64 octs; + } tx; +}; + /* NPC mbox message structs */ #define NPC_MCAM_ENTRY_INVALID 0xFFFF @@ -1516,6 +1568,41 @@ struct ptp_get_cap_rsp { u64 cap; }; +struct get_rep_cnt_rsp { + struct mbox_msghdr hdr; + u16 rep_cnt; + u16 rep_pf_map[64]; + u64 rsvd; +}; + +struct esw_cfg_req { + struct mbox_msghdr hdr; + u8 ena; + u64 rsvd; +}; + +struct rep_evt_data { + u8 port_state; + u8 vf_state; + u16 rx_mode; + u16 rx_flags; + u16 mtu; + u8 mac[ETH_ALEN]; + u64 rsvd[5]; +}; + +struct rep_event { + struct mbox_msghdr hdr; + u16 pcifunc; +#define RVU_EVENT_PORT_STATE BIT_ULL(0) +#define RVU_EVENT_PFVF_STATE BIT_ULL(1) +#define RVU_EVENT_MTU_CHANGE BIT_ULL(2) +#define RVU_EVENT_RX_MODE_CHANGE BIT_ULL(3) +#define RVU_EVENT_MAC_ADDR_CHANGE BIT_ULL(4) + u16 event; + struct rep_evt_data evt_data; +}; + struct flow_msg { unsigned char dmac[6]; unsigned char smac[6]; @@ -1553,6 +1640,8 @@ struct flow_msg { u32 mpls_lse[4]; u8 icmp_type; u8 icmp_code; + __be16 tcp_flags; + u16 sq_id; }; struct npc_install_flow_req { @@ -1707,6 +1796,13 @@ struct lmtst_tbl_setup_req { u64 rsvd[4]; }; +struct ndc_sync_op { 
+ struct mbox_msghdr hdr; + u8 nix_lf_tx_sync; + u8 nix_lf_rx_sync; + u8 npa_lf_sync; +}; + /* CPT mailbox error codes * Range 901 - 1000. */ @@ -1736,7 +1832,7 @@ struct cpt_lf_alloc_req_msg { u16 nix_pf_func; u16 sso_pf_func; u16 eng_grpmsk; - int blkaddr; + u8 blkaddr; u8 ctx_ilen_valid : 1; u8 ctx_ilen : 7; }; @@ -1839,8 +1935,9 @@ struct cpt_flt_eng_info_req { struct cpt_flt_eng_info_rsp { struct mbox_msghdr hdr; - u64 flt_eng_map[CPT_10K_AF_INT_VEC_RVU]; - u64 rcvrd_eng_map[CPT_10K_AF_INT_VEC_RVU]; +#define CPT_AF_MAX_FLT_INT_VECS 3 + u64 flt_eng_map[CPT_AF_MAX_FLT_INT_VECS]; + u64 rcvrd_eng_map[CPT_AF_MAX_FLT_INT_VECS]; u64 rsvd; }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c index dfd23580e3b8..d39d86e694cc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c @@ -121,13 +121,17 @@ int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event) static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu) { struct mcs_intr_info *req; - int err, pf; + int pf; pf = rvu_get_pf(event->pcifunc); + mutex_lock(&rvu->mbox_lock); + req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf); - if (!req) + if (!req) { + mutex_unlock(&rvu->mbox_lock); return -ENOMEM; + } req->mcs_id = event->mcs_id; req->intr_mask = event->intr_mask; @@ -135,10 +139,11 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu) req->hdr.pcifunc = event->pcifunc; req->lmac_id = event->lmac_id; - otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf); - err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf); - if (err) - dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf); + otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf); + + otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf); + + mutex_unlock(&rvu->mbox_lock); return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index b0b4dea548e1..6c3aca6f278d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -63,8 +63,13 @@ enum npc_kpu_lb_ltype { NPC_LT_LB_CUSTOM1 = 0xF, }; +/* Don't modify ltypes up to IP6_EXT, otherwise length and checksum of IP + * headers may not be checked correctly. IPv4 ltypes and IPv6 ltypes must + * differ only at bit 0 so mask 0xE can be used to detect extended headers. 
+ */ enum npc_kpu_lc_ltype { - NPC_LT_LC_IP = 1, + NPC_LT_LC_PTP = 1, + NPC_LT_LC_IP, NPC_LT_LC_IP_OPT, NPC_LT_LC_IP6, NPC_LT_LC_IP6_EXT, @@ -72,7 +77,6 @@ enum npc_kpu_lc_ltype { NPC_LT_LC_RARP, NPC_LT_LC_MPLS, NPC_LT_LC_NSH, - NPC_LT_LC_PTP, NPC_LT_LC_FCOE, NPC_LT_LC_NGIO, NPC_LT_LC_CUSTOM0 = 0xE, @@ -85,8 +89,7 @@ enum npc_kpu_lc_ltype { enum npc_kpu_ld_ltype { NPC_LT_LD_TCP = 1, NPC_LT_LD_UDP, - NPC_LT_LD_ICMP, - NPC_LT_LD_SCTP, + NPC_LT_LD_SCTP = 4, NPC_LT_LD_ICMP6, NPC_LT_LD_CUSTOM0, NPC_LT_LD_CUSTOM1, @@ -97,6 +100,7 @@ enum npc_kpu_ld_ltype { NPC_LT_LD_NSH, NPC_LT_LD_TU_MPLS_IN_NSH, NPC_LT_LD_TU_MPLS_IN_IP, + NPC_LT_LD_ICMP, }; enum npc_kpu_le_ltype { @@ -140,14 +144,14 @@ enum npc_kpu_lg_ltype { enum npc_kpu_lh_ltype { NPC_LT_LH_TU_TCP = 1, NPC_LT_LH_TU_UDP, - NPC_LT_LH_TU_ICMP, - NPC_LT_LH_TU_SCTP, + NPC_LT_LH_TU_SCTP = 4, NPC_LT_LH_TU_ICMP6, + NPC_LT_LH_CUSTOM0, + NPC_LT_LH_CUSTOM1, NPC_LT_LH_TU_IGMP = 8, NPC_LT_LH_TU_ESP, NPC_LT_LH_TU_AH, - NPC_LT_LH_CUSTOM0 = 0xE, - NPC_LT_LH_CUSTOM1 = 0xF, + NPC_LT_LH_TU_ICMP = 0xF, }; /* NPC port kind defines how the incoming or outgoing packets @@ -155,10 +159,11 @@ enum npc_kpu_lh_ltype { * Software assigns pkind for each incoming port such as CGX * Ethernet interfaces, LBK interfaces, etc. */ -#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CUSTOM_PRE_L2_PKIND +#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CPT_HDR_PTP_PKIND enum npc_pkind_type { NPC_RX_LBK_PKIND = 0ULL, + NPC_RX_CPT_HDR_PTP_PKIND = 54ULL, NPC_RX_CUSTOM_PRE_L2_PKIND = 55ULL, NPC_RX_VLAN_EXDSA_PKIND = 56ULL, NPC_RX_CHLEN24B_PKIND = 57ULL, @@ -216,6 +221,7 @@ enum key_fields { NPC_MPLS4_TTL, NPC_TYPE_ICMP, NPC_CODE_ICMP, + NPC_TCP_FLAGS, NPC_HEADER_FIELDS_MAX, NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */ NPC_PF_FUNC, /* Valid when Tx */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h index a820bad3abb2..41de72c8607f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h @@ -35,6 +35,7 @@ #define NPC_ETYPE_NSH 0x894f #define NPC_ETYPE_DSA 0xdada #define NPC_ETYPE_PPPOE 0x8864 +#define NPC_ETYPE_ERSPA 0x88be #define NPC_PPP_IP 0x0021 #define NPC_PPP_IP6 0x0057 @@ -59,6 +60,9 @@ #define NPC_IPNH_MPLS 137 #define NPC_IPNH_HOSTID 139 #define NPC_IPNH_SHIM6 140 +#define NPC_IPNH_CUSTOM 253 + +#define NPC_IP6_ROUTE_TYPE 4 #define NPC_UDP_PORT_PTP_E 319 #define NPC_UDP_PORT_PTP_G 320 @@ -187,6 +191,7 @@ enum npc_kpu_parser_state { NPC_S_KPU2_EXDSA, NPC_S_KPU2_CPT_CTAG, NPC_S_KPU2_CPT_QINQ, + NPC_S_KPU2_MT, NPC_S_KPU3_CTAG, NPC_S_KPU3_STAG, NPC_S_KPU3_QINQ, @@ -231,6 +236,7 @@ enum npc_kpu_parser_state { NPC_S_KPU8_ICMP6, NPC_S_KPU8_GRE, NPC_S_KPU8_AH, + NPC_S_KPU8_CUSTOM, NPC_S_KPU9_TU_MPLS_IN_GRE, NPC_S_KPU9_TU_MPLS_IN_NSH, NPC_S_KPU9_TU_MPLS_IN_IP, @@ -242,6 +248,7 @@ enum npc_kpu_parser_state { NPC_S_KPU9_GTPC, NPC_S_KPU9_GTPU, NPC_S_KPU9_ESP, + NPC_S_KPU9_CUSTOM, NPC_S_KPU10_TU_MPLS_IN_VXLANGPE, NPC_S_KPU10_TU_MPLS_PL, NPC_S_KPU10_TU_MPLS, @@ -318,10 +325,10 @@ enum npc_kpu_lc_uflag { NPC_F_LC_U_UNK_PROTO = 0x10, NPC_F_LC_U_IP_FRAG = 0x20, NPC_F_LC_U_IP6_FRAG = 0x40, + NPC_F_LC_L_6TO4 = 0x80, }; enum npc_kpu_lc_lflag { NPC_F_LC_L_IP_IN_IP = 1, - NPC_F_LC_L_6TO4, NPC_F_LC_L_MPLS_IN_IP, NPC_F_LC_L_IP6_TUN_IP6, NPC_F_LC_L_IP6_MPLS_IN_IP, @@ -334,6 +341,8 @@ enum npc_kpu_lc_lflag { NPC_F_LC_L_EXT_MOBILITY, NPC_F_LC_L_EXT_HOSTID, NPC_F_LC_L_EXT_SHIM6, + NPC_F_LC_L_IP6_SRH_SEG_1, + NPC_F_LC_L_IP6_SRH_SEG_2, }; enum npc_kpu_ld_lflag { 
@@ -970,10 +979,10 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, 0, 0, - NPC_S_KPU1_ETHER, 0, 0, + NPC_S_KPU1_CPT_HDR, 48, 0, NPC_LID_LA, NPC_LT_NA, 0, - 0, 0, 0, 0, + 0, 7, 0, 0, }, { @@ -2786,6 +2795,24 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = { 0x0000, }, { + NPC_S_KPU2_MT, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_MT, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { NPC_S_NA, 0X00, 0x0000, 0x0000, @@ -4501,6 +4528,24 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = { 0xff00, NPC_IP_VER_6, NPC_IP_VER_MASK, + (NPC_IP6_ROUTE_TYPE << 8) | 1, + 0xffff, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_ROUT << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + (NPC_IP6_ROUTE_TYPE << 8) | 2, + 0xffff, + }, + { + NPC_S_KPU5_IP6, 0xff, + NPC_IPNH_ROUT << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, 0x0000, 0x0000, }, @@ -4776,6 +4821,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = { }, { NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_CUSTOM, + 0x00ff, + NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, + NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, 0x0000, 0x0000, NPC_IP_VER_4 | NPC_IP_HDR_LEN_5, @@ -4884,6 +4938,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = { }, { NPC_S_KPU5_CPT_IP, 0xff, + NPC_IPNH_CUSTOM, + 0x00ff, + NPC_IP_VER_4, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP, 0xff, 0x0000, 0x0000, NPC_IP_VER_4, @@ -5064,6 +5127,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = { }, { NPC_S_KPU5_CPT_IP6, 0xff, + NPC_IPNH_CUSTOM << 8, + 0xff00, + NPC_IP_VER_6, + NPC_IP_VER_MASK, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU5_CPT_IP6, 0xff, 0x0000, 0x0000, NPC_IP_VER_6, @@ -5208,6 +5280,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = { }, { NPC_S_KPU6_IP6_FRAG, 0xff, + NPC_IPNH_CUSTOM << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_FRAG, 0xff, 0x0000, 0x0000, 0x0000, @@ -5325,6 +5406,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = { }, { NPC_S_KPU6_IP6_HOP_DEST, 0xff, + NPC_IPNH_CUSTOM << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_HOP_DEST, 0xff, 0x0000, 0x0000, 0x0000, @@ -5433,6 +5523,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = { }, { NPC_S_KPU6_IP6_ROUT, 0xff, + NPC_IPNH_CUSTOM << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_ROUT, 0xff, 0x0000, 0x0000, 0x0000, @@ -5532,6 +5631,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = { }, { NPC_S_KPU6_IP6_CPT_FRAG, 0xff, + NPC_IPNH_CUSTOM << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_FRAG, 0xff, 0x0000, 0x0000, 0x0000, @@ -5649,6 +5757,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = { }, { NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff, + NPC_IPNH_CUSTOM << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff, 0x0000, 0x0000, 0x0000, @@ -5757,6 +5874,15 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = { }, { NPC_S_KPU6_IP6_CPT_ROUT, 0xff, + NPC_IPNH_CUSTOM << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU6_IP6_CPT_ROUT, 0xff, 0x0000, 0x0000, 0x0000, @@ -5883,6 +6009,15 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = { }, { NPC_S_KPU7_IP6_ROUT, 0xff, + NPC_IPNH_CUSTOM << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, 
+ 0x0000, + }, + { + NPC_S_KPU7_IP6_ROUT, 0xff, 0x0000, 0x0000, 0x0000, @@ -5982,6 +6117,15 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = { }, { NPC_S_KPU7_IP6_FRAG, 0xff, + NPC_IPNH_CUSTOM << 8, + 0xff00, + 0x0000, + NPC_IP6_FRAG_FRAGOFF, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_IP6_FRAG, 0xff, 0x0000, 0x0000, 0x0000, @@ -6081,6 +6225,15 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = { }, { NPC_S_KPU7_CPT_IP6_FRAG, 0xff, + NPC_IPNH_CUSTOM << 8, + 0xff00, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU7_CPT_IP6_FRAG, 0xff, 0x0000, 0x0000, 0x0000, @@ -6310,6 +6463,15 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = { 0xffff, 0x0000, 0x0000, + 0x0009, + 0xffff, + }, + { + NPC_S_KPU8_UDP, 0xff, + NPC_UDP_PORT_ESP, + 0xffff, + 0x0000, + 0x0000, 0x0000, 0x0000, }, @@ -6756,6 +6918,78 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = { }, { NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_ERSPA, + 0xffff, + 0x0000, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_ERSPA, + 0xffff, + NPC_GRE_F_CSUM, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_ERSPA, + 0xffff, + NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_ERSPA, + 0xffff, + NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_ERSPA, + 0xffff, + NPC_GRE_F_CSUM | NPC_GRE_F_KEY, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_ERSPA, + 0xffff, + NPC_GRE_F_CSUM | NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_ERSPA, + 0xffff, + NPC_GRE_F_KEY | NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, + NPC_ETYPE_ERSPA, + 0xffff, + NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU8_GRE, 0xff, 0x0000, 0xffff, NPC_GRE_F_ROUTE, @@ -6836,6 +7070,15 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = { 0x0000, }, { + NPC_S_KPU8_CUSTOM, 0xff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { NPC_S_NA, 0X00, 0x0000, 0x0000, @@ -7304,6 +7547,24 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = { 0x0000, }, { + NPC_S_KPU9_CUSTOM, 0xff, + 0x4000, + 0xf000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU9_CUSTOM, 0xff, + 0x6000, + 0xf000, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { NPC_S_NA, 0X00, 0x0000, 0x0000, @@ -8384,7 +8645,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 3, 0, + 6, 0, 42, 3, 0, NPC_S_KPU5_IP6, 14, 1, NPC_LID_LA, NPC_LT_LA_ETHER, 0, @@ -8536,7 +8797,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 3, 0, + 6, 0, 42, 3, 0, NPC_S_KPU5_IP6, 22, 1, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, NPC_F_LA_U_HAS_IH_NIX, @@ -8693,7 +8954,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 3, 0, + 6, 0, 42, 3, 0, NPC_S_KPU5_IP6, 30, 1, NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER, NPC_F_LA_U_HAS_HIGIG2, @@ -8818,7 +9079,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 3, 0, + 6, 0, 42, 3, 0, NPC_S_KPU5_IP6, 38, 1, NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2, @@ -8947,7 +9208,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 3, 0, + 6, 0, 42, 3, 0, 
NPC_S_KPU5_IP6, 14, 0, NPC_LID_LA, NPC_LT_NA, 0, @@ -9124,7 +9385,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 2, 0, + 6, 0, 42, 2, 0, NPC_S_KPU5_IP6, 6, 1, NPC_LID_LB, NPC_LT_LB_CTAG, 0, @@ -9204,7 +9465,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 2, 0, + 6, 0, 42, 2, 0, NPC_S_KPU5_IP6, 14, 1, NPC_LID_LB, NPC_LT_LB_PPPOE, 0, @@ -9213,7 +9474,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, - NPC_S_NA, 0, 1, + NPC_S_NA, 6, 1, NPC_LID_LB, NPC_LT_LB_CTAG, NPC_F_LB_U_UNK_ETYPE, 0, 0, 0, 0, @@ -9228,7 +9489,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 2, 0, + 6, 0, 42, 2, 0, NPC_S_KPU5_IP6, 10, 1, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, @@ -9324,7 +9585,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 2, 0, + 6, 0, 42, 2, 0, NPC_S_KPU5_IP6, 24, 1, NPC_LID_LB, NPC_LT_LB_BTAG, NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG, @@ -9428,7 +9689,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 2, 0, + 6, 0, 42, 2, 0, NPC_S_KPU5_IP6, 10, 1, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, @@ -9532,7 +9793,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 2, 0, + 6, 0, 42, 2, 0, NPC_S_KPU5_IP6, 10, 1, NPC_LID_LB, NPC_LT_LB_ETAG, 0, @@ -9628,7 +9889,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 2, 0, + 6, 0, 42, 2, 0, NPC_S_KPU5_IP6, 28, 1, NPC_LID_LB, NPC_LT_LB_ETAG, NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG, @@ -9684,7 +9945,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 2, 0, + 6, 0, 42, 2, 0, NPC_S_KPU5_IP6, 10, 1, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 0, @@ -9757,7 +10018,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, - NPC_S_NA, 0, 1, + NPC_S_NA, 8, 1, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, NPC_F_LB_U_UNK_ETYPE, 0, 0, 0, 0, @@ -9772,7 +10033,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 2, 0, + 6, 0, 42, 2, 0, NPC_S_KPU5_IP6, 18, 1, NPC_LID_LB, NPC_LT_LB_EDSA, NPC_F_LB_L_EDSA, @@ -9836,7 +10097,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 2, 0, + 6, 0, 42, 2, 0, NPC_S_KPU5_IP6, 10, 1, NPC_LID_LB, NPC_LT_LB_EXDSA, NPC_F_LB_L_EXDSA, @@ -9923,6 +10184,22 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = { 0, 0, 0, 0, }, { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 4, 0, 0, 0, 0, + NPC_S_KPU3_CTAG, 0, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU3_CTAG_C, 0, 0, + NPC_LID_LB, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, @@ -9949,7 +10226,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 1, 0, + 6, 0, 42, 1, 0, NPC_S_KPU5_IP6, 6, 0, NPC_LID_LB, NPC_LT_NA, 0, @@ -10029,7 +10306,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 
6, 0, 0, 1, 0, + 6, 0, 42, 1, 0, NPC_S_KPU5_IP6, 8, 0, NPC_LID_LB, NPC_LT_NA, 0, @@ -10101,7 +10378,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 1, 0, + 6, 0, 42, 1, 0, NPC_S_KPU5_IP6, 4, 0, NPC_LID_LB, NPC_LT_NA, 0, @@ -10165,7 +10442,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 1, 0, + 6, 0, 42, 1, 0, NPC_S_KPU5_IP6, 8, 0, NPC_LID_LB, NPC_LT_NA, 0, @@ -10237,7 +10514,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 1, 0, + 6, 0, 42, 1, 0, NPC_S_KPU5_IP6, 4, 0, NPC_LID_LB, NPC_LT_NA, 0, @@ -10310,80 +10587,80 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 6, 1, 0, - NPC_S_KPU5_IP, 4, 1, - NPC_LID_LB, NPC_LT_LB_CTAG, + NPC_S_KPU5_IP, 2, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 1, 0, - NPC_S_KPU5_IP6, 4, 1, - NPC_LID_LB, NPC_LT_LB_CTAG, + 6, 0, 42, 1, 0, + NPC_S_KPU5_IP6, 2, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 1, 0, - NPC_S_KPU5_ARP, 4, 1, - NPC_LID_LB, NPC_LT_LB_CTAG, + NPC_S_KPU5_ARP, 2, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 1, 0, - NPC_S_KPU5_RARP, 4, 1, - NPC_LID_LB, NPC_LT_LB_CTAG, + NPC_S_KPU5_RARP, 2, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 1, 0, - NPC_S_KPU5_PTP, 4, 1, - NPC_LID_LB, NPC_LT_LB_CTAG, + NPC_S_KPU5_PTP, 2, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 1, 0, - NPC_S_KPU5_FCOE, 4, 1, - NPC_LID_LB, NPC_LT_LB_CTAG, + NPC_S_KPU5_FCOE, 2, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, 0, 0, - NPC_S_KPU4_MPLS, 4, 1, - NPC_LID_LB, NPC_LT_LB_CTAG, + NPC_S_KPU4_MPLS, 2, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, 0, 0, - NPC_S_KPU4_MPLS, 4, 1, - NPC_LID_LB, NPC_LT_LB_CTAG, + NPC_S_KPU4_MPLS, 2, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, 0, 0, - NPC_S_KPU4_NSH, 4, 1, - NPC_LID_LB, NPC_LT_LB_CTAG, + NPC_S_KPU4_NSH, 2, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK, 0, 0, 0, 0, 1, - NPC_S_NA, 0, 1, - NPC_LID_LB, NPC_LT_LB_CTAG, + NPC_S_NA, 0, 0, + NPC_LID_LB, NPC_LT_NA, 0, 0, 0, 0, 0, }, @@ -10397,7 +10674,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 1, 0, + 6, 0, 42, 1, 0, NPC_S_KPU5_IP6, 8, 1, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 0, @@ -10469,7 +10746,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 1, 0, + 6, 0, 42, 1, 0, NPC_S_KPU5_IP6, 4, 1, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 0, @@ -10533,7 +10810,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 1, 0, + 6, 0, 42, 1, 0, NPC_S_KPU5_IP6, 8, 1, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 0, @@ -10605,7 +10882,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 1, 0, + 6, 0, 42, 1, 0, NPC_S_KPU5_IP6, 4, 1, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 0, @@ -10685,7 +10962,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 1, 0, + 6, 0, 42, 1, 0, NPC_S_KPU5_IP6, 10, 1, NPC_LID_LB, NPC_LT_LB_DSA, 
NPC_F_LB_L_DSA, @@ -10733,7 +11010,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 1, 0, + 6, 0, 42, 1, 0, NPC_S_KPU5_IP6, 14, 1, NPC_LID_LB, NPC_LT_LB_DSA_VLAN, NPC_F_LB_L_DSA_VLAN, @@ -10894,7 +11171,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 0, 0, + 6, 0, 42, 0, 0, NPC_S_KPU5_IP6, 6, 1, NPC_LID_LB, NPC_LT_LB_FDSA, NPC_F_LB_L_FDSA, @@ -10942,7 +11219,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 0, 0, + 6, 0, 42, 0, 0, NPC_S_KPU5_IP6, 10, 1, NPC_LID_LB, NPC_LT_LB_FDSA, NPC_F_LB_L_FDSA, @@ -10990,7 +11267,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 0, 0, + 6, 0, 42, 0, 0, NPC_S_KPU5_IP6, 14, 1, NPC_LID_LB, NPC_LT_LB_PPPOE, 0, @@ -11014,7 +11291,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 0, 0, + 6, 0, 42, 0, 0, NPC_S_KPU5_IP6, 2, 0, NPC_LID_LC, NPC_LT_NA, 0, @@ -11063,15 +11340,15 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 6, 0, 0, - NPC_S_KPU5_IP, 10, 0, + NPC_S_KPU5_IP, 10, 1, NPC_LID_LB, NPC_LT_LB_PPPOE, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 0, 0, - NPC_S_KPU5_IP6, 10, 0, + 6, 0, 42, 0, 0, + NPC_S_KPU5_IP6, 10, 1, NPC_LID_LB, NPC_LT_LB_PPPOE, 0, 0, 0, 0, 0, @@ -11119,7 +11396,7 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 0, 0, 2, 0, + 2, 0, 4, 2, 0, NPC_S_KPU8_UDP, 20, 1, NPC_LID_LC, NPC_LT_LC_IP, 0, @@ -11223,7 +11500,7 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 8, 10, 2, 0, + 2, 8, 4, 2, 0, NPC_S_KPU8_UDP, 0, 1, NPC_LID_LC, NPC_LT_LC_IP_OPT, 0, @@ -11450,6 +11727,22 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = { 0, 0, 0, 0, 0, NPC_S_KPU6_IP6_ROUT, 40, 1, NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_L_IP6_SRH_SEG_1, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU6_IP6_ROUT, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, + NPC_F_LC_L_IP6_SRH_SEG_2, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU6_IP6_ROUT, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6_EXT, NPC_F_LC_L_EXT_ROUT, 0, 0, 0, 0, }, @@ -11695,6 +11988,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_CUSTOM, 20, 1, + NPC_LID_LC, NPC_LT_LC_IP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, NPC_S_NA, 0, 1, NPC_LID_LC, NPC_LT_LC_IP, @@ -11791,6 +12092,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_CUSTOM, 0, 1, + NPC_LID_LC, NPC_LT_LC_IP_OPT, + 0, + 0, 0xf, 0, 2, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, NPC_S_NA, 0, 1, NPC_LID_LC, NPC_LT_LC_IP_OPT, @@ -11951,6 +12260,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 2, 0, + NPC_S_KPU8_CUSTOM, 40, 1, + NPC_LID_LC, NPC_LT_LC_IP6, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, NPC_S_NA, 0, 1, NPC_LID_LC, NPC_LT_LC_IP6, @@ -12080,6 +12397,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_CUSTOM, 8, 0, + 
NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, NPC_LID_LC, NPC_LT_NA, @@ -12184,6 +12509,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_CUSTOM, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, NPC_LID_LC, NPC_LT_NA, @@ -12280,6 +12613,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_CUSTOM, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, NPC_LID_LC, NPC_LT_NA, @@ -12368,6 +12709,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_CUSTOM, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, NPC_LID_LC, NPC_LT_NA, @@ -12472,6 +12821,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_CUSTOM, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, NPC_LID_LC, NPC_LT_NA, @@ -12568,6 +12925,14 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 1, 0, + NPC_S_KPU8_CUSTOM, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, NPC_LID_LC, NPC_LT_NA, @@ -12681,6 +13046,14 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_CUSTOM, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 1, 0xff, 0, 3, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, NPC_LID_LC, NPC_LT_NA, @@ -12769,6 +13142,14 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_CUSTOM, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, NPC_LID_LC, NPC_LT_NA, @@ -12857,6 +13238,14 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU8_CUSTOM, 8, 0, + NPC_LID_LC, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, NPC_LID_LC, NPC_LT_NA, @@ -13058,6 +13447,14 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, + 0, 0, 0, 0, 1, + NPC_S_NA, 8, 1, + NPC_LID_LD, NPC_LT_LD_UDP, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 0, NPC_S_KPU9_ESP, 8, 1, NPC_LID_LD, NPC_LT_LD_UDP, @@ -13458,6 +13855,70 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 2, 0, + NPC_S_KPU11_TU_ETHER, 12, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 2, 0, + NPC_S_KPU11_TU_ETHER, 16, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 2, 0, + NPC_S_KPU11_TU_ETHER, 16, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_KEY, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 2, 0, + NPC_S_KPU11_TU_ETHER, 16, 1, + NPC_LID_LD, NPC_LT_LD_GRE, 
+ NPC_F_LD_L_GRE_HAS_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 2, 0, + NPC_S_KPU11_TU_ETHER, 20, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_KEY, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 2, 0, + NPC_S_KPU11_TU_ETHER, 20, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 2, 0, + NPC_S_KPU11_TU_ETHER, 20, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_KEY_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 12, 16, 20, 2, 0, + NPC_S_KPU11_TU_ETHER, 24, 1, + NPC_LID_LD, NPC_LT_LD_GRE, + NPC_F_LD_L_GRE_HAS_CSUM_KEY_SEQ, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 0, 1, NPC_S_NA, 0, 1, NPC_LID_LD, NPC_LT_LD_GRE, @@ -13529,6 +13990,14 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = { 0, 0, 0, 0, }, { + NPC_ERRLEV_LD, NPC_EC_NOERR, + 0, 0, 0, 0, 0, + NPC_S_KPU9_CUSTOM, 0, 1, + NPC_LID_LF, NPC_LT_LF_CUSTOM0, + 0, + 0, 0xff, 0, 0, + }, + { NPC_ERRLEV_LD, NPC_EC_UNK, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, @@ -13946,6 +14415,22 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = { 0, 0, 0, 0, }, { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU12_TU_IP, 0, 0, + NPC_LID_LE, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU12_TU_IP6, 0, 0, + NPC_LID_LE, NPC_LT_NA, + 0, + 0, 0, 0, 0, + }, + { NPC_ERRLEV_LE, NPC_EC_UNK, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, @@ -15105,7 +15590,9 @@ static struct npc_lt_def_cfg npc_lt_defaults = { }, .rx_et = { { - .lid = NPC_LID_LB, + .offset = -2, + .valid = 1, + .lid = NPC_LID_LC, .ltype_match = NPC_LT_NA, .ltype_mask = 0x0, }, @@ -15139,6 +15626,12 @@ static struct npc_mcam_kex npc_mkex_default = { /* Ethertype: 2 bytes, KW0[55:40] */ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x5), }, + [NPC_LT_LA_CPT_HDR] = { + /* DMAC: 6 bytes, KW1[55:8] */ + KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC), + /* Ethertype: 2 bytes, KW0[55:40] */ + KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x5), + }, /* Layer A: HiGig2: */ [NPC_LT_LA_HIGIG2_ETHER] = { /* Classification: 2 bytes, KW1[23:8] */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c index 76218f1cb459..2e9945446199 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c @@ -38,6 +38,9 @@ static struct mac_ops rpm_mac_ops = { .pfc_config = rpm_lmac_pfc_config, .mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg, .mac_reset = rpm_lmac_reset, + .mac_stats_reset = rpm_stats_reset, + .mac_x2p_reset = rpm_x2p_reset, + .mac_enadis_rx = rpm_enadis_rx, }; static struct mac_ops rpm2_mac_ops = { @@ -70,6 +73,9 @@ static struct mac_ops rpm2_mac_ops = { .pfc_config = rpm_lmac_pfc_config, .mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg, .mac_reset = rpm_lmac_reset, + .mac_stats_reset = rpm_stats_reset, + .mac_x2p_reset = rpm_x2p_reset, + .mac_enadis_rx = rpm_enadis_rx, }; bool is_dev_rpm2(void *rpmd) @@ -443,6 +449,21 @@ int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat) return 0; } +int rpm_stats_reset(void *rpmd, int lmac_id) +{ + rpm_t *rpm = rpmd; + u64 cfg; + + if (!is_lmac_valid(rpm, lmac_id)) + return -ENODEV; + + cfg = rpm_read(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL); + cfg |= RPMX_CMD_CLEAR_TX | RPMX_CMD_CLEAR_RX | BIT_ULL(lmac_id); + rpm_write(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL, cfg); + + return 0; +} + u8 rpm_get_lmac_type(void *rpmd, int lmac_id) { rpm_t *rpm = 
rpmd; @@ -450,7 +471,7 @@ u8 rpm_get_lmac_type(void *rpmd, int lmac_id) int err; req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_LINK_STS, req); - err = cgx_fwi_cmd_generic(req, &resp, rpm, 0); + err = cgx_fwi_cmd_generic(req, &resp, rpm, lmac_id); if (!err) return FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, resp); return err; @@ -463,7 +484,7 @@ u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id) u8 num_lmacs; u32 fifo_len; - fifo_len = rpm->mac_ops->fifo_len; + fifo_len = rpm->fifo_len; num_lmacs = rpm->mac_ops->get_nr_lmacs(rpm); switch (num_lmacs) { @@ -516,9 +537,9 @@ u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id) */ max_lmac = (rpm_read(rpm, 0, CGX_CONST) >> 24) & 0xFF; if (max_lmac > 4) - fifo_len = rpm->mac_ops->fifo_len / 2; + fifo_len = rpm->fifo_len / 2; else - fifo_len = rpm->mac_ops->fifo_len; + fifo_len = rpm->fifo_len; if (lmac_id < 4) { num_lmacs = hweight8(lmac_info & 0xF); @@ -682,46 +703,51 @@ int rpm_get_fec_stats(void *rpmd, int lmac_id, struct cgx_fec_stats_rsp *rsp) if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE) return 0; + /* latched registers FCFECX_CW_HI/RSFEC_STAT_FAST_DATA_HI_CDC are common + * for all counters. Acquire lock to ensure serialized reads + */ + mutex_lock(&rpm->lock); if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) { - val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_CCW_LO); - val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI); + val_lo = rpm_read(rpm, 0, RPMX_MTI_FCFECX_VL0_CCW_LO(lmac_id)); + val_hi = rpm_read(rpm, 0, RPMX_MTI_FCFECX_CW_HI(lmac_id)); rsp->fec_corr_blks = (val_hi << 16 | val_lo); - val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_NCCW_LO); - val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI); + val_lo = rpm_read(rpm, 0, RPMX_MTI_FCFECX_VL0_NCCW_LO(lmac_id)); + val_hi = rpm_read(rpm, 0, RPMX_MTI_FCFECX_CW_HI(lmac_id)); rsp->fec_uncorr_blks = (val_hi << 16 | val_lo); /* 50G uses 2 Physical serdes lines */ if (rpm->lmac_idmap[lmac_id]->link_info.lmac_type_id == LMAC_MODE_50G_R) { - val_lo = rpm_read(rpm, lmac_id, - RPMX_MTI_FCFECX_VL1_CCW_LO); - val_hi = rpm_read(rpm, lmac_id, - RPMX_MTI_FCFECX_CW_HI); + val_lo = rpm_read(rpm, 0, + RPMX_MTI_FCFECX_VL1_CCW_LO(lmac_id)); + val_hi = rpm_read(rpm, 0, + RPMX_MTI_FCFECX_CW_HI(lmac_id)); rsp->fec_corr_blks += (val_hi << 16 | val_lo); - val_lo = rpm_read(rpm, lmac_id, - RPMX_MTI_FCFECX_VL1_NCCW_LO); - val_hi = rpm_read(rpm, lmac_id, - RPMX_MTI_FCFECX_CW_HI); + val_lo = rpm_read(rpm, 0, + RPMX_MTI_FCFECX_VL1_NCCW_LO(lmac_id)); + val_hi = rpm_read(rpm, 0, + RPMX_MTI_FCFECX_CW_HI(lmac_id)); rsp->fec_uncorr_blks += (val_hi << 16 | val_lo); } } else { /* enable RS-FEC capture */ - cfg = rpm_read(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL); + cfg = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_STATN_CONTROL); cfg |= RPMX_RSFEC_RX_CAPTURE | BIT(lmac_id); - rpm_write(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL, cfg); + rpm_write(rpm, 0, RPMX_MTI_RSFEC_STAT_STATN_CONTROL, cfg); val_lo = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2); - val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC); + val_hi = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_FAST_DATA_HI_CDC); rsp->fec_corr_blks = (val_hi << 32 | val_lo); val_lo = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3); - val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC); + val_hi = rpm_read(rpm, 0, RPMX_MTI_RSFEC_STAT_FAST_DATA_HI_CDC); rsp->fec_uncorr_blks = (val_hi << 32 | val_lo); } + mutex_unlock(&rpm->lock); return 0; } @@ -746,3 +772,41 @@ int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr) return 0; } + +void 
rpm_x2p_reset(void *rpmd, bool enable) +{ + rpm_t *rpm = rpmd; + int lmac_id; + u64 cfg; + + if (enable) { + for_each_set_bit(lmac_id, &rpm->lmac_bmap, rpm->max_lmac_per_mac) + rpm->mac_ops->mac_enadis_rx(rpm, lmac_id, false); + + usleep_range(1000, 2000); + + cfg = rpm_read(rpm, 0, RPMX_CMR_GLOBAL_CFG); + rpm_write(rpm, 0, RPMX_CMR_GLOBAL_CFG, cfg | RPM_NIX0_RESET); + } else { + cfg = rpm_read(rpm, 0, RPMX_CMR_GLOBAL_CFG); + cfg &= ~RPM_NIX0_RESET; + rpm_write(rpm, 0, RPMX_CMR_GLOBAL_CFG, cfg); + } +} + +int rpm_enadis_rx(void *rpmd, int lmac_id, bool enable) +{ + rpm_t *rpm = rpmd; + u64 cfg; + + if (!is_lmac_valid(rpm, lmac_id)) + return -ENODEV; + + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); + if (enable) + cfg |= RPM_RX_EN; + else + cfg &= ~RPM_RX_EN; + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h index b79cfbc6f877..b8d3972e096a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h @@ -17,6 +17,8 @@ /* Registers */ #define RPMX_CMRX_CFG 0x00 +#define RPMX_CMR_GLOBAL_CFG 0x08 +#define RPM_NIX0_RESET BIT_ULL(3) #define RPMX_RX_TS_PREPEND BIT_ULL(22) #define RPMX_TX_PTP_1S_SUPPORT BIT_ULL(17) #define RPMX_CMRX_RX_ID_MAP 0x80 @@ -84,14 +86,18 @@ /* FEC stats */ #define RPMX_MTI_STAT_STATN_CONTROL 0x10018 #define RPMX_MTI_STAT_DATA_HI_CDC 0x10038 -#define RPMX_RSFEC_RX_CAPTURE BIT_ULL(27) +#define RPMX_RSFEC_RX_CAPTURE BIT_ULL(28) +#define RPMX_CMD_CLEAR_RX BIT_ULL(30) +#define RPMX_CMD_CLEAR_TX BIT_ULL(31) +#define RPMX_MTI_RSFEC_STAT_STATN_CONTROL 0x40018 +#define RPMX_MTI_RSFEC_STAT_FAST_DATA_HI_CDC 0x40000 #define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2 0x40050 #define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3 0x40058 -#define RPMX_MTI_FCFECX_VL0_CCW_LO 0x38618 -#define RPMX_MTI_FCFECX_VL0_NCCW_LO 0x38620 -#define RPMX_MTI_FCFECX_VL1_CCW_LO 0x38628 -#define RPMX_MTI_FCFECX_VL1_NCCW_LO 0x38630 -#define RPMX_MTI_FCFECX_CW_HI 0x38638 +#define RPMX_MTI_FCFECX_VL0_CCW_LO(a) (0x38618 + ((a) * 0x40)) +#define RPMX_MTI_FCFECX_VL0_NCCW_LO(a) (0x38620 + ((a) * 0x40)) +#define RPMX_MTI_FCFECX_VL1_CCW_LO(a) (0x38628 + ((a) * 0x40)) +#define RPMX_MTI_FCFECX_VL1_NCCW_LO(a) (0x38630 + ((a) * 0x40)) +#define RPMX_MTI_FCFECX_CW_HI(a) (0x38638 + ((a) * 0x40)) /* CN10KB CSR Declaration */ #define RPM2_CMRX_SW_INT 0x1b0 @@ -134,4 +140,7 @@ int rpm2_get_nr_lmacs(void *rpmd); bool is_dev_rpm2(void *rpmd); int rpm_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp); int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr); +int rpm_stats_reset(void *rpmd, int lmac_id); +void rpm_x2p_reset(void *rpmd, bool enable); +int rpm_enadis_rx(void *rpmd, int lmac_id, bool enable); #endif /* RPM_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 5c1d04a3c559..cd0d7b7774f1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -817,6 +817,8 @@ static int rvu_fwdata_init(struct rvu *rvu) err = cgx_get_fwdata_base(&fwdbase); if (err) goto fail; + + BUILD_BUG_ON(offsetof(struct rvu_fwdata, cgx_fw_data) > FWDATA_CGX_LMAC_OFFSET); rvu->fwdata = ioremap_wc(fwdbase, sizeof(struct rvu_fwdata)); if (!rvu->fwdata) goto fail; @@ -1160,6 +1162,7 @@ cpt: } rvu_program_channels(rvu); + cgx_start_linkup(rvu); err = rvu_mcs_init(rvu); if (err) { @@ -1484,7 +1487,7 @@ 
int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) /* All CGX mapped PFs are set with assigned NIX block during init */ if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { blkaddr = pf->nix_blkaddr; - } else if (is_afvf(pcifunc)) { + } else if (is_lbk_vf(rvu, pcifunc)) { vf = pcifunc - 1; /* Assign NIX based on VF number. All even numbered VFs get * NIX0 and odd numbered ones get NIX1 @@ -1641,7 +1644,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu, if (req->ssow > block->lf.max) { dev_err(&rvu->pdev->dev, "Func 0x%x: Invalid SSOW req, %d > max %d\n", - pcifunc, req->sso, block->lf.max); + pcifunc, req->ssow, block->lf.max); return -EINVAL; } mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); @@ -2012,6 +2015,13 @@ int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req, return 0; } +int rvu_ndc_sync(struct rvu *rvu, int lfblkaddr, int lfidx, u64 lfoffset) +{ + /* Sync cached info for this LF in NDC to LLC/DRAM */ + rvu_write64(rvu, lfblkaddr, lfoffset, BIT_ULL(12) | lfidx); + return rvu_poll_reg(rvu, lfblkaddr, lfoffset, BIT_ULL(12), true); +} + int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req, struct get_hw_cap_rsp *rsp) { @@ -2034,7 +2044,7 @@ int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req, u16 target; /* Only PF can add VF permissions */ - if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc)) + if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_lbk_vf(rvu, pcifunc)) return -EOPNOTSUPP; target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1); @@ -2066,6 +2076,65 @@ int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req, return 0; } +int rvu_mbox_handler_ndc_sync_op(struct rvu *rvu, + struct ndc_sync_op *req, + struct msg_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + int err, lfidx, lfblkaddr; + + if (req->npa_lf_sync) { + /* Get NPA LF data */ + lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc); + if (lfblkaddr < 0) + return NPA_AF_ERR_AF_LF_INVALID; + + lfidx = rvu_get_lf(rvu, &hw->block[lfblkaddr], pcifunc, 0); + if (lfidx < 0) + return NPA_AF_ERR_AF_LF_INVALID; + + /* Sync NPA NDC */ + err = rvu_ndc_sync(rvu, lfblkaddr, + lfidx, NPA_AF_NDC_SYNC); + if (err) + dev_err(rvu->dev, + "NDC-NPA sync failed for LF %u\n", lfidx); + } + + if (!req->nix_lf_tx_sync && !req->nix_lf_rx_sync) + return 0; + + /* Get NIX LF data */ + lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (lfblkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + lfidx = rvu_get_lf(rvu, &hw->block[lfblkaddr], pcifunc, 0); + if (lfidx < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + if (req->nix_lf_tx_sync) { + /* Sync NIX TX NDC */ + err = rvu_ndc_sync(rvu, lfblkaddr, + lfidx, NIX_AF_NDC_TX_SYNC); + if (err) + dev_err(rvu->dev, + "NDC-NIX-TX sync failed for LF %u\n", lfidx); + } + + if (req->nix_lf_rx_sync) { + /* Sync NIX RX NDC */ + err = rvu_ndc_sync(rvu, lfblkaddr, + lfidx, NIX_AF_NDC_RX_SYNC); + if (err) + dev_err(rvu->dev, + "NDC-NIX-RX sync failed for LF %u\n", lfidx); + } + + return 0; +} + static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid, struct mbox_msghdr *req) { @@ -2117,7 +2186,7 @@ bad_message: } } -static void __rvu_mbox_handler(struct rvu_work *mwork, int type) +static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll) { struct rvu *rvu = mwork->rvu; int offset, err, id, devid; @@ -2184,6 +2253,9 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type) } mw->mbox_wrk[devid].num_msgs = 0; + if (poll) + otx2_mbox_wait_for_zero(mbox, devid); +
/* Send mbox responses to VF/PF */ otx2_mbox_msg_send(mbox, devid); } @@ -2191,15 +2263,18 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type) static inline void rvu_afpf_mbox_handler(struct work_struct *work) { struct rvu_work *mwork = container_of(work, struct rvu_work, work); + struct rvu *rvu = mwork->rvu; - __rvu_mbox_handler(mwork, TYPE_AFPF); + mutex_lock(&rvu->mbox_lock); + __rvu_mbox_handler(mwork, TYPE_AFPF, true); + mutex_unlock(&rvu->mbox_lock); } static inline void rvu_afvf_mbox_handler(struct work_struct *work) { struct rvu_work *mwork = container_of(work, struct rvu_work, work); - __rvu_mbox_handler(mwork, TYPE_AFVF); + __rvu_mbox_handler(mwork, TYPE_AFVF, false); } static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type) @@ -2374,6 +2449,8 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, } } + mutex_init(&rvu->mbox_lock); + mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL); if (!mbox_regions) { err = -ENOMEM; @@ -2403,9 +2480,9 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, goto free_regions; } - mw->mbox_wq = alloc_workqueue(name, + mw->mbox_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, - num); + num, name); if (!mw->mbox_wq) { err = -ENOMEM; goto unmap_regions; @@ -2523,10 +2600,9 @@ static void rvu_queue_work(struct mbox_wq_info *mw, int first, } } -static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) +static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq) { struct rvu *rvu = (struct rvu *)rvu_irq; - int vfs = rvu->vfs; u64 intr; intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT); @@ -2540,6 +2616,18 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr); + return IRQ_HANDLED; +} + +static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) +{ + struct rvu *rvu = (struct rvu *)rvu_irq; + int vfs = rvu->vfs; + u64 intr; + + /* Sync with mbox memory region */ + rmb(); + /* Handle VF interrupts */ if (vfs > 64) { intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1)); @@ -2618,6 +2706,9 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc) * 3. 
Cleanup pools (NPA) */ + /* Free allocated BPIDs */ + rvu_nix_flr_free_bpids(rvu, pcifunc); + /* Free multicast/mirror node associated with the 'pcifunc' */ rvu_nix_mcast_flr_free_entries(rvu, pcifunc); @@ -2881,7 +2972,7 @@ static int rvu_register_interrupts(struct rvu *rvu) /* Register mailbox interrupt handler */ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox"); ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX), - rvu_mbox_intr_handler, 0, + rvu_mbox_pf_intr_handler, 0, &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu); if (ret) { dev_err(rvu->dev, @@ -3151,6 +3242,7 @@ static int rvu_enable_sriov(struct rvu *rvu) { struct pci_dev *pdev = rvu->pdev; int err, chans, vfs; + int pos = 0; if (!rvu_afvf_msix_vectors_num_ok(rvu)) { dev_warn(&pdev->dev, @@ -3158,6 +3250,12 @@ static int rvu_enable_sriov(struct rvu *rvu) return 0; } + /* Get RVU VFs device id */ + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return 0; + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &rvu->vf_devid); + chans = rvu_get_num_lbk_chans(); if (chans < 0) return chans; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 43be37dd1f32..a383b5ef5b2d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -76,6 +76,7 @@ struct rvu_debugfs { struct dump_ctx nix_cq_ctx; struct dump_ctx nix_rq_ctx; struct dump_ctx nix_sq_ctx; + struct dump_ctx nix_tm_ctx; struct cpt_ctx cpt_ctx[MAX_CPT_BLKS]; int npa_qsize_id; int nix_qsize_id; @@ -288,6 +289,16 @@ enum rvu_pfvf_flags { #define RVU_CLEAR_VF_PERM ~GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC) +struct nix_bp { + struct rsrc_bmap bpids; /* free bpids bitmap */ + u16 cgx_bpid_cnt; + u16 sdp_bpid_cnt; + u16 free_pool_base; + u16 *fn_map; /* pcifunc mapping */ + u8 *intf_map; /* interface type map */ + u8 *ref_cnt; +}; + struct nix_txsch { struct rsrc_bmap schq; u8 lvl; @@ -308,6 +319,7 @@ struct nix_mark_format { /* smq(flush) to tl1 cir/pir info */ struct nix_smq_tree_ctx { + u16 schq; u64 cir_off; u64 cir_val; u64 pir_off; @@ -317,8 +329,6 @@ struct nix_smq_tree_ctx { /* smq flush context */ struct nix_smq_flush_ctx { int smq; - u16 tl1_schq; - u16 tl2_schq; struct nix_smq_tree_ctx smq_tree_ctx[NIX_TXSCH_LVL_CNT]; }; @@ -363,6 +373,7 @@ struct nix_hw { struct nix_lso lso; struct nix_txvlan txvlan; struct nix_ipolicer *ipolicer; + struct nix_bp bp; u64 *tx_credits; u8 cc_mcs_cnt; }; @@ -388,6 +399,7 @@ struct hw_cap { bool nix_multiple_dwrr_mtu; /* Multiple DWRR_MTU to choose from */ bool npc_hash_extract; /* Hash extract enabled ? */ bool npc_exact_match_enabled; /* Exact match supported ? 
*/ + bool cpt_rxc; /* Is CPT-RXC supported */ }; struct rvu_hwinfo { @@ -432,6 +444,13 @@ struct mbox_wq_info { struct workqueue_struct *mbox_wq; }; +struct channel_fwdata { + struct sdp_node_info info; + u8 valid; +#define RVU_CHANL_INFO_RESERVED 379 + u8 reserved[RVU_CHANL_INFO_RESERVED]; +}; + struct rvu_fwdata { #define RVU_FWDATA_HEADER_MAGIC 0xCFDA /* Custom Firmware Data*/ #define RVU_FWDATA_VERSION 0x0001 @@ -450,11 +469,13 @@ struct rvu_fwdata { u64 msixtr_base; u32 ptp_ext_clk_rate; u32 ptp_ext_tstamp; -#define FWDATA_RESERVED_MEM 1022 + struct channel_fwdata channel_data; +#define FWDATA_RESERVED_MEM 958 u64 reserved[FWDATA_RESERVED_MEM]; #define CGX_MAX 9 #define CGX_LMACS_MAX 4 #define CGX_LMACS_USX 8 +#define FWDATA_CGX_LMAC_OFFSET 10536 union { struct cgx_lmac_fwdata_s cgx_fw_data[CGX_MAX][CGX_LMACS_MAX]; @@ -492,6 +513,11 @@ struct rvu_switch { u16 start_entry; }; +struct rep_evtq_ent { + struct list_head node; + struct rep_event event; +}; + struct rvu { void __iomem *afreg_base; void __iomem *pfreg_base; @@ -503,6 +529,8 @@ struct rvu { struct mutex rsrc_lock; /* Serialize resource alloc/free */ struct mutex alias_lock; /* Serialize bar2 alias access */ int vfs; /* Number of VFs attached to RVU */ + u16 vf_devid; /* VF devices id */ + bool def_rule_cntr_en; int nix_blkaddr[MAX_NIX_BLKS]; /* Mbox */ @@ -570,6 +598,17 @@ struct rvu { spinlock_t mcs_intrq_lock; /* CPT interrupt lock */ spinlock_t cpt_intr_lock; + + struct mutex mbox_lock; /* Serialize mbox up and down msgs */ + u16 rep_pcifunc; + int rep_cnt; + u16 *rep2pfvf_map; + u8 rep_mode; + struct work_struct rep_evt_work; + struct workqueue_struct *rep_evt_wq; + struct list_head rep_evtq_head; + /* Representor event lock */ + spinlock_t rep_evtq_lock; }; static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) @@ -666,6 +705,35 @@ static inline bool is_cnf10ka_a0(struct rvu *rvu) return false; } +static inline bool is_cn10ka_a0(struct rvu *rvu) +{ + struct pci_dev *pdev = rvu->pdev; + + if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A && + (pdev->revision & 0x0F) == 0x0) + return true; + return false; +} + +static inline bool is_cn10ka_a1(struct rvu *rvu) +{ + struct pci_dev *pdev = rvu->pdev; + + if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A && + (pdev->revision & 0x0F) == 0x1) + return true; + return false; +} + +static inline bool is_cn10kb(struct rvu *rvu) +{ + struct pci_dev *pdev = rvu->pdev; + + if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B) + return true; + return false; +} + static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu) { u64 npc_const3; @@ -732,9 +800,11 @@ static inline bool is_rvu_supports_nix1(struct rvu *rvu) /* Function Prototypes * RVU */ -static inline bool is_afvf(u16 pcifunc) +#define RVU_LBK_VF_DEVID 0xA0F8 +static inline bool is_lbk_vf(struct rvu *rvu, u16 pcifunc) { - return !(pcifunc & ~RVU_PFVF_FUNC_MASK); + return (!(pcifunc & ~RVU_PFVF_FUNC_MASK) && + (rvu->vf_devid == RVU_LBK_VF_DEVID)); } static inline bool is_vf(u16 pcifunc) @@ -774,6 +844,7 @@ int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf); int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc); int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero); int rvu_get_num_lbk_chans(void); +int rvu_ndc_sync(struct rvu *rvu, int lfblkid, int lfidx, u64 lfoffset); int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc, u16 global_slot, u16 *slot_in_block); @@ -794,7 +865,15 @@ void rvu_aq_free(struct rvu *rvu, struct 
admin_queue *aq); int rvu_sdp_init(struct rvu *rvu); bool is_sdp_pfvf(u16 pcifunc); bool is_sdp_pf(u16 pcifunc); -bool is_sdp_vf(u16 pcifunc); +bool is_sdp_vf(struct rvu *rvu, u16 pcifunc); + +static inline bool is_rep_dev(struct rvu *rvu, u16 pcifunc) +{ + if (rvu->rep_pcifunc && rvu->rep_pcifunc == pcifunc) + return true; + + return false; +} /* CGX APIs */ static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf) @@ -873,6 +952,7 @@ int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx); int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx, u16 mcam_index); +void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc); /* NPC APIs */ void rvu_npc_freemem(struct rvu *rvu); @@ -903,7 +983,11 @@ void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, int group, int alg_idx, int mcam_index); - +void __rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc, + struct rvu_npc_mcam_rule *rule); +void __rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc, + struct rvu_npc_mcam_rule *rule, + struct npc_install_flow_rsp *rsp); void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc, int blkaddr, int *alloc_cnt, int *enable_cnt); @@ -928,6 +1012,7 @@ void npc_set_mcam_action(struct rvu *rvu, struct npc_mcam *mcam, void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, u16 src, struct mcam_entry *entry, u8 *intf, u8 *ena); +int npc_config_cntr_default_entries(struct rvu *rvu, bool enable); bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc); bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature); u32 rvu_cgx_get_fifolen(struct rvu *rvu); @@ -940,6 +1025,7 @@ int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_ int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause); void rvu_mac_reset(struct rvu *rvu, u16 pcifunc); u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac); +void cgx_start_linkup(struct rvu *rvu); int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf, int type); bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, @@ -988,7 +1074,8 @@ int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr); /* RVU Switch */ void rvu_switch_enable(struct rvu *rvu); void rvu_switch_disable(struct rvu *rvu); -void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc); +void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc, bool ena); +void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool ena); int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir, u64 pkind, u8 var_len_off, u8 var_len_off_mask, @@ -1001,4 +1088,9 @@ int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc); void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena); void rvu_mcs_exit(struct rvu *rvu); +/* Representor APIs */ +int rvu_rep_pf_init(struct rvu *rvu); +int rvu_rep_install_mcam_rules(struct rvu *rvu); +void rvu_rep_update_rules(struct rvu *rvu, u16 pcifunc, bool ena); +int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable); #endif /* RVU_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 38acdc7a73bb..992fa0b82e8d 100644 --- 
a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -160,6 +160,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu) continue; lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu)); for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) { + if (iter >= MAX_LMAC_COUNT) + continue; lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu), iter); rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac); @@ -232,7 +234,7 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu) struct cgx_link_user_info *linfo; struct cgx_link_info_msg *msg; unsigned long pfmap; - int err, pfid; + int pfid; linfo = &event->link_uinfo; pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id); @@ -255,16 +257,22 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu) continue; } + mutex_lock(&rvu->mbox_lock); + /* Send mbox message to PF */ msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid); - if (!msg) + if (!msg) { + mutex_unlock(&rvu->mbox_lock); continue; + } + msg->link_info = *linfo; - otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid); - err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid); - if (err) - dev_warn(rvu->dev, "notification to pf %d failed\n", - pfid); + + otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pfid); + + otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid); + + mutex_unlock(&rvu->mbox_lock); } while (pfmap); } @@ -341,6 +349,7 @@ static void rvu_cgx_wq_destroy(struct rvu *rvu) int rvu_cgx_init(struct rvu *rvu) { + struct mac_ops *mac_ops; int cgx, err; void *cgxd; @@ -367,6 +376,15 @@ int rvu_cgx_init(struct rvu *rvu) if (err) return err; + /* Clear X2P reset on all MAC blocks */ + for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) { + cgxd = rvu_cgx_pdata(cgx, rvu); + if (!cgxd) + continue; + mac_ops = get_mac_ops(cgxd); + mac_ops->mac_x2p_reset(cgxd, false); + } + /* Register for CGX events */ err = cgx_lmac_event_handler_init(rvu); if (err) @@ -374,10 +392,26 @@ int rvu_cgx_init(struct rvu *rvu) mutex_init(&rvu->cgx_cfg_lock); - /* Ensure event handler registration is completed, before - * we turn on the links - */ - mb(); + return 0; +} + +void cgx_start_linkup(struct rvu *rvu) +{ + unsigned long lmac_bmap; + struct mac_ops *mac_ops; + int cgx, lmac, err; + void *cgxd; + + /* Enable receive on all LMACs */ + for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) { + cgxd = rvu_cgx_pdata(cgx, rvu); + if (!cgxd) + continue; + mac_ops = get_mac_ops(cgxd); + lmac_bmap = cgx_get_lmac_bmap(cgxd); + for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) + mac_ops->mac_enadis_rx(cgxd, lmac, true); + } /* Do link up for all CGX ports */ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) { @@ -390,8 +424,6 @@ int rvu_cgx_init(struct rvu *rvu) "Link up process failed to start on cgx %d\n", cgx); } - - return 0; } int rvu_cgx_exit(struct rvu *rvu) @@ -596,6 +628,35 @@ int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req, return rvu_lmac_get_stats(rvu, req, (void *)rsp); } +int rvu_mbox_handler_cgx_stats_rst(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + struct rvu_pfvf *parent_pf; + struct mac_ops *mac_ops; + u8 cgx_idx, lmac; + void *cgxd; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return LMAC_AF_ERR_PERM_DENIED; + + parent_pf = &rvu->pf[pf]; + /* To ensure that resetting CGX stats won't affect VF stats, + * check whether it is used only by the PF interface. 
+ * If not, return + */ + if (parent_pf->cgx_users > 1) { + dev_info(rvu->dev, "CGX busy, could not reset statistics\n"); + return 0; + } + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); + cgxd = rvu_cgx_pdata(cgx_idx, rvu); + mac_ops = get_mac_ops(cgxd); + + return mac_ops->mac_stats_reset(cgxd, lmac); +} + int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu, struct msg_req *req, struct cgx_fec_stats_rsp *rsp) @@ -886,13 +947,12 @@ int rvu_mbox_handler_cgx_features_get(struct rvu *rvu, u32 rvu_cgx_get_fifolen(struct rvu *rvu) { - struct mac_ops *mac_ops; - u32 fifo_len; + void *cgxd = rvu_first_cgx_pdata(rvu); - mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu)); - fifo_len = mac_ops ? mac_ops->fifo_len : 0; + if (!cgxd) + return 0; - return fifo_len; + return cgx_get_fifo_len(cgxd); } u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c index f047185f38e0..3c5bbaf12e59 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c @@ -19,6 +19,12 @@ /* Length of initial context fetch in 128 byte words */ #define CPT_CTX_ILEN 1ULL +/* Interrupt vector count of CPT RVU and RAS interrupts */ +#define CPT_10K_AF_RVU_RAS_INT_VEC_CNT 2 + +/* Default CPT_AF_RXC_CFG1:max_rxc_icb_cnt */ +#define CPT_DFLT_MAX_RXC_ICB_CNT 0xC0ULL + #define cpt_get_eng_sts(e_min, e_max, rsp, etype) \ ({ \ u64 free_sts = 0, busy_sts = 0; \ @@ -37,6 +43,41 @@ (_rsp)->free_sts_##etype = free_sts; \ }) +#define MAX_AE GENMASK_ULL(47, 32) +#define MAX_IE GENMASK_ULL(31, 16) +#define MAX_SE GENMASK_ULL(15, 0) + +static u16 cpt_max_engines_get(struct rvu *rvu) +{ + u16 max_ses, max_ies, max_aes; + u64 reg; + + reg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS1); + max_ses = FIELD_GET(MAX_SE, reg); + max_ies = FIELD_GET(MAX_IE, reg); + max_aes = FIELD_GET(MAX_AE, reg); + + return max_ses + max_ies + max_aes; +} + +/* The number of flt interrupt vectors depends on the number of engines that + * the chip has. Each flt vector represents 64 engines. + */ +static int cpt_10k_flt_nvecs_get(struct rvu *rvu, u16 max_engs) +{ + int flt_vecs; + + flt_vecs = DIV_ROUND_UP(max_engs, 64); + + if (flt_vecs > CPT_10K_AF_INT_VEC_FLT_MAX) { + dev_warn_once(rvu->dev, "flt_vecs:%d exceeds the max vectors:%d\n", + flt_vecs, CPT_10K_AF_INT_VEC_FLT_MAX); + flt_vecs = CPT_10K_AF_INT_VEC_FLT_MAX; + } + + return flt_vecs; +} + static irqreturn_t cpt_af_flt_intr_handler(int vec, void *ptr) { struct rvu_block *block = ptr; @@ -150,17 +191,26 @@ static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off) { struct rvu *rvu = block->rvu; int blkaddr = block->addr; - int i; + int i, flt_vecs; + u16 max_engs; + u8 nr; + + max_engs = cpt_max_engines_get(rvu); + flt_vecs = cpt_10k_flt_nvecs_get(rvu, max_engs); /* Disable all CPT AF interrupts */ - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(0), ~0ULL); - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(1), ~0ULL); - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(2), 0xFFFF); + for (i = CPT_10K_AF_INT_VEC_FLT0; i < flt_vecs; i++) { + nr = (max_engs > 64) ? 64 : max_engs; + max_engs -= nr; + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), + INTR_MASK(nr)); + } rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1); rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1); - for (i = 0; i < CPT_10K_AF_INT_VEC_CNT; i++) + /* CPT AF interrupt vectors are flt_int, rvu_int and ras_int. 
*/ + for (i = 0; i < flt_vecs + CPT_10K_AF_RVU_RAS_INT_VEC_CNT; i++) if (rvu->irq_allocated[off + i]) { free_irq(pci_irq_vector(rvu->pdev, off + i), block); rvu->irq_allocated[off + i] = false; @@ -206,12 +256,18 @@ void rvu_cpt_unregister_interrupts(struct rvu *rvu) static int cpt_10k_register_interrupts(struct rvu_block *block, int off) { + int rvu_intr_vec, ras_intr_vec; struct rvu *rvu = block->rvu; int blkaddr = block->addr; irq_handler_t flt_fn; - int i, ret; + int i, ret, flt_vecs; + u16 max_engs; + u8 nr; + + max_engs = cpt_max_engines_get(rvu); + flt_vecs = cpt_10k_flt_nvecs_get(rvu, max_engs); - for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) { + for (i = CPT_10K_AF_INT_VEC_FLT0; i < flt_vecs; i++) { sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i); switch (i) { @@ -229,20 +285,24 @@ static int cpt_10k_register_interrupts(struct rvu_block *block, int off) flt_fn, &rvu->irq_name[(off + i) * NAME_SIZE]); if (ret) goto err; - if (i == CPT_10K_AF_INT_VEC_FLT2) - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0xFFFF); - else - rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL); + + nr = (max_engs > 64) ? 64 : max_engs; + max_engs -= nr; + rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), + INTR_MASK(nr)); } - ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU, + rvu_intr_vec = flt_vecs; + ras_intr_vec = rvu_intr_vec + 1; + + ret = rvu_cpt_do_register_interrupt(block, off + rvu_intr_vec, rvu_cpt_af_rvu_intr_handler, "CPTAF RVU"); if (ret) goto err; rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1); - ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RAS, + ret = rvu_cpt_do_register_interrupt(block, off + ras_intr_vec, rvu_cpt_af_ras_intr_handler, "CPTAF RAS"); if (ret) @@ -632,7 +692,9 @@ int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu, return ret; } -static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req) +static bool validate_and_update_reg_offset(struct rvu *rvu, + struct cpt_rd_wr_reg_msg *req, + u64 *reg_offset) { u64 offset = req->reg_offset; int blkaddr, num_lfs, lf; @@ -663,6 +725,11 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req) if (lf < 0) return false; + /* Translate local LF's offset to global CPT LF's offset to + * access LFX register. 
+ */ + *reg_offset = (req->reg_offset & 0xFF000) + (lf << 3); + return true; } else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) { /* Registers that can be accessed from PF */ @@ -673,6 +740,7 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req) case CPT_AF_BLK_RST: case CPT_AF_CONSTANTS1: case CPT_AF_CTX_FLUSH_TIMER: + case CPT_AF_RXC_CFG1: return true; } @@ -696,6 +764,7 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req, struct cpt_rd_wr_reg_msg *rsp) { + u64 offset = req->reg_offset; int blkaddr; blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); @@ -707,23 +776,25 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu, !is_cpt_vf(rvu, req->hdr.pcifunc)) return CPT_AF_ERR_ACCESS_DENIED; + if (!validate_and_update_reg_offset(rvu, req, &offset)) + return CPT_AF_ERR_ACCESS_DENIED; + rsp->reg_offset = req->reg_offset; rsp->ret_val = req->ret_val; rsp->is_write = req->is_write; - if (!is_valid_offset(rvu, req)) - return CPT_AF_ERR_ACCESS_DENIED; - if (req->is_write) - rvu_write64(rvu, blkaddr, req->reg_offset, req->val); + rvu_write64(rvu, blkaddr, offset, req->val); else - rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset); + rsp->val = rvu_read64(rvu, blkaddr, offset); return 0; } static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr) { + struct rvu_hwinfo *hw = rvu->hw; + if (is_rvu_otx2(rvu)) return; @@ -747,14 +818,16 @@ static void get_ctx_pc(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr) rsp->ctx_err = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ERR); rsp->ctx_enc_id = rvu_read64(rvu, blkaddr, CPT_AF_CTX_ENC_ID); rsp->ctx_flush_timer = rvu_read64(rvu, blkaddr, CPT_AF_CTX_FLUSH_TIMER); + rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0)); + rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1)); + if (!hw->cap.cpt_rxc) + return; rsp->rxc_time = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME); rsp->rxc_time_cfg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG); rsp->rxc_active_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS); rsp->rxc_zombie_sts = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS); rsp->rxc_dfrg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG); - rsp->x2p_link_cfg0 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0)); - rsp->x2p_link_cfg1 = rvu_read64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1)); } static void get_eng_sts(struct rvu *rvu, struct cpt_sts_rsp *rsp, int blkaddr) @@ -913,13 +986,17 @@ int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_r struct rvu_block *block; unsigned long flags; int blkaddr, vec; + int flt_vecs; + u16 max_engs; blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); if (blkaddr < 0) return blkaddr; block = &rvu->hw->block[blkaddr]; - for (vec = 0; vec < CPT_10K_AF_INT_VEC_RVU; vec++) { + max_engs = cpt_max_engines_get(rvu); + flt_vecs = cpt_10k_flt_nvecs_get(rvu, max_engs); + for (vec = 0; vec < flt_vecs; vec++) { spin_lock_irqsave(&rvu->cpt_intr_lock, flags); rsp->flt_eng_map[vec] = block->cpt_flt_eng_map[vec]; rsp->rcvrd_eng_map[vec] = block->cpt_rcvrd_eng_map[vec]; @@ -935,10 +1012,11 @@ int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_r static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr) { struct cpt_rxc_time_cfg_req req, prev; + struct rvu_hwinfo *hw = rvu->hw; int timeout = 2000; u64 reg; - if (is_rvu_otx2(rvu)) + if (!hw->cap.cpt_rxc) return; /* Set time limit to minimum values, so that rxc entries will be @@ -1211,10 +1289,30 @@ unlock: return 0; } 
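The rvu_cpt_init() hunk just below programs the 9-bit max_rxc_icb_cnt field (bits 40:32) of CPT_AF_RXC_CFG1 with a read-modify-write: clear the field using the GENMASK_ULL() mask, then OR in the new value via FIELD_PREP(). The following minimal standalone sketch illustrates that pattern; the open-coded GENMASK_ULL() and field_prep() helpers are simplified stand-ins for the kernel's <linux/bits.h> and <linux/bitfield.h> macros, and the starting register value is invented for illustration.

/* Illustrative sketch only -- not driver code. Mirrors the
 * read-modify-write done on CPT_AF_RXC_CFG1 in rvu_cpt_init().
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l)	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define MAX_RXC_ICB_CNT			GENMASK_ULL(40, 32)
#define CPT_DFLT_MAX_RXC_ICB_CNT	0xC0ULL

/* FIELD_PREP() equivalent: shift a value into the field position */
static uint64_t field_prep(uint64_t mask, uint64_t val)
{
	return (val << __builtin_ctzll(mask)) & mask;
}

int main(void)
{
	uint64_t reg_val = 0x1FF00000000ULL;	/* pretend old CPT_AF_RXC_CFG1 */

	reg_val &= ~MAX_RXC_ICB_CNT;		/* clear the old field value */
	reg_val |= field_prep(MAX_RXC_ICB_CNT, CPT_DFLT_MAX_RXC_ICB_CNT);

	printf("CPT_AF_RXC_CFG1 = 0x%llx\n", (unsigned long long)reg_val);
	return 0;
}

FIELD_PREP() is preferred over hand-written shifts because the shift amount is derived from the mask itself, keeping the mask definition the single source of truth for the field's position and width.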
+#define MAX_RXC_ICB_CNT GENMASK_ULL(40, 32) + int rvu_cpt_init(struct rvu *rvu) { + struct rvu_hwinfo *hw = rvu->hw; + u64 reg_val; + /* Retrieve CPT PF number */ rvu->cpt_pf_num = get_cpt_pf_num(rvu); + if (is_block_implemented(rvu->hw, BLKADDR_CPT0) && !is_rvu_otx2(rvu) && + !is_cn10kb(rvu)) + hw->cap.cpt_rxc = true; + + if (hw->cap.cpt_rxc && !is_cn10ka_a0(rvu) && !is_cn10ka_a1(rvu)) { + /* Set CPT_AF_RXC_CFG1:max_rxc_icb_cnt to 0xc0 so as not to affect + * inline inbound peak performance + */ + reg_val = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_RXC_CFG1); + reg_val &= ~MAX_RXC_ICB_CNT; + reg_val |= FIELD_PREP(MAX_RXC_ICB_CNT, + CPT_DFLT_MAX_RXC_ICB_CNT); + rvu_write64(rvu, BLKADDR_CPT0, CPT_AF_RXC_CFG1, reg_val); + } + spin_lock_init(&rvu->cpt_intr_lock); return 0; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index e6d7914ce61c..a1f9ec03c2ce 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -45,33 +45,6 @@ enum { CGX_STAT18, }; -/* NIX TX stats */ -enum nix_stat_lf_tx { - TX_UCAST = 0x0, - TX_BCAST = 0x1, - TX_MCAST = 0x2, - TX_DROP = 0x3, - TX_OCTS = 0x4, - TX_STATS_ENUM_LAST, -}; - -/* NIX RX stats */ -enum nix_stat_lf_rx { - RX_OCTS = 0x0, - RX_UCAST = 0x1, - RX_BCAST = 0x2, - RX_MCAST = 0x3, - RX_DROP = 0x4, - RX_DROP_OCTS = 0x5, - RX_FCS = 0x6, - RX_ERR = 0x7, - RX_DRP_BCAST = 0x8, - RX_DRP_MCAST = 0x9, - RX_DRP_L3BCAST = 0xa, - RX_DRP_L3MCAST = 0xb, - RX_STATS_ENUM_LAST, -}; - static char *cgx_rx_stats_fields[] = { [CGX_STAT0] = "Received packets", [CGX_STAT1] = "Octets of received packets", @@ -663,16 +636,16 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL); -static void get_lf_str_list(struct rvu_block block, int pcifunc, +static void get_lf_str_list(const struct rvu_block *block, int pcifunc, char *lfs) { - int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max; + int lf = 0, seq = 0, len = 0, prev_lf = block->lf.max; - for_each_set_bit(lf, block.lf.bmap, block.lf.max) { - if (lf >= block.lf.max) + for_each_set_bit(lf, block->lf.bmap, block->lf.max) { + if (lf >= block->lf.max) break; - if (block.fn_map[lf] != pcifunc) + if (block->fn_map[lf] != pcifunc) continue; if (lf == prev_lf + 1) { @@ -719,7 +692,7 @@ static int get_max_column_width(struct rvu *rvu) if (!strlen(block.name)) continue; - get_lf_str_list(block, pcifunc, buf); + get_lf_str_list(&block, pcifunc, buf); if (lf_str_size <= strlen(buf)) lf_str_size = strlen(buf) + 1; } @@ -803,7 +776,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp, continue; len = 0; lfs[len] = '\0'; - get_lf_str_list(block, pcifunc, lfs); + get_lf_str_list(&block, pcifunc, lfs); if (strlen(lfs)) flag = 1; @@ -838,10 +811,10 @@ RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL); static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) { + char cgx[10], lmac[10], chan[10]; struct rvu *rvu = filp->private; struct pci_dev *pdev = NULL; struct mac_ops *mac_ops; - char cgx[10], lmac[10]; struct rvu_pfvf *pfvf; int pf, domain, blkid; u8 cgx_id, lmac_id; @@ -852,7 +825,7 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) /* There can be no CGX devices at all */ if (!mac_ops) return 0; - seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n", + seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\tCHAN\n", mac_ops->name); for (pf 
= 0; pf < rvu->hw->total_pfs; pf++) { if (!is_pf_cgxmapped(rvu, pf)) @@ -876,8 +849,11 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) &lmac_id); sprintf(cgx, "%s%d", mac_ops->name, cgx_id); sprintf(lmac, "LMAC%d", lmac_id); - seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n", - dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac); + sprintf(chan, "%d", + rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0)); + seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\t%s\n", + dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac, + chan); pci_dev_put(pdev); } @@ -941,19 +917,18 @@ static void print_npa_qsize(struct seq_file *m, struct rvu_pfvf *pfvf) /* The 'qsize' entry dumps current Aura/Pool context Qsize * and each context's current enable/disable status in a bitmap. */ -static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused, +static int rvu_dbg_qsize_display(struct seq_file *s, void *unsused, int blktype) { - void (*print_qsize)(struct seq_file *filp, + void (*print_qsize)(struct seq_file *s, struct rvu_pfvf *pfvf) = NULL; - struct dentry *current_dir; struct rvu_pfvf *pfvf; struct rvu *rvu; int qsize_id; u16 pcifunc; int blkaddr; - rvu = filp->private; + rvu = s->private; switch (blktype) { case BLKTYPE_NPA: qsize_id = rvu->rvu_dbg.npa_qsize_id; @@ -969,42 +944,36 @@ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused, return -EINVAL; } - if (blktype == BLKTYPE_NPA) { + if (blktype == BLKTYPE_NPA) blkaddr = BLKADDR_NPA; - } else { - current_dir = filp->file->f_path.dentry->d_parent; - blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ? - BLKADDR_NIX1 : BLKADDR_NIX0); - } + else + blkaddr = debugfs_get_aux_num(s->file); if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc)) return -EINVAL; pfvf = rvu_get_pfvf(rvu, pcifunc); - print_qsize(filp, pfvf); + print_qsize(s, pfvf); return 0; } -static ssize_t rvu_dbg_qsize_write(struct file *filp, +static ssize_t rvu_dbg_qsize_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos, int blktype) { char *blk_string = (blktype == BLKTYPE_NPA) ? "npa" : "nix"; - struct seq_file *seqfile = filp->private_data; + struct seq_file *seqfile = file->private_data; char *cmd_buf, *cmd_buf_tmp, *subtoken; struct rvu *rvu = seqfile->private; - struct dentry *current_dir; int blkaddr; u16 pcifunc; int ret, lf; - cmd_buf = memdup_user(buffer, count + 1); + cmd_buf = memdup_user_nul(buffer, count); if (IS_ERR(cmd_buf)) return -ENOMEM; - cmd_buf[count] = '\0'; - cmd_buf_tmp = strchr(cmd_buf, '\n'); if (cmd_buf_tmp) { *cmd_buf_tmp = '\0'; @@ -1022,13 +991,10 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp, goto qsize_write_done; } - if (blktype == BLKTYPE_NPA) { + if (blktype == BLKTYPE_NPA) blkaddr = BLKADDR_NPA; - } else { - current_dir = filp->f_path.dentry->d_parent; - blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ? 
- BLKADDR_NIX1 : BLKADDR_NIX0); - } + else + blkaddr = debugfs_get_aux_num(file); if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) { ret = -EINVAL; @@ -1605,6 +1571,367 @@ static void print_nix_cn10k_sq_ctx(struct seq_file *m, (u64)sq_ctx->dropped_pkts); } +static void print_tm_tree(struct seq_file *m, + struct nix_aq_enq_rsp *rsp, u64 sq) +{ + struct nix_sq_ctx_s *sq_ctx = &rsp->sq; + struct nix_hw *nix_hw = m->private; + struct rvu *rvu = nix_hw->rvu; + u16 p1, p2, p3, p4, schq; + int blkaddr; + u64 cfg; + + blkaddr = nix_hw->blkaddr; + schq = sq_ctx->smq; + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq)); + p1 = FIELD_GET(NIX_AF_MDQ_PARENT_MASK, cfg); + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(p1)); + p2 = FIELD_GET(NIX_AF_TL4_PARENT_MASK, cfg); + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(p2)); + p3 = FIELD_GET(NIX_AF_TL3_PARENT_MASK, cfg); + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(p3)); + p4 = FIELD_GET(NIX_AF_TL2_PARENT_MASK, cfg); + seq_printf(m, + "SQ(%llu) -> SMQ(%u) -> TL4(%u) -> TL3(%u) -> TL2(%u) -> TL1(%u)\n", + sq, schq, p1, p2, p3, p4); +} + +/*dumps given tm_tree registers*/ +static int rvu_dbg_nix_tm_tree_display(struct seq_file *m, void *unused) +{ + int qidx, nixlf, rc, id, max_id = 0; + struct nix_hw *nix_hw = m->private; + struct rvu *rvu = nix_hw->rvu; + struct nix_aq_enq_req aq_req; + struct nix_aq_enq_rsp rsp; + struct rvu_pfvf *pfvf; + u16 pcifunc; + + nixlf = rvu->rvu_dbg.nix_tm_ctx.lf; + id = rvu->rvu_dbg.nix_tm_ctx.id; + + if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc)) + return -EINVAL; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + max_id = pfvf->sq_ctx->qsize; + + memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); + aq_req.hdr.pcifunc = pcifunc; + aq_req.ctype = NIX_AQ_CTYPE_SQ; + aq_req.op = NIX_AQ_INSTOP_READ; + seq_printf(m, "pcifunc is 0x%x\n", pcifunc); + for (qidx = id; qidx < max_id; qidx++) { + aq_req.qidx = qidx; + + /* Skip SQ's if not initialized */ + if (!test_bit(qidx, pfvf->sq_bmap)) + continue; + + rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp); + + if (rc) { + seq_printf(m, "Failed to read SQ(%d) context\n", + aq_req.qidx); + continue; + } + print_tm_tree(m, &rsp, aq_req.qidx); + } + return 0; +} + +static ssize_t rvu_dbg_nix_tm_tree_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct nix_hw *nix_hw = m->private; + struct rvu *rvu = nix_hw->rvu; + struct rvu_pfvf *pfvf; + u16 pcifunc; + u64 nixlf; + int ret; + + ret = kstrtoull_from_user(buffer, count, 10, &nixlf); + if (ret) + return ret; + + if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc)) + return -EINVAL; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + if (!pfvf->sq_ctx) { + dev_warn(rvu->dev, "SQ context is not initialized\n"); + return -EINVAL; + } + + rvu->rvu_dbg.nix_tm_ctx.lf = nixlf; + return count; +} + +RVU_DEBUG_SEQ_FOPS(nix_tm_tree, nix_tm_tree_display, nix_tm_tree_write); + +static void print_tm_topo(struct seq_file *m, u64 schq, u32 lvl) +{ + struct nix_hw *nix_hw = m->private; + struct rvu *rvu = nix_hw->rvu; + int blkaddr, link, link_level; + struct rvu_hwinfo *hw; + + hw = rvu->hw; + blkaddr = nix_hw->blkaddr; + if (lvl == NIX_TXSCH_LVL_MDQ) { + seq_printf(m, "NIX_AF_SMQ[%llu]_CFG =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq))); + seq_printf(m, "NIX_AF_SMQ[%llu]_STATUS =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_SMQX_STATUS(schq))); + seq_printf(m, "NIX_AF_MDQ[%llu]_OUT_MD_COUNT 
=0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_MDQX_OUT_MD_COUNT(schq))); + seq_printf(m, "NIX_AF_MDQ[%llu]_SCHEDULE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_MDQX_SCHEDULE(schq))); + seq_printf(m, "NIX_AF_MDQ[%llu]_SHAPE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SHAPE(schq))); + seq_printf(m, "NIX_AF_MDQ[%llu]_CIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_MDQX_CIR(schq))); + seq_printf(m, "NIX_AF_MDQ[%llu]_PIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PIR(schq))); + seq_printf(m, "NIX_AF_MDQ[%llu]_SW_XOFF =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SW_XOFF(schq))); + seq_printf(m, "NIX_AF_MDQ[%llu]_PARENT =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq))); + seq_puts(m, "\n"); + } + + if (lvl == NIX_TXSCH_LVL_TL4) { + seq_printf(m, "NIX_AF_TL4[%llu]_SDP_LINK_CFG =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL4X_SDP_LINK_CFG(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_SCHEDULE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL4X_SCHEDULE(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_SHAPE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SHAPE(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_CIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL4X_CIR(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_PIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PIR(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_SW_XOFF =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SW_XOFF(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_TOPOLOGY =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL4X_TOPOLOGY(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_PARENT =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG0 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL4X_MD_DEBUG0(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG1 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL4X_MD_DEBUG1(schq))); + seq_puts(m, "\n"); + } + + if (lvl == NIX_TXSCH_LVL_TL3) { + seq_printf(m, "NIX_AF_TL3[%llu]_SCHEDULE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL3X_SCHEDULE(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_SHAPE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SHAPE(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_CIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL3X_CIR(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_PIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PIR(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_SW_XOFF =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SW_XOFF(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_TOPOLOGY =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL3X_TOPOLOGY(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_PARENT =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG0 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL3X_MD_DEBUG0(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG1 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL3X_MD_DEBUG1(schq))); + + link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) + & 0x01 ? 
NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; + if (lvl == link_level) { + seq_printf(m, + "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n", + schq, rvu_read64(rvu, blkaddr, + NIX_AF_TL3_TL2X_BP_STATUS(schq))); + for (link = 0; link < hw->cgx_links; link++) + seq_printf(m, + "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n", + schq, link, + rvu_read64(rvu, blkaddr, + NIX_AF_TL3_TL2X_LINKX_CFG(schq, link))); + } + seq_puts(m, "\n"); + } + + if (lvl == NIX_TXSCH_LVL_TL2) { + seq_printf(m, "NIX_AF_TL2[%llu]_SHAPE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SHAPE(schq))); + seq_printf(m, "NIX_AF_TL2[%llu]_CIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL2X_CIR(schq))); + seq_printf(m, "NIX_AF_TL2[%llu]_PIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PIR(schq))); + seq_printf(m, "NIX_AF_TL2[%llu]_SW_XOFF =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SW_XOFF(schq))); + seq_printf(m, "NIX_AF_TL2[%llu]_TOPOLOGY =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL2X_TOPOLOGY(schq))); + seq_printf(m, "NIX_AF_TL2[%llu]_PARENT =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(schq))); + seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG0 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL2X_MD_DEBUG0(schq))); + seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG1 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL2X_MD_DEBUG1(schq))); + + link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) + & 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; + if (lvl == link_level) { + seq_printf(m, + "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n", + schq, rvu_read64(rvu, blkaddr, + NIX_AF_TL3_TL2X_BP_STATUS(schq))); + for (link = 0; link < hw->cgx_links; link++) + seq_printf(m, + "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n", + schq, link, rvu_read64(rvu, blkaddr, + NIX_AF_TL3_TL2X_LINKX_CFG(schq, link))); + } + seq_puts(m, "\n"); + } + + if (lvl == NIX_TXSCH_LVL_TL1) { + seq_printf(m, "NIX_AF_TX_LINK[%llu]_NORM_CREDIT =0x%llx\n", + schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TX_LINKX_NORM_CREDIT(schq))); + seq_printf(m, "NIX_AF_TX_LINK[%llu]_HW_XOFF =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TX_LINKX_HW_XOFF(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_SCHEDULE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_SCHEDULE(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_SHAPE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SHAPE(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_CIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_SW_XOFF =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_TOPOLOGY =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_TOPOLOGY(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG0 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_MD_DEBUG0(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG1 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_MD_DEBUG1(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_PACKETS =0x%llx\n", + schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_DROPPED_PACKETS(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_BYTES =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_DROPPED_BYTES(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_RED_PACKETS =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_RED_PACKETS(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_RED_BYTES =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_RED_BYTES(schq))); + seq_printf(m, 
"NIX_AF_TL1[%llu]_YELLOW_PACKETS =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_YELLOW_PACKETS(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_BYTES =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_YELLOW_BYTES(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_PACKETS =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_GREEN_PACKETS(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_BYTES =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_GREEN_BYTES(schq))); + seq_puts(m, "\n"); + } +} + +/*dumps given tm_topo registers*/ +static int rvu_dbg_nix_tm_topo_display(struct seq_file *m, void *unused) +{ + struct nix_hw *nix_hw = m->private; + struct rvu *rvu = nix_hw->rvu; + struct nix_aq_enq_req aq_req; + struct nix_txsch *txsch; + int nixlf, lvl, schq; + u16 pcifunc; + + nixlf = rvu->rvu_dbg.nix_tm_ctx.lf; + + if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc)) + return -EINVAL; + + memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); + aq_req.hdr.pcifunc = pcifunc; + aq_req.ctype = NIX_AQ_CTYPE_SQ; + aq_req.op = NIX_AQ_INSTOP_READ; + seq_printf(m, "pcifunc is 0x%x\n", pcifunc); + + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + txsch = &nix_hw->txsch[lvl]; + for (schq = 0; schq < txsch->schq.max; schq++) { + if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) == pcifunc) + print_tm_topo(m, schq, lvl); + } + } + return 0; +} + +static ssize_t rvu_dbg_nix_tm_topo_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct nix_hw *nix_hw = m->private; + struct rvu *rvu = nix_hw->rvu; + struct rvu_pfvf *pfvf; + u16 pcifunc; + u64 nixlf; + int ret; + + ret = kstrtoull_from_user(buffer, count, 10, &nixlf); + if (ret) + return ret; + + if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc)) + return -EINVAL; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + if (!pfvf->sq_ctx) { + dev_warn(rvu->dev, "SQ context is not initialized\n"); + return -EINVAL; + } + + rvu->rvu_dbg.nix_tm_ctx.lf = nixlf; + return count; +} + +RVU_DEBUG_SEQ_FOPS(nix_tm_topo, nix_tm_topo_display, nix_tm_topo_write); + /* Dumps given nix_sq's context */ static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) { @@ -2351,6 +2678,10 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr) nix_hw = &rvu->hw->nix[1]; } + debugfs_create_file("tm_tree", 0600, rvu->rvu_dbg.nix, nix_hw, + &rvu_dbg_nix_tm_tree_fops); + debugfs_create_file("tm_topo", 0600, rvu->rvu_dbg.nix, nix_hw, + &rvu_dbg_nix_tm_topo_fops); debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, &rvu_dbg_nix_sq_ctx_fops); debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, @@ -2365,8 +2696,8 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr) &rvu_dbg_nix_ndc_tx_hits_miss_fops); debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw, &rvu_dbg_nix_ndc_rx_hits_miss_fops); - debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu, - &rvu_dbg_nix_qsize_fops); + debugfs_create_file_aux_num("qsize", 0600, rvu->rvu_dbg.nix, rvu, + blkaddr, &rvu_dbg_nix_qsize_fops); debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, &rvu_dbg_nix_band_prof_ctx_fops); debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw, @@ -2515,28 +2846,14 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id) return err; } -static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id) +static int rvu_dbg_derive_lmacid(struct seq_file *s) { - struct dentry 
*current_dir; - char *buf; - - current_dir = filp->file->f_path.dentry->d_parent; - buf = strrchr(current_dir->d_name.name, 'c'); - if (!buf) - return -EINVAL; - - return kstrtoint(buf + 1, 10, lmac_id); + return debugfs_get_aux_num(s->file); } -static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) +static int rvu_dbg_cgx_stat_display(struct seq_file *s, void *unused) { - int lmac_id, err; - - err = rvu_dbg_derive_lmacid(filp, &lmac_id); - if (!err) - return cgx_print_stats(filp, lmac_id); - - return err; + return cgx_print_stats(s, rvu_dbg_derive_lmacid(s)); } RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL); @@ -2594,15 +2911,9 @@ static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id) return 0; } -static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused) +static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *s, void *unused) { - int err, lmac_id; - - err = rvu_dbg_derive_lmacid(filp, &lmac_id); - if (!err) - return cgx_print_dmac_flt(filp, lmac_id); - - return err; + return cgx_print_dmac_flt(s, rvu_dbg_derive_lmacid(s)); } RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL); @@ -2641,10 +2952,10 @@ static void rvu_dbg_cgx_init(struct rvu *rvu) rvu->rvu_dbg.lmac = debugfs_create_dir(dname, rvu->rvu_dbg.cgx); - debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac, - cgx, &rvu_dbg_cgx_stat_fops); - debugfs_create_file("mac_filter", 0600, - rvu->rvu_dbg.lmac, cgx, + debugfs_create_file_aux_num("stats", 0600, rvu->rvu_dbg.lmac, + cgx, lmac_id, &rvu_dbg_cgx_stat_fops); + debugfs_create_file_aux_num("mac_filter", 0600, + rvu->rvu_dbg.lmac, cgx, lmac_id, &rvu_dbg_cgx_dmac_flt_fops); } } @@ -2870,6 +3181,10 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s, seq_printf(s, "%d ", ntohs(rule->packet.dport)); seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport)); break; + case NPC_TCP_FLAGS: + seq_printf(s, "%d ", rule->packet.tcp_flags); + seq_printf(s, "mask 0x%x\n", rule->mask.tcp_flags); + break; case NPC_IPSEC_SPI: seq_printf(s, "0x%x ", ntohl(rule->packet.spi)); seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi)); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index 1e6fbd98423d..dab4deca893f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -1202,7 +1202,8 @@ static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id, } static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; @@ -1235,8 +1236,9 @@ static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id, enum rvu_af_dl_param_id { RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU, - RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE, RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT, + RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE, + RVU_AF_DEVLINK_PARAM_ID_NPC_DEF_RULE_CNTR_ENABLE, RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF, }; @@ -1256,7 +1258,8 @@ static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id, } static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct rvu_devlink *rvu_dl = 
devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; @@ -1310,7 +1313,8 @@ static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32 } static int rvu_af_dl_npc_mcam_high_zone_percent_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; @@ -1355,6 +1359,32 @@ static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink return 0; } +static int rvu_af_dl_npc_def_rule_cntr_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct rvu_devlink *rvu_dl = devlink_priv(devlink); + struct rvu *rvu = rvu_dl->rvu; + + ctx->val.vbool = rvu->def_rule_cntr_en; + + return 0; +} + +static int rvu_af_dl_npc_def_rule_cntr_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) +{ + struct rvu_devlink *rvu_dl = devlink_priv(devlink); + struct rvu *rvu = rvu_dl->rvu; + int err; + + err = npc_config_cntr_default_entries(rvu, ctx->val.vbool); + if (!err) + rvu->def_rule_cntr_en = ctx->val.vbool; + + return err; +} + static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id, struct devlink_param_gset_ctx *ctx) { @@ -1367,7 +1397,8 @@ static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id, } static int rvu_af_dl_nix_maxlf_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); struct rvu *rvu = rvu_dl->rvu; @@ -1434,21 +1465,17 @@ static const struct devlink_param rvu_af_dl_params[] = { BIT(DEVLINK_PARAM_CMODE_RUNTIME), rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set, rvu_af_dl_dwrr_mtu_validate), -}; - -static const struct devlink_param rvu_af_dl_param_exact_match[] = { - DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE, - "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING, - BIT(DEVLINK_PARAM_CMODE_RUNTIME), - rvu_af_npc_exact_feature_get, - rvu_af_npc_exact_feature_disable, - rvu_af_npc_exact_feature_validate), DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT, "npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8, BIT(DEVLINK_PARAM_CMODE_RUNTIME), rvu_af_dl_npc_mcam_high_zone_percent_get, rvu_af_dl_npc_mcam_high_zone_percent_set, rvu_af_dl_npc_mcam_high_zone_percent_validate), + DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_DEF_RULE_CNTR_ENABLE, + "npc_def_rule_cntr", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + rvu_af_dl_npc_def_rule_cntr_get, + rvu_af_dl_npc_def_rule_cntr_set, NULL), DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF, "nix_maxlf", DEVLINK_PARAM_TYPE_U16, BIT(DEVLINK_PARAM_CMODE_RUNTIME), @@ -1457,6 +1484,15 @@ static const struct devlink_param rvu_af_dl_param_exact_match[] = { rvu_af_dl_nix_maxlf_validate), }; +static const struct devlink_param rvu_af_dl_param_exact_match[] = { + DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE, + "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + rvu_af_npc_exact_feature_get, + rvu_af_npc_exact_feature_disable, + rvu_af_npc_exact_feature_validate), +}; + /* Devlink switch mode */ static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) { @@ -1464,6 +1500,9 @@ static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) struct rvu *rvu 
= rvu_dl->rvu; struct rvu_switch *rswitch; + if (rvu->rep_mode) + return -EOPNOTSUPP; + rswitch = &rvu->rswitch; *mode = rswitch->mode; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 66203a90f052..613655fcd34f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -31,6 +31,7 @@ static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc); static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, u32 leaf_prof); static const char *nix_get_ctx_name(int ctype); +static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc); enum mc_tbl_sz { MC_TBL_SZ_256, @@ -312,7 +313,9 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr, /* TLs aggregating traffic are shared across PF and VFs */ if (lvl >= hw->cap.nix_tx_aggr_lvl) { - if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc)) + if ((nix_get_tx_link(rvu, map_func) != + nix_get_tx_link(rvu, pcifunc)) && + (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))) return false; else return true; @@ -360,7 +363,6 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf, cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind); rvu_npc_set_pkind(rvu, pkind, pfvf); - break; case NIX_INTF_TYPE_LBK: vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; @@ -499,55 +501,166 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) rvu_cgx_disable_dmac_entries(rvu, pcifunc); } -int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, - struct nix_bp_cfg_req *req, - struct msg_rsp *rsp) +#define NIX_BPIDS_PER_LMAC 8 +#define NIX_BPIDS_PER_CPT 1 +static int nix_setup_bpids(struct rvu *rvu, struct nix_hw *hw, int blkaddr) +{ + struct nix_bp *bp = &hw->bp; + int err, max_bpids; + u64 cfg; + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); + max_bpids = FIELD_GET(NIX_CONST_MAX_BPIDS, cfg); + + /* Reserve the BPIDs for CGX and SDP */ + bp->cgx_bpid_cnt = rvu->hw->cgx_links * NIX_BPIDS_PER_LMAC; + bp->sdp_bpid_cnt = rvu->hw->sdp_links * FIELD_GET(NIX_CONST_SDP_CHANS, cfg); + bp->free_pool_base = bp->cgx_bpid_cnt + bp->sdp_bpid_cnt + + NIX_BPIDS_PER_CPT; + bp->bpids.max = max_bpids - bp->free_pool_base; + + err = rvu_alloc_bitmap(&bp->bpids); + if (err) + return err; + + bp->fn_map = devm_kcalloc(rvu->dev, bp->bpids.max, + sizeof(u16), GFP_KERNEL); + if (!bp->fn_map) + return -ENOMEM; + + bp->intf_map = devm_kcalloc(rvu->dev, bp->bpids.max, + sizeof(u8), GFP_KERNEL); + if (!bp->intf_map) + return -ENOMEM; + + bp->ref_cnt = devm_kcalloc(rvu->dev, bp->bpids.max, + sizeof(u8), GFP_KERNEL); + if (!bp->ref_cnt) + return -ENOMEM; + + return 0; +} + +void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc) +{ + int blkaddr, bpid, err; + struct nix_hw *nix_hw; + struct nix_bp *bp; + + if (!is_lbk_vf(rvu, pcifunc)) + return; + + err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); + if (err) + return; + + bp = &nix_hw->bp; + + mutex_lock(&rvu->rsrc_lock); + for (bpid = 0; bpid < bp->bpids.max; bpid++) { + if (bp->fn_map[bpid] == pcifunc) { + bp->ref_cnt[bpid]--; + if (bp->ref_cnt[bpid]) + continue; + rvu_free_rsrc(&bp->bpids, bpid); + bp->fn_map[bpid] = 0; + } + } + mutex_unlock(&rvu->rsrc_lock); +} + +static u16 nix_get_channel(u16 chan, bool cpt_link) +{ + /* The CPT channel for a given link channel is always + * assumed to be the link channel with BIT(11) set. + */ + return cpt_link ? 
chan | BIT(11) : chan; +} + +static int nix_bp_disable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct msg_rsp *rsp, bool cpt_link) { u16 pcifunc = req->hdr.pcifunc; + int blkaddr, pf, type, err; + u16 chan_base, chan, bpid; struct rvu_pfvf *pfvf; - int blkaddr, pf, type; - u16 chan_base, chan; + struct nix_hw *nix_hw; + struct nix_bp *bp; + u16 chan_v; u64 cfg; pf = rvu_get_pf(pcifunc); - type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; + type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) return 0; + if (is_sdp_pfvf(pcifunc)) + type = NIX_INTF_TYPE_SDP; + + if (cpt_link && !rvu->hw->cpt_links) + return 0; + pfvf = rvu_get_pfvf(rvu, pcifunc); - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); + if (err) + return err; + bp = &nix_hw->bp; chan_base = pfvf->rx_chan_base + req->chan_base; for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { - cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); - rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), + chan_v = nix_get_channel(chan, cpt_link); + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v)); + rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v), cfg & ~BIT_ULL(16)); + + if (type == NIX_INTF_TYPE_LBK) { + bpid = cfg & GENMASK(8, 0); + mutex_lock(&rvu->rsrc_lock); + rvu_free_rsrc(&bp->bpids, bpid - bp->free_pool_base); + for (bpid = 0; bpid < bp->bpids.max; bpid++) { + if (bp->fn_map[bpid] == pcifunc) { + bp->fn_map[bpid] = 0; + bp->ref_cnt[bpid] = 0; + } + } + mutex_unlock(&rvu->rsrc_lock); + } } return 0; } +int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct msg_rsp *rsp) +{ + return nix_bp_disable(rvu, req, rsp, false); +} + +int rvu_mbox_handler_nix_cpt_bp_disable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct msg_rsp *rsp) +{ + return nix_bp_disable(rvu, req, rsp, true); +} + static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, int type, int chan_id) { - int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt; - u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt; + int bpid, blkaddr, sdp_chan_base, err; struct rvu_hwinfo *hw = rvu->hw; struct rvu_pfvf *pfvf; + struct nix_hw *nix_hw; u8 cgx_id, lmac_id; - u64 cfg; + struct nix_bp *bp; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); - cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); - lmac_chan_cnt = cfg & 0xFF; - - cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt; - lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF); + pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); - cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); - sdp_chan_cnt = cfg & 0xFFF; - sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt; + err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); + if (err) + return err; - pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); + bp = &nix_hw->bp; /* Backpressure IDs range division * CGX channels are mapped to (0 - 191) BPIDs */ switch (type) { case NIX_INTF_TYPE_CGX: - if ((req->chan_base + req->chan_cnt) > 16) - return -EINVAL; + if ((req->chan_base + req->chan_cnt) > NIX_BPIDS_PER_LMAC) + return NIX_AF_ERR_INVALID_BPID_REQ; rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); /* Assign bpid based on cgx, lmac and chan id */ - bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) + - (lmac_id * lmac_chan_cnt) + req->chan_base; +
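
The hunks above replace the fixed per-link BPID arithmetic with a managed free pool: nix_setup_bpids() reserves the CGX, SDP and CPT ranges up front and hands out the remainder from a bitmap, while fn_map[] and ref_cnt[] record the owning PF_FUNC so rvu_nix_flr_free_bpids() can reclaim entries on FLR. A minimal sketch of the LBK allocation path, reusing the field and helper names from these hunks (lbk_bpid_alloc itself is a hypothetical name; the real logic sits inline in rvu_nix_get_bpid()):

/* Sketch: allocate one BPID from the shared free pool for an LBK VF */
static int lbk_bpid_alloc(struct rvu *rvu, struct nix_bp *bp, u16 pcifunc)
{
	int bpid;

	mutex_lock(&rvu->rsrc_lock);
	bpid = rvu_alloc_rsrc(&bp->bpids);	/* bitmap index, < 0 when exhausted */
	if (bpid < 0) {
		mutex_unlock(&rvu->rsrc_lock);
		return NIX_AF_ERR_INVALID_BPID;
	}
	bp->fn_map[bpid] = pcifunc;		/* owner, matched on FLR */
	bp->ref_cnt[bpid]++;			/* entry is freed when this drops to zero */
	mutex_unlock(&rvu->rsrc_lock);

	/* the hardware BPID sits above the reserved CGX/SDP/CPT ranges */
	return bpid + bp->free_pool_base;
}
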
bpid = (cgx_id * hw->lmac_per_cgx * NIX_BPIDS_PER_LMAC) + + (lmac_id * NIX_BPIDS_PER_LMAC) + req->chan_base; if (req->bpid_per_chan) bpid += chan_id; - if (bpid > cgx_bpid_cnt) - return -EINVAL; + if (bpid > bp->cgx_bpid_cnt) + return NIX_AF_ERR_INVALID_BPID; break; case NIX_INTF_TYPE_LBK: - if ((req->chan_base + req->chan_cnt) > 63) - return -EINVAL; - bpid = cgx_bpid_cnt + req->chan_base; - if (req->bpid_per_chan) - bpid += chan_id; - if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt)) - return -EINVAL; + /* Alloc bpid from the free pool */ + mutex_lock(&rvu->rsrc_lock); + bpid = rvu_alloc_rsrc(&bp->bpids); + if (bpid < 0) { + mutex_unlock(&rvu->rsrc_lock); + return NIX_AF_ERR_INVALID_BPID; + } + bp->fn_map[bpid] = req->hdr.pcifunc; + bp->ref_cnt[bpid]++; + bpid += bp->free_pool_base; + mutex_unlock(&rvu->rsrc_lock); break; case NIX_INTF_TYPE_SDP: - if ((req->chan_base + req->chan_cnt) > 255) - return -EINVAL; + if ((req->chan_base + req->chan_cnt) > bp->sdp_bpid_cnt) + return NIX_AF_ERR_INVALID_BPID_REQ; - bpid = sdp_bpid_cnt + req->chan_base; + /* Handle usecase of 2 SDP blocks */ + if (!hw->cap.programmable_chans) + sdp_chan_base = pfvf->rx_chan_base - NIX_CHAN_SDP_CH_START; + else + sdp_chan_base = pfvf->rx_chan_base - hw->sdp_chan_base; + + bpid = bp->cgx_bpid_cnt + req->chan_base + sdp_chan_base; if (req->bpid_per_chan) bpid += chan_id; - if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt)) - return -EINVAL; + if (bpid > (bp->cgx_bpid_cnt + bp->sdp_bpid_cnt)) + return NIX_AF_ERR_INVALID_BPID; break; default: return -EINVAL; @@ -600,19 +723,21 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, return bpid; } -int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, - struct nix_bp_cfg_req *req, - struct nix_bp_cfg_rsp *rsp) +static int nix_bp_enable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct nix_bp_cfg_rsp *rsp, + bool cpt_link) { int blkaddr, pf, type, chan_id = 0; u16 pcifunc = req->hdr.pcifunc; struct rvu_pfvf *pfvf; u16 chan_base, chan; s16 bpid, bpid_base; + u16 chan_v; u64 cfg; pf = rvu_get_pf(pcifunc); - type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; + type = is_lbk_vf(rvu, pcifunc) ? 
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; if (is_sdp_pfvf(pcifunc)) type = NIX_INTF_TYPE_SDP; @@ -621,6 +746,9 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, type != NIX_INTF_TYPE_SDP) return 0; + if (cpt_link && !rvu->hw->cpt_links) + return 0; + pfvf = rvu_get_pfvf(rvu, pcifunc); blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); @@ -634,9 +762,11 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, return -EINVAL; } - cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); + chan_v = nix_get_channel(chan, cpt_link); + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v)); cfg &= ~GENMASK_ULL(8, 0); - rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), + rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan_v), cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16)); chan_id++; bpid = rvu_nix_get_bpid(rvu, req, type, chan_id); @@ -654,6 +784,20 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, return 0; } +int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct nix_bp_cfg_rsp *rsp) +{ + return nix_bp_enable(rvu, req, rsp, false); +} + +int rvu_mbox_handler_nix_cpt_bp_enable(struct rvu *rvu, + struct nix_bp_cfg_req *req, + struct nix_bp_cfg_rsp *rsp) +{ + return nix_bp_enable(rvu, req, rsp, true); +} + static void nix_setup_lso_tso_l3(struct rvu *rvu, int blkaddr, u64 format, bool v4, u64 *fidx) { @@ -1523,7 +1667,13 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, cfg = NPC_TX_DEF_PKIND; rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg); - intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; + if (is_rep_dev(rvu, pcifunc)) { + pfvf->tx_chan_base = RVU_SWITCH_LBK_CHAN; + pfvf->tx_chan_cnt = 1; + goto exit; + } + + intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; if (is_sdp_pfvf(pcifunc)) intf = NIX_INTF_TYPE_SDP; @@ -1593,6 +1743,9 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req, if (nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID; + if (is_rep_dev(rvu, pcifunc)) + goto free_lf; + if (req->flags & NIX_LF_DISABLE_FLOWS) rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); else @@ -1604,6 +1757,7 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req, nix_interface_deinit(rvu, pcifunc, nixlf); +free_lf: /* Reset this NIX LF */ err = rvu_lf_reset(rvu, block, nixlf); if (err) { @@ -1899,7 +2053,7 @@ static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) int pf = rvu_get_pf(pcifunc); u8 cgx_id = 0, lmac_id = 0; - if (is_afvf(pcifunc)) {/* LBK links */ + if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */ return hw->cgx_links; } else if (is_pf_cgxmapped(rvu, pf)) { rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); @@ -1916,7 +2070,8 @@ static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc, struct rvu_hwinfo *hw = rvu->hw; int pf = rvu_get_pf(pcifunc); - if (is_afvf(pcifunc)) { /* LBK links */ + /* LBK links */ + if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) { *start = hw->cap.nix_txsch_per_cgx_lmac * link; *end = *start + hw->cap.nix_txsch_per_lbk_lmac; } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */ @@ -2168,14 +2323,13 @@ static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq, schq = smq; for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) { smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl]; + smq_tree_ctx->schq = schq; if (lvl == NIX_TXSCH_LVL_TL1) { - smq_flush_ctx->tl1_schq = schq; smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq); smq_tree_ctx->pir_off = 0; 
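
The smq_flush_ctx filled in here feeds the widened flush sequence of the hunks that follow. For orientation, the reworked nix_smq_flush() boils down to the steps below (a condensed sketch using the register bits from those hunks; the bmap bookkeeping that remembers which links were enabled is elided):

/* Sketch: outline of the reworked SMQ flush sequence */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg | BIT_ULL(50));	/* enqueue XOFF */

/* clear NIX_AF_TL3_TL2X_LINKX_CFG[ENA] (bit 12) on every CGX/LBK link,
 * recording which ones were set; a backpressured link could otherwise
 * keep the flush from completing
 */

cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg | BIT_ULL(50) | BIT_ULL(49));	/* flush */
rvu_poll_reg(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);	/* wait for completion */

/* restore the saved link ENA bits, then lift the TL2 XOFFs and rate limits */
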
smq_tree_ctx->pir_val = 0; parent_off = 0; } else if (lvl == NIX_TXSCH_LVL_TL2) { - smq_flush_ctx->tl2_schq = schq; smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq); smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq); parent_off = NIX_AF_TL2X_PARENT(schq); @@ -2210,8 +2364,8 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr, { struct nix_txsch *txsch; struct nix_hw *nix_hw; + int tl2, tl2_schq; u64 regoff; - int tl2; nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) @@ -2219,16 +2373,17 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr, /* loop through all TL2s with matching PF_FUNC */ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2]; + tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq; for (tl2 = 0; tl2 < txsch->schq.max; tl2++) { /* skip the smq(flush) TL2 */ - if (tl2 == smq_flush_ctx->tl2_schq) + if (tl2 == tl2_schq) continue; /* skip unused TL2s */ if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE) continue; /* skip if PF_FUNC doesn't match */ if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) != - (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] & + (TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq] & ~RVU_PFVF_FUNC_MASK))) continue; /* enable/disable XOFF */ @@ -2270,10 +2425,12 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr, int smq, u16 pcifunc, int nixlf) { struct nix_smq_flush_ctx *smq_flush_ctx; + int err, restore_tx_en = 0, i; int pf = rvu_get_pf(pcifunc); u8 cgx_id = 0, lmac_id = 0; - int err, restore_tx_en = 0; - u64 cfg; + u16 tl2_tl3_link_schq; + u8 link, link_level; + u64 cfg, bmap = 0; if (!is_rvu_otx2(rvu)) { /* Skip SMQ flush if pkt count is zero */ @@ -2297,16 +2454,38 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr, nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true); nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false); - cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); - /* Do SMQ flush and set enqueue xoff */ - cfg |= BIT_ULL(50) | BIT_ULL(49); - rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); - /* Disable backpressure from physical link, * otherwise SMQ flush may stall. */ rvu_cgx_enadis_rx_bp(rvu, pf, false); + link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? 
+ NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; + tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq; + link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq; + + /* SMQ set enqueue xoff */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); + cfg |= BIT_ULL(50); + rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); + + /* Clear all NIX_AF_TL3_TL2_LINK_CFG[ENA] for the TL3/TL2 queue */ + for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) { + cfg = rvu_read64(rvu, blkaddr, + NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link)); + if (!(cfg & BIT_ULL(12))) + continue; + bmap |= BIT_ULL(i); + cfg &= ~BIT_ULL(12); + rvu_write64(rvu, blkaddr, + NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg); + } + + /* Do SMQ flush and set enqueue xoff */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq)); + cfg |= BIT_ULL(50) | BIT_ULL(49); + rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg); + /* Wait for flush to complete */ err = rvu_poll_reg(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true); @@ -2315,6 +2494,17 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr, "NIXLF%d: SMQ%d flush failed, txlink might be busy\n", nixlf, smq); + /* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */ + for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) { + if (!(bmap & BIT_ULL(i))) + continue; + cfg = rvu_read64(rvu, blkaddr, + NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link)); + cfg |= BIT_ULL(12); + rvu_write64(rvu, blkaddr, + NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg); + } + /* clear XOFF on TL2s */ nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true); nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false); @@ -2406,9 +2596,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) } mutex_unlock(&rvu->rsrc_lock); - /* Sync cached info for this LF in NDC-TX to LLC/DRAM */ - rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf); - err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true); + err = rvu_ndc_sync(rvu, blkaddr, nixlf, NIX_AF_NDC_TX_SYNC); if (err) dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf); @@ -2636,7 +2824,7 @@ void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc, int schq; u64 cfg; - if (!is_pf_cgxmapped(rvu, pf)) + if (!is_pf_cgxmapped(rvu, pf) && !is_rep_dev(rvu, pcifunc)) return; cfg = enable ? 
(BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0; @@ -3356,7 +3544,7 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, int pf; /* skip multicast pkt replication for AF's VFs & SDP links */ - if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc)) + if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(pcifunc)) return 0; if (!hw->cap.nix_rx_multicast) @@ -3703,7 +3891,7 @@ int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, if (blkaddr < 0) return NIX_AF_ERR_AF_LF_INVALID; - if (is_afvf(pcifunc)) + if (is_lbk_vf(rvu, pcifunc)) rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu); else rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu); @@ -3773,6 +3961,11 @@ static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg) return -ERANGE; } +/* Mask to match ipv6(NPC_LT_LC_IP6) and ipv6 ext(NPC_LT_LC_IP6_EXT) */ +#define NPC_LT_LC_IP6_MATCH_MSK ((~(NPC_LT_LC_IP6 ^ NPC_LT_LC_IP6_EXT)) & 0xf) +/* Mask to match both ipv4(NPC_LT_LC_IP) and ipv4 ext(NPC_LT_LC_IP_OPT) */ +#define NPC_LT_LC_IP_MATCH_MSK ((~(NPC_LT_LC_IP ^ NPC_LT_LC_IP_OPT)) & 0xf) + static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) { int idx, nr_field, key_off, field_marker, keyoff_marker; @@ -3842,7 +4035,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) field->hdr_offset = 9; /* offset */ field->bytesm1 = 0; /* 1 byte */ field->ltype_match = NPC_LT_LC_IP; - field->ltype_mask = 0xF; + field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK; break; case NIX_FLOW_KEY_TYPE_IPV4: case NIX_FLOW_KEY_TYPE_INNR_IPV4: @@ -3869,8 +4062,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) field->bytesm1 = 3; /* DIP, 4 bytes */ } } - - field->ltype_mask = 0xF; /* Match only IPv4 */ + field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK; keyoff_marker = false; break; case NIX_FLOW_KEY_TYPE_IPV6: @@ -3899,7 +4091,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) field->bytesm1 = 15; /* DIP,16 bytes */ } } - field->ltype_mask = 0xF; /* Match only IPv6 */ + field->ltype_mask = NPC_LT_LC_IP6_MATCH_MSK; break; case NIX_FLOW_KEY_TYPE_TCP: case NIX_FLOW_KEY_TYPE_UDP: @@ -4039,6 +4231,13 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) field->ltype_match = NPC_LT_LE_GTPU; field->ltype_mask = 0xF; break; + case NIX_FLOW_KEY_TYPE_CUSTOM0: + field->lid = NPC_LID_LC; + field->hdr_offset = 6; + field->bytesm1 = 1; /* 2 Bytes*/ + field->ltype_match = NPC_LT_LC_CUSTOM0; + field->ltype_mask = 0xF; + break; case NIX_FLOW_KEY_TYPE_VLAN: field->lid = NPC_LID_LB; field->hdr_offset = 2; /* Skip TPID (2-bytes) */ @@ -4258,8 +4457,6 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf) ether_addr_copy(pfvf->default_mac, req->mac_addr); - rvu_switch_update_rules(rvu, pcifunc); - return 0; } @@ -4420,7 +4617,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; - if (is_afvf(pcifunc)) + if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) rvu_get_lbk_link_max_frs(rvu, &max_mtu); else rvu_get_lmac_link_max_frs(rvu, &max_mtu); @@ -4448,6 +4645,8 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, /* For VFs of PF0 ingress is LBK port, so config LBK link */ pfvf = rvu_get_pfvf(rvu, pcifunc); link = hw->cgx_links + pfvf->lbkid; + } else if (is_rep_dev(rvu, pcifunc)) { + link = hw->cgx_links + 0; } if (link < 0) @@ -4521,6 +4720,9 @@ static void nix_link_config(struct rvu *rvu, int 
blkaddr, rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs); rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs); + /* Set SDP link credit */ + rvu_write64(rvu, blkaddr, NIX_AF_SDP_LINK_CREDIT, SDP_LINK_CREDIT); + /* Set default min/max packet lengths allowed on NIX Rx links. * * With HW reset minlen value of 60byte, HW will treat ARP pkts @@ -4539,7 +4741,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr, if (hw->sdp_links) { link = hw->cgx_links + hw->lbk_links; rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), - SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); + SDP_HW_MAX_FRS << 16 | SDP_HW_MIN_FRS); } /* Get MCS external bypass status for CN10K-B */ @@ -4721,18 +4923,18 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) */ rvu_write64(rvu, blkaddr, NIX_AF_CFG, rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL); + } - /* Set chan/link to backpressure TL3 instead of TL2 */ - rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01); + /* Set chan/link to backpressure TL3 instead of TL2 */ + rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01); - /* Disable SQ manager's sticky mode operation (set TM6 = 0) - * This sticky mode is known to cause SQ stalls when multiple - * SQs are mapped to same SMQ and transmitting pkts at a time. - */ - cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS); - cfg &= ~BIT_ULL(15); - rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg); - } + /* Disable SQ manager's sticky mode operation (set TM6 = 0) + * This sticky mode is known to cause SQ stalls when multiple + * SQs are mapped to same SMQ and transmitting pkts at a time. + */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS); + cfg &= ~BIT_ULL(15); + rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg); ltdefs = rvu->kpu.lt_def; /* Calibrate X2P bus to check if CGX/LBK links are fine */ @@ -4784,6 +4986,10 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) if (err) return err; + err = nix_setup_bpids(rvu, nix_hw, blkaddr); + if (err) + return err; + /* Configure segmentation offload formats */ nix_setup_lso(rvu, nix_hw, blkaddr); @@ -5027,7 +5233,7 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, { u16 pcifunc = req->hdr.pcifunc; struct rvu_pfvf *pfvf; - int nixlf, err; + int nixlf, err, pf; err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); if (err) @@ -5043,7 +5249,11 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, pfvf = rvu_get_pfvf(rvu, pcifunc); set_bit(NIXLF_INITIALIZED, &pfvf->flags); - rvu_switch_update_rules(rvu, pcifunc); + rvu_switch_update_rules(rvu, pcifunc, true); + + pf = rvu_get_pf(pcifunc); + if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) + rvu_rep_notify_pfvf_state(rvu, pcifunc, true); return rvu_cgx_start_stop_io(rvu, pcifunc, true); } @@ -5053,7 +5263,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, { u16 pcifunc = req->hdr.pcifunc; struct rvu_pfvf *pfvf; - int nixlf, err; + int nixlf, err, pf; err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); if (err) @@ -5071,8 +5281,12 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, if (err) return err; + rvu_switch_update_rules(rvu, pcifunc, false); rvu_cgx_tx_enable(rvu, pcifunc, true); + pf = rvu_get_pf(pcifunc); + if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) + rvu_rep_notify_pfvf_state(rvu, pcifunc, false); return 0; } @@ -5100,6 +5314,9 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) clear_bit(NIXLF_INITIALIZED, &pfvf->flags); + 
if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) + rvu_rep_notify_pfvf_state(rvu, pcifunc, false); + rvu_cgx_start_stop_io(rvu, pcifunc, false); if (pfvf->sq_ctx) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 516adb50f9f6..821fe242f821 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -395,7 +395,7 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam, owner = mcam->entry2pfvf_map[index]; target_func = (entry->action >> 4) & 0xffff; /* do nothing when target is LBK/PF or owner is not PF */ - if (is_pffunc_af(owner) || is_afvf(target_func) || + if (is_pffunc_af(owner) || is_lbk_vf(rvu, target_func) || (owner & RVU_PFVF_FUNC_MASK) || !(target_func & RVU_PFVF_FUNC_MASK)) return; @@ -608,7 +608,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, int blkaddr, index; /* AF's and SDP VFs work in promiscuous mode */ - if (is_afvf(pcifunc) || is_sdp_vf(pcifunc)) + if (is_lbk_vf(rvu, pcifunc) || is_sdp_vf(rvu, pcifunc)) return; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); @@ -773,7 +773,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, return; /* Skip LBK VFs */ - if (is_afvf(pcifunc)) + if (is_lbk_vf(rvu, pcifunc)) return; /* If pkt replication is not supported, @@ -853,7 +853,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u16 vf_func; /* Only CGX PF/VF can add allmulticast entry */ - if (is_afvf(pcifunc) && is_sdp_vf(pcifunc)) + if (is_lbk_vf(rvu, pcifunc) && is_sdp_vf(rvu, pcifunc)) return; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); @@ -1657,7 +1657,7 @@ static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz, struct npc_coalesced_kpu_prfl *img_data = NULL; int i = 0, rc = -EINVAL; void __iomem *kpu_prfl_addr; - u16 offset; + u32 offset; img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr; if (le64_to_cpu(img_data->signature) == KPU_SIGN && @@ -2181,7 +2181,6 @@ void rvu_npc_freemem(struct rvu *rvu) kfree(pkind->rsrc.bmap); npc_mcam_rsrcs_deinit(rvu); - kfree(mcam->counters.bmap); if (rvu->kpu_prfl_addr) iounmap(rvu->kpu_prfl_addr); else @@ -2520,7 +2519,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc, * - when available free entries are less. * Lower priority ones out of available free entries are always * chosen when 'high vs low' question arises. + * + * For a VF base MCAM match rule is set by its PF. And all the + * further MCAM rules installed by VF on its own are + * concatenated with the base rule set by its PF. Hence PF entries + * should be at lower priority compared to VF entries. Otherwise + * base rule is hit always and rules installed by VF will be of + * no use. Hence if the request is from PF then allocate low + * priority entries. */ + if (!(pcifunc & RVU_PFVF_FUNC_MASK)) + goto lprio_alloc; /* Get the search range for priority allocation request */ if (req->priority) { @@ -2529,17 +2538,6 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc, goto alloc; } - /* For a VF base MCAM match rule is set by its PF. And all the - * further MCAM rules installed by VF on its own are - * concatenated with the base rule set by its PF. Hence PF entries - * should be at lower priority compared to VF entries. Otherwise - * base rule is hit always and rules installed by VF will be of - no use.
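
The relocated comment (continued just below) states the policy; the code change makes PF allocations jump straight to the low-priority zone, and the second hunk further down clamps that search window when the PF also passed a reference entry. Restated compactly (a sketch of the lprio_alloc path after this change):

/* Sketch: PF requests (FUNC bits clear) always search the bottom zone */
if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
	reverse = true;
	start = 0;
	end = mcam->bmap_entries;

	if (req->priority == NPC_MCAM_HIGHER_PRIO)
		end = req->ref_entry;		/* stay below the reference entry */
	else if (req->priority == NPC_MCAM_LOWER_PRIO)
		start = req->ref_entry;		/* stay above the reference entry */
}
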
Hence if the request is from PF and NOT a priority - * allocation request then allocate low priority entries. - */ - if (!(pcifunc & RVU_PFVF_FUNC_MASK)) - goto lprio_alloc; - /* Find out the search range for non-priority allocation request * * Get MCAM free entry count in middle zone. @@ -2569,6 +2567,18 @@ lprio_alloc: reverse = true; start = 0; end = mcam->bmap_entries; + /* Ensure PF requests are always at bottom and if PF requests + * for higher/lower priority entry wrt reference entry then + * honour that criteria and start search for entries from bottom + * and not in mid zone. + */ + if (!(pcifunc & RVU_PFVF_FUNC_MASK) && + req->priority == NPC_MCAM_HIGHER_PRIO) + end = req->ref_entry; + + if (!(pcifunc & RVU_PFVF_FUNC_MASK) && + req->priority == NPC_MCAM_LOWER_PRIO) + start = req->ref_entry; } alloc: @@ -2681,6 +2691,49 @@ void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx) npc_mcam_set_bit(mcam, entry_idx); } +int npc_config_cntr_default_entries(struct rvu *rvu, bool enable) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct npc_install_flow_rsp rsp; + struct rvu_npc_mcam_rule *rule; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return -EINVAL; + + mutex_lock(&mcam->lock); + list_for_each_entry(rule, &mcam->mcam_rules, list) { + if (!is_mcam_entry_enabled(rvu, mcam, blkaddr, rule->entry)) + continue; + if (!rule->default_rule) + continue; + if (enable && !rule->has_cntr) { /* Alloc and map new counter */ + __rvu_mcam_add_counter_to_rule(rvu, rule->owner, + rule, &rsp); + if (rsp.counter < 0) { + dev_err(rvu->dev, + "%s: Failed to allocate cntr for default rule (err=%d)\n", + __func__, rsp.counter); + break; + } + npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, + rule->entry, rsp.counter); + /* Reset counter before use */ + rvu_write64(rvu, blkaddr, + NPC_AF_MATCH_STATX(rule->cntr), 0x0); + } + + /* Free and unmap counter */ + if (!enable && rule->has_cntr) + __rvu_mcam_remove_counter_from_rule(rvu, rule->owner, + rule); + } + mutex_unlock(&mcam->lock); + + return 0; +} + int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu, struct npc_mcam_alloc_entry_req *req, struct npc_mcam_alloc_entry_rsp *rsp) @@ -2965,9 +3018,9 @@ int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu, return rc; } -int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu, - struct npc_mcam_alloc_counter_req *req, - struct npc_mcam_alloc_counter_rsp *rsp) +static int __npc_mcam_alloc_counter(struct rvu *rvu, + struct npc_mcam_alloc_counter_req *req, + struct npc_mcam_alloc_counter_rsp *rsp) { struct npc_mcam *mcam = &rvu->hw->mcam; u16 pcifunc = req->hdr.pcifunc; @@ -2988,11 +3041,9 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu, if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS) return NPC_MCAM_INVALID_REQ; - mutex_lock(&mcam->lock); /* Check if unused counters are available or not */ if (!rvu_rsrc_free_count(&mcam->counters)) { - mutex_unlock(&mcam->lock); return NPC_MCAM_ALLOC_FAILED; } @@ -3025,12 +3076,27 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu, } } - mutex_unlock(&mcam->lock); return 0; } -int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu, - struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp) +int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu, + struct npc_mcam_alloc_counter_req *req, + struct npc_mcam_alloc_counter_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int err; + + mutex_lock(&mcam->lock); + + err = __npc_mcam_alloc_counter(rvu, 
req, rsp); + + mutex_unlock(&mcam->lock); + return err; +} + +static int __npc_mcam_free_counter(struct rvu *rvu, + struct npc_mcam_oper_counter_req *req, + struct msg_rsp *rsp) { struct npc_mcam *mcam = &rvu->hw->mcam; u16 index, entry = 0; @@ -3040,10 +3106,8 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu, if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; - mutex_lock(&mcam->lock); err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); if (err) { - mutex_unlock(&mcam->lock); return err; } @@ -3067,10 +3131,66 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu, index, req->cntr); } - mutex_unlock(&mcam->lock); return 0; } +int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu, + struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int err; + + mutex_lock(&mcam->lock); + + err = __npc_mcam_free_counter(rvu, req, rsp); + + mutex_unlock(&mcam->lock); + + return err; +} + +void __rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc, + struct rvu_npc_mcam_rule *rule) +{ + struct npc_mcam_oper_counter_req free_req = { 0 }; + struct msg_rsp free_rsp; + + if (!rule->has_cntr) + return; + + free_req.hdr.pcifunc = pcifunc; + free_req.cntr = rule->cntr; + + __npc_mcam_free_counter(rvu, &free_req, &free_rsp); + rule->has_cntr = false; +} + +void __rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc, + struct rvu_npc_mcam_rule *rule, + struct npc_install_flow_rsp *rsp) +{ + struct npc_mcam_alloc_counter_req cntr_req = { 0 }; + struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 }; + int err; + + cntr_req.hdr.pcifunc = pcifunc; + cntr_req.contig = true; + cntr_req.count = 1; + + /* we try to allocate a counter to track the stats of this + * rule. If counter could not be allocated then proceed + * without counter because counters are more limited than entries.
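
This refactor is the usual lock-split pattern: the mbox handlers keep taking mcam->lock, while the new double-underscore helpers assume it is already held, so callers such as npc_config_cntr_default_entries() and the flow alloc/free paths in rvu_npc_fs.c can combine several operations in one critical section. Schematically (hypothetical names; a sketch of the pattern rather than the driver code):

/* Helper: the caller must already hold mcam->lock */
static int __npc_do_op(struct npc_mcam *mcam)
{
	lockdep_assert_held(&mcam->lock);
	/* ... the actual work, with no locking in here ... */
	return 0;
}

/* Mbox entry point: a thin locked wrapper around the helper */
int npc_op_handler(struct rvu *rvu)
{
	struct npc_mcam *mcam = &rvu->hw->mcam;
	int err;

	mutex_lock(&mcam->lock);
	err = __npc_do_op(mcam);
	mutex_unlock(&mcam->lock);
	return err;
}
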
+ */ + err = __npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp); + if (!err && cntr_rsp.count) { + rule->cntr = cntr_rsp.cntr; + rule->has_cntr = true; + rsp->counter = rule->cntr; + } else { + rsp->counter = err; + } +} + int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu, struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index c75669c8fde7..1b765045aa63 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -53,6 +53,7 @@ static const char * const npc_flow_names[] = { [NPC_MPLS4_TTL] = "lse depth 4", [NPC_TYPE_ICMP] = "icmp type", [NPC_CODE_ICMP] = "icmp code", + [NPC_TCP_FLAGS] = "tcp flags", [NPC_UNKNOWN] = "unknown", }; @@ -530,6 +531,7 @@ do { \ NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2); NPC_SCAN_HDR(NPC_TYPE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 0, 1); NPC_SCAN_HDR(NPC_CODE_ICMP, NPC_LID_LD, NPC_LT_LD_ICMP, 1, 1); + NPC_SCAN_HDR(NPC_TCP_FLAGS, NPC_LID_LD, NPC_LT_LD_TCP, 12, 2); NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2); NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2); NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2); @@ -574,7 +576,8 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf) BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) | BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP) | BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP) | - BIT_ULL(NPC_TYPE_ICMP) | BIT_ULL(NPC_CODE_ICMP); + BIT_ULL(NPC_TYPE_ICMP) | BIT_ULL(NPC_CODE_ICMP) | + BIT_ULL(NPC_TCP_FLAGS); /* for tcp/udp/sctp corresponding layer type should be in the key */ if (*features & proto_flags) { @@ -982,7 +985,8 @@ do { \ mask->icmp_type, 0); NPC_WRITE_FLOW(NPC_CODE_ICMP, icmp_code, pkt->icmp_code, 0, mask->icmp_code, 0); - + NPC_WRITE_FLOW(NPC_TCP_FLAGS, tcp_flags, ntohs(pkt->tcp_flags), 0, + ntohs(mask->tcp_flags), 0); NPC_WRITE_FLOW(NPC_IPSEC_SPI, spi, ntohl(pkt->spi), 0, ntohl(mask->spi), 0); @@ -1077,44 +1081,26 @@ static void rvu_mcam_add_rule(struct npc_mcam *mcam, static void rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc, struct rvu_npc_mcam_rule *rule) { - struct npc_mcam_oper_counter_req free_req = { 0 }; - struct msg_rsp free_rsp; + struct npc_mcam *mcam = &rvu->hw->mcam; - if (!rule->has_cntr) - return; + mutex_lock(&mcam->lock); - free_req.hdr.pcifunc = pcifunc; - free_req.cntr = rule->cntr; + __rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule); - rvu_mbox_handler_npc_mcam_free_counter(rvu, &free_req, &free_rsp); - rule->has_cntr = false; + mutex_unlock(&mcam->lock); } static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc, struct rvu_npc_mcam_rule *rule, struct npc_install_flow_rsp *rsp) { - struct npc_mcam_alloc_counter_req cntr_req = { 0 }; - struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 }; - int err; + struct npc_mcam *mcam = &rvu->hw->mcam; - cntr_req.hdr.pcifunc = pcifunc; - cntr_req.contig = true; - cntr_req.count = 1; + mutex_lock(&mcam->lock); - /* we try to allocate a counter to track the stats of this - * rule. If counter could not be allocated then proceed - * without counter because counters are limited than entries. 
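
The NPC_TCP_FLAGS extractor added above scans two bytes at offset 12 of the TCP header, i.e. the data-offset nibble plus the nine flag bits, and the NPC_WRITE_FLOW hunk shows the value travelling in network byte order. A hedged example of a SYN-only match built on it (assuming packet and mask in npc_install_flow_req carry a big-endian tcp_flags field, as that hunk implies):

/* Sketch: match TCP segments whose flag bits are exactly SYN */
req->features |= BIT_ULL(NPC_TCP_FLAGS);
req->packet.tcp_flags = cpu_to_be16(0x0002);	/* SYN */
req->mask.tcp_flags = cpu_to_be16(0x01ff);	/* compare all nine flag bits, ignore data offset */
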
- */ - err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, - &cntr_rsp); - if (!err && cntr_rsp.count) { - rule->cntr = cntr_rsp.cntr; - rule->has_cntr = true; - rsp->counter = rule->cntr; - } else { - rsp->counter = err; - } + __rvu_mcam_add_counter_to_rule(rvu, pcifunc, rule, rsp); + + mutex_unlock(&mcam->lock); } static int npc_mcast_update_action_index(struct rvu *rvu, struct npc_install_flow_req *req, @@ -1183,6 +1169,8 @@ static int npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, action.pf_func = target; action.op = NIX_RX_ACTIONOP_UCAST; } + if (req->match_id) + action.match_id = req->match_id; } entry->action = *(u64 *)&action; @@ -1410,6 +1398,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, struct npc_install_flow_rsp *rsp) { bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK); + bool from_rep_dev = !!is_rep_dev(rvu, req->hdr.pcifunc); struct rvu_switch *rswitch = &rvu->rswitch; int blkaddr, nixlf, err; struct rvu_pfvf *pfvf; @@ -1463,18 +1452,21 @@ process_flow: * hence modify pcifunc accordingly. */ - /* AF installing for a PF/VF */ - if (!req->hdr.pcifunc) + if (!req->hdr.pcifunc) { + /* AF installing for a PF/VF */ target = req->vf; - /* PF installing for its VF */ - else if (!from_vf && req->vf) { + } else if (!from_vf && req->vf && !from_rep_dev) { + /* PF installing for its VF */ target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf; pf_set_vfs_mac = req->default_rule && (req->features & BIT_ULL(NPC_DMAC)); - } - /* msg received from PF/VF */ - else + } else if (from_rep_dev && req->vf) { + /* Representor device installing for a representee */ + target = req->vf; + } else { + /* msg received from PF/VF */ target = req->hdr.pcifunc; + } /* ignore chan_mask in case pf func is not AF, revisit later */ if (!is_pffunc_af(req->hdr.pcifunc)) @@ -1486,8 +1478,10 @@ process_flow: pfvf = rvu_get_pfvf(rvu, target); + if (from_rep_dev) + req->channel = pfvf->rx_chan_base; /* PF installing for its VF */ - if (req->hdr.pcifunc && !from_vf && req->vf) + if (req->hdr.pcifunc && !from_vf && req->vf && !from_rep_dev) set_bit(PF_SET_VF_CFG, &pfvf->flags); /* update req destination mac addr */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 6f73ad9807f0..62cdc714ba57 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -121,6 +121,7 @@ #define NPA_AF_LF_RST (0x0020) #define NPA_AF_GEN_CFG (0x0030) #define NPA_AF_NDC_CFG (0x0040) +#define NPA_AF_NDC_SYNC (0x0050) #define NPA_AF_INP_CTL (0x00D0) #define NPA_AF_ACTIVE_CYCLES_PC (0x00F0) #define NPA_AF_AVG_DELAY (0x0100) @@ -239,6 +240,7 @@ #define NIX_AF_RX_CPTX_INST_ADDR (0x0310) #define NIX_AF_RX_CPTX_INST_QSEL(a) (0x0320ull | (uint64_t)(a) << 3) #define NIX_AF_RX_CPTX_CREDIT(a) (0x0360ull | (uint64_t)(a) << 3) +#define NIX_AF_NDC_RX_SYNC (0x03E0) #define NIX_AF_NDC_TX_SYNC (0x03F0) #define NIX_AF_AQ_CFG (0x0400) #define NIX_AF_AQ_BASE (0x0410) @@ -429,6 +431,8 @@ #define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16) #define NIX_AF_LINKX_CFG(a) (0x4010 | (a) << 17) #define NIX_AF_MDQX_IN_MD_COUNT(a) (0x14e0 | (a) << 16) +#define NIX_AF_SMQX_STATUS(a) (0x730 | (a) << 16) +#define NIX_AF_MDQX_OUT_MD_COUNT(a) (0xdb0 | (a) << 16) #define NIX_PRIV_AF_INT_CFG (0x8000000) #define NIX_PRIV_LFX_CFG (0x8000010) @@ -439,6 +443,15 @@ #define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16) #define NIX_AF_LINKX_MCS_CNT_MASK GENMASK_ULL(33, 32) +#define 
NIX_CONST_MAX_BPIDS GENMASK_ULL(23, 12) +#define NIX_CONST_SDP_CHANS GENMASK_ULL(11, 0) +#define NIX_VLAN_ETYPE_MASK GENMASK_ULL(63, 48) + +#define NIX_AF_MDQ_PARENT_MASK GENMASK_ULL(24, 16) +#define NIX_AF_TL4_PARENT_MASK GENMASK_ULL(23, 16) +#define NIX_AF_TL3_PARENT_MASK GENMASK_ULL(23, 16) +#define NIX_AF_TL2_PARENT_MASK GENMASK_ULL(20, 16) + /* SSO */ #define SSO_AF_CONST (0x1000) #define SSO_AF_CONST1 (0x1008) @@ -533,6 +546,7 @@ #define CPT_AF_CTX_PSH_PC (0x49450ull) #define CPT_AF_CTX_PSH_LATENCY_PC (0x49458ull) #define CPT_AF_CTX_CAM_DATA(a) (0x49800ull | (u64)(a) << 3) +#define CPT_AF_RXC_CFG1 (0x50000ull) #define CPT_AF_RXC_TIME (0x50010ull) #define CPT_AF_RXC_TIME_CFG (0x50018ull) #define CPT_AF_RXC_DFRG (0x50020ull) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c new file mode 100644 index 000000000000..052ae5923e3a --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c @@ -0,0 +1,468 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. + * + */ + +#include <linux/bitfield.h> +#include <linux/types.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "rvu.h" +#include "rvu_reg.h" + +#define M(_name, _id, _fn_name, _req_type, _rsp_type) \ +static struct _req_type __maybe_unused \ +*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \ +{ \ + struct _req_type *req; \ + \ + req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \ + &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \ + sizeof(struct _rsp_type)); \ + if (!req) \ + return NULL; \ + req->hdr.sig = OTX2_MBOX_REQ_SIG; \ + req->hdr.id = _id; \ + return req; \ +} + +MBOX_UP_REP_MESSAGES +#undef M + +static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, event->pcifunc); + struct rep_event *msg; + int pf; + + pf = rvu_get_pf(event->pcifunc); + + if (event->event & RVU_EVENT_MAC_ADDR_CHANGE) + ether_addr_copy(pfvf->mac_addr, event->evt_data.mac); + + mutex_lock(&rvu->mbox_lock); + msg = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf); + if (!msg) { + mutex_unlock(&rvu->mbox_lock); + return -ENOMEM; + } + + msg->hdr.pcifunc = event->pcifunc; + msg->event = event->event; + + memcpy(&msg->evt_data, &event->evt_data, sizeof(struct rep_evt_data)); + + otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf); + + otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf); + + mutex_unlock(&rvu->mbox_lock); + return 0; +} + +static void rvu_rep_wq_handler(struct work_struct *work) +{ + struct rvu *rvu = container_of(work, struct rvu, rep_evt_work); + struct rep_evtq_ent *qentry; + struct rep_event *event; + unsigned long flags; + + do { + spin_lock_irqsave(&rvu->rep_evtq_lock, flags); + qentry = list_first_entry_or_null(&rvu->rep_evtq_head, + struct rep_evtq_ent, + node); + if (qentry) + list_del(&qentry->node); + + spin_unlock_irqrestore(&rvu->rep_evtq_lock, flags); + if (!qentry) + break; /* nothing more to process */ + + event = &qentry->event; + + rvu_rep_up_notify(rvu, event); + kfree(qentry); + } while (1); +} + +int rvu_mbox_handler_rep_event_notify(struct rvu *rvu, struct rep_event *req, + struct msg_rsp *rsp) +{ + struct rep_evtq_ent *qentry; + + qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC); + if (!qentry) + return -ENOMEM; + + qentry->event = *req; + spin_lock(&rvu->rep_evtq_lock); + list_add_tail(&qentry->node, &rvu->rep_evtq_head); + 
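
rvu_rep_up_notify() above shows the AF-to-PF up-mbox skeleton this file uses throughout: allocate the up-message, wait for the previous one to be consumed, send, all serialized by mbox_lock. Reduced to its core (a sketch; message filling elided):

mutex_lock(&rvu->mbox_lock);
msg = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf);
if (!msg) {
	mutex_unlock(&rvu->mbox_lock);
	return -ENOMEM;
}
/* ... fill *msg ... */
otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);	/* prior up-msg consumed */
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
mutex_unlock(&rvu->mbox_lock);
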
spin_unlock(&rvu->rep_evtq_lock); + queue_work(rvu->rep_evt_wq, &rvu->rep_evt_work); + return 0; +} + +int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable) +{ + struct rep_event *req; + int pf; + + if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) + return 0; + + pf = rvu_get_pf(rvu->rep_pcifunc); + + mutex_lock(&rvu->mbox_lock); + req = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf); + if (!req) { + mutex_unlock(&rvu->mbox_lock); + return -ENOMEM; + } + + req->hdr.pcifunc = rvu->rep_pcifunc; + req->event |= RVU_EVENT_PFVF_STATE; + req->pcifunc = pcifunc; + req->evt_data.vf_state = enable; + + otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf); + otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf); + + mutex_unlock(&rvu->mbox_lock); + return 0; +} + +#define RVU_LF_RX_STATS(reg) \ + rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, reg)) + +#define RVU_LF_TX_STATS(reg) \ + rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, reg)) + +int rvu_mbox_handler_nix_lf_stats(struct rvu *rvu, + struct nix_stats_req *req, + struct nix_stats_rsp *rsp) +{ + u16 pcifunc = req->pcifunc; + int nixlf, blkaddr, err; + struct msg_req rst_req; + struct msg_rsp rst_rsp; + + err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + if (err) + return 0; + + if (req->reset) { + rst_req.hdr.pcifunc = pcifunc; + return rvu_mbox_handler_nix_stats_rst(rvu, &rst_req, &rst_rsp); + } + rsp->rx.octs = RVU_LF_RX_STATS(RX_OCTS); + rsp->rx.ucast = RVU_LF_RX_STATS(RX_UCAST); + rsp->rx.bcast = RVU_LF_RX_STATS(RX_BCAST); + rsp->rx.mcast = RVU_LF_RX_STATS(RX_MCAST); + rsp->rx.drop = RVU_LF_RX_STATS(RX_DROP); + rsp->rx.err = RVU_LF_RX_STATS(RX_ERR); + rsp->rx.drop_octs = RVU_LF_RX_STATS(RX_DROP_OCTS); + rsp->rx.drop_mcast = RVU_LF_RX_STATS(RX_DRP_MCAST); + rsp->rx.drop_bcast = RVU_LF_RX_STATS(RX_DRP_BCAST); + + rsp->tx.octs = RVU_LF_TX_STATS(TX_OCTS); + rsp->tx.ucast = RVU_LF_TX_STATS(TX_UCAST); + rsp->tx.bcast = RVU_LF_TX_STATS(TX_BCAST); + rsp->tx.mcast = RVU_LF_TX_STATS(TX_MCAST); + rsp->tx.drop = RVU_LF_TX_STATS(TX_DROP); + + rsp->pcifunc = req->pcifunc; + return 0; +} + +static u16 rvu_rep_get_vlan_id(struct rvu *rvu, u16 pcifunc) +{ + int id; + + for (id = 0; id < rvu->rep_cnt; id++) + if (rvu->rep2pfvf_map[id] == pcifunc) + return id; + return 0; +} + +static int rvu_rep_tx_vlan_cfg(struct rvu *rvu, u16 pcifunc, + u16 vlan_tci, int *vidx) +{ + struct nix_vtag_config_rsp rsp = {}; + struct nix_vtag_config req = {}; + u64 etype = ETH_P_8021Q; + int err; + + /* Insert vlan tag */ + req.hdr.pcifunc = pcifunc; + req.vtag_size = VTAGSIZE_T4; + req.cfg_type = 0; /* tx vlan cfg */ + req.tx.cfg_vtag0 = true; + req.tx.vtag0 = FIELD_PREP(NIX_VLAN_ETYPE_MASK, etype) | vlan_tci; + + err = rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp); + if (err) { + dev_err(rvu->dev, "Tx vlan config failed\n"); + return err; + } + *vidx = rsp.vtag0_idx; + return 0; +} + +static int rvu_rep_rx_vlan_cfg(struct rvu *rvu, u16 pcifunc) +{ + struct nix_vtag_config req = {}; + struct nix_vtag_config_rsp rsp; + + /* config strip, capture and size */ + req.hdr.pcifunc = pcifunc; + req.vtag_size = VTAGSIZE_T4; + req.cfg_type = 1; /* rx vlan cfg */ + req.rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0; + req.rx.strip_vtag = true; + req.rx.capture_vtag = false; + + return rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp); +} + +static int rvu_rep_install_rx_rule(struct rvu *rvu, u16 pcifunc, + u16 entry, bool rte) +{ + struct npc_install_flow_req req = {}; + struct npc_install_flow_rsp rsp = {}; + struct rvu_pfvf *pfvf; + u16 vlan_tci, rep_id; + + 
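
Before the rule-install helpers continue below, note the encoding they rely on: each representee is identified by a rep_id carried in the outer VLAN TCI, and bit 8 of the TCI marks the representee-to-representor ("rte") direction. That is also why every mapped function ends up with four MCAM rules (RX and TX for each direction) and why rvu_switch_enable() later in this diff requests four entries per function in rep mode. As hypothetical helpers:

/* Sketch: TCI layout used by the representor steering rules */
static inline u16 rep_encode_tci(u16 rep_id, bool rte)
{
	return rte ? (rep_id | BIT(8)) : rep_id;	/* bit 8 = direction */
}

static inline u16 rep_decode_id(u16 vlan_tci)
{
	return vlan_tci & 0xff;		/* assumes rep_id fits in the low byte */
}
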
pfvf = rvu_get_pfvf(rvu, pcifunc); + + /* To steer the traffic from Representee to Representor */ + rep_id = rvu_rep_get_vlan_id(rvu, pcifunc); + if (rte) { + vlan_tci = rep_id | BIT_ULL(8); + req.vf = rvu->rep_pcifunc; + req.op = NIX_RX_ACTIONOP_UCAST; + req.index = rep_id; + } else { + vlan_tci = rep_id; + req.vf = pcifunc; + req.op = NIX_RX_ACTION_DEFAULT; + } + + rvu_rep_rx_vlan_cfg(rvu, req.vf); + req.entry = entry; + req.hdr.pcifunc = 0; /* AF is requester */ + req.features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_VLAN_ETYPE_CTAG); + req.vtag0_valid = true; + req.vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0; + req.packet.vlan_etype = cpu_to_be16(ETH_P_8021Q); + req.mask.vlan_etype = cpu_to_be16(ETH_P_8021Q); + req.packet.vlan_tci = cpu_to_be16(vlan_tci); + req.mask.vlan_tci = cpu_to_be16(0xffff); + + req.channel = RVU_SWITCH_LBK_CHAN; + req.chan_mask = 0xffff; + req.intf = pfvf->nix_rx_intf; + + return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); +} + +static int rvu_rep_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry, + bool rte) +{ + struct npc_install_flow_req req = {}; + struct npc_install_flow_rsp rsp = {}; + struct rvu_pfvf *pfvf; + int vidx, err; + u16 vlan_tci; + u8 lbkid; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + vlan_tci = rvu_rep_get_vlan_id(rvu, pcifunc); + if (rte) + vlan_tci |= BIT_ULL(8); + + err = rvu_rep_tx_vlan_cfg(rvu, pcifunc, vlan_tci, &vidx); + if (err) + return err; + + lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1; + req.hdr.pcifunc = 0; /* AF is requester */ + if (rte) { + req.vf = pcifunc; + } else { + req.vf = rvu->rep_pcifunc; + req.packet.sq_id = vlan_tci; + req.mask.sq_id = 0xffff; + } + + req.entry = entry; + req.intf = pfvf->nix_tx_intf; + req.op = NIX_TX_ACTIONOP_UCAST_CHAN; + req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN; + req.set_cntr = 1; + req.vtag0_def = vidx; + req.vtag0_op = 1; + return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); +} + +int rvu_rep_install_mcam_rules(struct rvu *rvu) +{ + struct rvu_switch *rswitch = &rvu->rswitch; + u16 start = rswitch->start_entry; + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc, entry = 0; + int pf, vf, numvfs; + int err, nixlf, i; + u8 rep; + + for (pf = 1; pf < hw->total_pfs; pf++) { + if (!is_pf_cgxmapped(rvu, pf)) + continue; + + pcifunc = pf << RVU_PFVF_PF_SHIFT; + rvu_get_nix_blkaddr(rvu, pcifunc); + rep = true; + for (i = 0; i < 2; i++) { + err = rvu_rep_install_rx_rule(rvu, pcifunc, + start + entry, rep); + if (err) + return err; + rswitch->entry2pcifunc[entry++] = pcifunc; + + err = rvu_rep_install_tx_rule(rvu, pcifunc, + start + entry, rep); + if (err) + return err; + rswitch->entry2pcifunc[entry++] = pcifunc; + rep = false; + } + + rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL); + for (vf = 0; vf < numvfs; vf++) { + pcifunc = pf << RVU_PFVF_PF_SHIFT | + ((vf + 1) & RVU_PFVF_FUNC_MASK); + rvu_get_nix_blkaddr(rvu, pcifunc); + + /* Skip installing rules if nixlf is not attached */ + err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); + if (err) + continue; + rep = true; + for (i = 0; i < 2; i++) { + err = rvu_rep_install_rx_rule(rvu, pcifunc, + start + entry, + rep); + if (err) + return err; + rswitch->entry2pcifunc[entry++] = pcifunc; + + err = rvu_rep_install_tx_rule(rvu, pcifunc, + start + entry, + rep); + if (err) + return err; + rswitch->entry2pcifunc[entry++] = pcifunc; + rep = false; + } + } + } + + /* Initialize the wq for handling REP events */ + spin_lock_init(&rvu->rep_evtq_lock); + INIT_LIST_HEAD(&rvu->rep_evtq_head); + INIT_WORK(&rvu->rep_evt_work, rvu_rep_wq_handler); +
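
rvu_rep_get_vlan_id() above does the PF_FUNC-to-index lookup by linear search; the map itself is filled in PF/VF discovery order by rvu_mbox_handler_get_rep_cnt() further down. The forward direction, resolving a datapath index back to a PF_FUNC, is a plain array access (hypothetical helper):

/* Sketch: representor index -> PF_FUNC, bounds-checked */
static u16 rep_to_pcifunc(struct rvu *rvu, u16 rep_id)
{
	return (rep_id < rvu->rep_cnt) ? rvu->rep2pfvf_map[rep_id] : 0;
}
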
rvu->rep_evt_wq = alloc_workqueue("rep_evt_wq", 0, 0); + if (!rvu->rep_evt_wq) { + dev_err(rvu->dev, "REP workqueue allocation failed\n"); + return -ENOMEM; + } + return 0; +} + +void rvu_rep_update_rules(struct rvu *rvu, u16 pcifunc, bool ena) +{ + struct rvu_switch *rswitch = &rvu->rswitch; + struct npc_mcam *mcam = &rvu->hw->mcam; + u32 max = rswitch->used_entries; + int blkaddr; + u16 entry; + + if (!rswitch->used_entries) + return; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + + if (blkaddr < 0) + return; + + rvu_switch_enable_lbk_link(rvu, pcifunc, ena); + mutex_lock(&mcam->lock); + for (entry = 0; entry < max; entry++) { + if (rswitch->entry2pcifunc[entry] == pcifunc) + npc_enable_mcam_entry(rvu, mcam, blkaddr, entry, ena); + } + mutex_unlock(&mcam->lock); +} + +int rvu_rep_pf_init(struct rvu *rvu) +{ + u16 pcifunc = rvu->rep_pcifunc; + struct rvu_pfvf *pfvf; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + set_bit(NIXLF_INITIALIZED, &pfvf->flags); + rvu_switch_enable_lbk_link(rvu, pcifunc, true); + rvu_rep_rx_vlan_cfg(rvu, pcifunc); + return 0; +} + +int rvu_mbox_handler_esw_cfg(struct rvu *rvu, struct esw_cfg_req *req, + struct msg_rsp *rsp) +{ + if (req->hdr.pcifunc != rvu->rep_pcifunc) + return 0; + + rvu->rep_mode = req->ena; + + if (!rvu->rep_mode) + rvu_npc_free_mcam_entries(rvu, req->hdr.pcifunc, -1); + + return 0; +} + +int rvu_mbox_handler_get_rep_cnt(struct rvu *rvu, struct msg_req *req, + struct get_rep_cnt_rsp *rsp) +{ + int pf, vf, numvfs, hwvf, rep = 0; + u16 pcifunc; + + rvu->rep_pcifunc = req->hdr.pcifunc; + rsp->rep_cnt = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs; + rvu->rep_cnt = rsp->rep_cnt; + + rvu->rep2pfvf_map = devm_kzalloc(rvu->dev, rvu->rep_cnt * + sizeof(u16), GFP_KERNEL); + if (!rvu->rep2pfvf_map) + return -ENOMEM; + + for (pf = 0; pf < rvu->hw->total_pfs; pf++) { + if (!is_pf_cgxmapped(rvu, pf)) + continue; + pcifunc = pf << RVU_PFVF_PF_SHIFT; + rvu->rep2pfvf_map[rep] = pcifunc; + rsp->rep_pf_map[rep] = pcifunc; + rep++; + rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); + for (vf = 0; vf < numvfs; vf++) { + rvu->rep2pfvf_map[rep] = pcifunc | + ((vf + 1) & RVU_PFVF_FUNC_MASK); + rsp->rep_pf_map[rep] = rvu->rep2pfvf_map[rep]; + rep++; + } + } + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c index ae50d56258ec..38cfe148f4b7 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c @@ -40,8 +40,12 @@ bool is_sdp_pf(u16 pcifunc) !(pcifunc & RVU_PFVF_FUNC_MASK)); } -bool is_sdp_vf(u16 pcifunc) +#define RVU_SDP_VF_DEVID 0xA0F7 +bool is_sdp_vf(struct rvu *rvu, u16 pcifunc) { + if (!(pcifunc & ~RVU_PFVF_FUNC_MASK)) + return (rvu->vf_devid == RVU_SDP_VF_DEVID); + return (is_sdp_pfvf(pcifunc) && !!(pcifunc & RVU_PFVF_FUNC_MASK)); } @@ -52,6 +56,14 @@ int rvu_sdp_init(struct rvu *rvu) struct rvu_pfvf *pfvf; u32 i = 0; + if (rvu->fwdata->channel_data.valid) { + sdp_pf_num[0] = 0; + pfvf = &rvu->pf[sdp_pf_num[0]]; + pfvf->sdp_info = &rvu->fwdata->channel_data.info; + + return 0; + } + while ((i < MAX_SDP) && (pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OTX2_SDP_PF, pdev)) != NULL) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h index 5ef406c7e8a4..77ac94cb2ec4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -71,13 +71,11 @@ enum cpt_af_int_vec_e { 
CPT_AF_INT_VEC_CNT = 0x4, }; -enum cpt_10k_af_int_vec_e { +enum cpt_cn10k_flt_int_vec_e { CPT_10K_AF_INT_VEC_FLT0 = 0x0, CPT_10K_AF_INT_VEC_FLT1 = 0x1, CPT_10K_AF_INT_VEC_FLT2 = 0x2, - CPT_10K_AF_INT_VEC_RVU = 0x3, - CPT_10K_AF_INT_VEC_RAS = 0x4, - CPT_10K_AF_INT_VEC_CNT = 0x5, + CPT_10K_AF_INT_VEC_FLT_MAX = 0x3, }; /* NPA Admin function Interrupt Vector Enumeration */ @@ -825,4 +823,30 @@ enum nix_tx_vtag_op { #define VTAG_STRIP BIT_ULL(4) #define VTAG_CAPTURE BIT_ULL(5) +/* NIX TX stats */ +enum nix_stat_lf_tx { + TX_UCAST = 0x0, + TX_BCAST = 0x1, + TX_MCAST = 0x2, + TX_DROP = 0x3, + TX_OCTS = 0x4, + TX_STATS_ENUM_LAST, +}; + +/* NIX RX stats */ +enum nix_stat_lf_rx { + RX_OCTS = 0x0, + RX_UCAST = 0x1, + RX_BCAST = 0x2, + RX_MCAST = 0x3, + RX_DROP = 0x4, + RX_DROP_OCTS = 0x5, + RX_FCS = 0x6, + RX_ERR = 0x7, + RX_DRP_BCAST = 0x8, + RX_DRP_MCAST = 0x9, + RX_DRP_L3BCAST = 0xa, + RX_DRP_L3MCAST = 0xb, + RX_STATS_ENUM_LAST, +}; #endif /* RVU_STRUCT_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c index 854045ed3b06..268efb7c1c15 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c @@ -8,7 +8,7 @@ #include <linux/bitfield.h> #include "rvu.h" -static void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool enable) +void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool enable) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct nix_hw *nix_hw; @@ -166,6 +166,8 @@ void rvu_switch_enable(struct rvu *rvu) alloc_req.contig = true; alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs; + if (rvu->rep_mode) + alloc_req.count = alloc_req.count * 4; ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req, &alloc_rsp); if (ret) { @@ -189,7 +191,12 @@ void rvu_switch_enable(struct rvu *rvu) rswitch->used_entries = alloc_rsp.count; rswitch->start_entry = alloc_rsp.entry; - ret = rvu_switch_install_rules(rvu); + if (rvu->rep_mode) { + rvu_rep_pf_init(rvu); + ret = rvu_rep_install_mcam_rules(rvu); + } else { + ret = rvu_switch_install_rules(rvu); + } if (ret) goto uninstall_rules; @@ -222,6 +229,9 @@ void rvu_switch_disable(struct rvu *rvu) if (!rswitch->used_entries) return; + if (rvu->rep_mode) + goto free_ents; + for (pf = 1; pf < hw->total_pfs; pf++) { if (!is_pf_cgxmapped(rvu, pf)) continue; @@ -249,6 +259,7 @@ void rvu_switch_disable(struct rvu *rvu) } } +free_ents: uninstall_req.start = rswitch->start_entry; uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1; free_req.all = 1; @@ -258,12 +269,15 @@ void rvu_switch_disable(struct rvu *rvu) kfree(rswitch->entry2pcifunc); } -void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc) +void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc, bool ena) { struct rvu_switch *rswitch = &rvu->rswitch; u32 max = rswitch->used_entries; u16 entry; + if (rvu->rep_mode) + return rvu_rep_update_rules(rvu, pcifunc, ena); + if (!rswitch->used_entries) return; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h index 28984d0e848a..5704520f9b02 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h @@ -24,7 +24,7 @@ TRACE_EVENT(otx2_msg_alloc, __field(u16, id) __field(u64, size) ), - TP_fast_assign(__assign_str(dev, pci_name(pdev)); + TP_fast_assign(__assign_str(dev); __entry->id = id; __entry->size = size; ), @@ 
-39,7 +39,7 @@ TRACE_EVENT(otx2_msg_send, __field(u16, num_msgs) __field(u64, msg_size) ), - TP_fast_assign(__assign_str(dev, pci_name(pdev)); + TP_fast_assign(__assign_str(dev); __entry->num_msgs = num_msgs; __entry->msg_size = msg_size; ), @@ -55,7 +55,7 @@ TRACE_EVENT(otx2_msg_check, __field(u16, rspid) __field(int, rc) ), - TP_fast_assign(__assign_str(dev, pci_name(pdev)); + TP_fast_assign(__assign_str(dev); __entry->reqid = reqid; __entry->rspid = rspid; __entry->rc = rc; @@ -72,8 +72,8 @@ TRACE_EVENT(otx2_msg_interrupt, __string(str, msg) __field(u64, intr) ), - TP_fast_assign(__assign_str(dev, pci_name(pdev)); - __assign_str(str, msg); + TP_fast_assign(__assign_str(dev); + __assign_str(str); __entry->intr = intr; ), TP_printk("[%s] mbox interrupt %s (0x%llx)\n", __get_str(dev), @@ -87,7 +87,7 @@ TRACE_EVENT(otx2_msg_process, __field(u16, id) __field(int, err) ), - TP_fast_assign(__assign_str(dev, pci_name(pdev)); + TP_fast_assign(__assign_str(dev); __entry->id = id; __entry->err = err; ), diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index 5664f768cb0c..cb6513ab35e7 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -5,14 +5,16 @@ obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o otx2_ptp.o obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o +obj-$(CONFIG_RVU_ESWITCH) += rvu_rep.o rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \ otx2_devlink.o qos_sq.o qos.o -rvu_nicvf-y := otx2_vf.o otx2_devlink.o +rvu_nicvf-y := otx2_vf.o +rvu_rep-y := rep.o rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o -rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o +rvu_nicpf-$(CONFIG_XFRM_OFFLOAD) += cn10k_ipsec.o ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index c1c99d7054f8..a15cc86635d6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -72,7 +72,7 @@ int cn10k_lmtst_init(struct otx2_nic *pfvf) } EXPORT_SYMBOL(cn10k_lmtst_init); -int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) +int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura) { struct nix_cn10k_aq_enq_req *aq; struct otx2_nic *pfvf = dev; @@ -88,7 +88,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) aq->sq.ena = 1; aq->sq.smq = otx2_get_smq_idx(pfvf, qidx); aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); - aq->sq.default_chan = pfvf->hw.tx_chan_base; + aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset; aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ aq->sq.sqb_aura = sqb_aura; aq->sq.sq_int_ena = NIX_SQINT_BITS; @@ -203,6 +203,11 @@ int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf) rsp = (struct nix_bandprof_alloc_rsp *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + rc = PTR_ERR(rsp); + goto out; + } + if (!rsp->prof_count[BAND_PROF_LEAF_LAYER]) { rc = -EIO; goto out; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h index c1861f7de254..e3f0bce9908f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h @@ -26,7 +26,7 @@ static inline int mtu_to_dwrr_weight(struct 
otx2_nic *pfvf, int mtu) int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); -int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); +int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura); int cn10k_lmtst_init(struct otx2_nic *pfvf); int cn10k_free_all_ipolicers(struct otx2_nic *pfvf); int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c new file mode 100644 index 000000000000..09a5b5268205 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c @@ -0,0 +1,1056 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell IPSEC offload driver + * + * Copyright (C) 2024 Marvell. + */ + +#include <net/xfrm.h> +#include <linux/netdevice.h> +#include <linux/bitfield.h> +#include <crypto/aead.h> +#include <crypto/gcm.h> + +#include "otx2_common.h" +#include "otx2_struct.h" +#include "cn10k_ipsec.h" + +static bool is_dev_support_ipsec_offload(struct pci_dev *pdev) +{ + return is_dev_cn10ka_b0(pdev) || is_dev_cn10kb(pdev); +} + +static bool cn10k_cpt_device_set_inuse(struct otx2_nic *pf) +{ + enum cn10k_cpt_hw_state_e state; + + while (true) { + state = atomic_cmpxchg(&pf->ipsec.cpt_state, + CN10K_CPT_HW_AVAILABLE, + CN10K_CPT_HW_IN_USE); + if (state == CN10K_CPT_HW_AVAILABLE) + return true; + if (state == CN10K_CPT_HW_UNAVAILABLE) + return false; + + mdelay(1); + } +} + +static void cn10k_cpt_device_set_available(struct otx2_nic *pf) +{ + atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_AVAILABLE); +} + +static void cn10k_cpt_device_set_unavailable(struct otx2_nic *pf) +{ + atomic_set(&pf->ipsec.cpt_state, CN10K_CPT_HW_UNAVAILABLE); +} + +static int cn10k_outb_cptlf_attach(struct otx2_nic *pf) +{ + struct rsrc_attach *attach; + int ret = -ENOMEM; + + mutex_lock(&pf->mbox.lock); + /* Get memory to put this msg */ + attach = otx2_mbox_alloc_msg_attach_resources(&pf->mbox); + if (!attach) + goto unlock; + + attach->cptlfs = true; + attach->modify = true; + + /* Send attach request to AF */ + ret = otx2_sync_mbox_msg(&pf->mbox); + +unlock: + mutex_unlock(&pf->mbox.lock); + return ret; +} + +static int cn10k_outb_cptlf_detach(struct otx2_nic *pf) +{ + struct rsrc_detach *detach; + int ret = -ENOMEM; + + mutex_lock(&pf->mbox.lock); + detach = otx2_mbox_alloc_msg_detach_resources(&pf->mbox); + if (!detach) + goto unlock; + + detach->partial = true; + detach->cptlfs = true; + + /* Send detach request to AF */ + ret = otx2_sync_mbox_msg(&pf->mbox); + +unlock: + mutex_unlock(&pf->mbox.lock); + return ret; +} + +static int cn10k_outb_cptlf_alloc(struct otx2_nic *pf) +{ + struct cpt_lf_alloc_req_msg *req; + int ret = -ENOMEM; + + mutex_lock(&pf->mbox.lock); + req = otx2_mbox_alloc_msg_cpt_lf_alloc(&pf->mbox); + if (!req) + goto unlock; + + /* PF function */ + req->nix_pf_func = pf->pcifunc; + /* Enable SE-IE Engine Group */ + req->eng_grpmsk = 1 << CN10K_DEF_CPT_IPSEC_EGRP; + + ret = otx2_sync_mbox_msg(&pf->mbox); + +unlock: + mutex_unlock(&pf->mbox.lock); + return ret; +} + +static void cn10k_outb_cptlf_free(struct otx2_nic *pf) +{ + mutex_lock(&pf->mbox.lock); + otx2_mbox_alloc_msg_cpt_lf_free(&pf->mbox); + otx2_sync_mbox_msg(&pf->mbox); + mutex_unlock(&pf->mbox.lock); +} + +static int cn10k_outb_cptlf_config(struct otx2_nic *pf) +{ + struct cpt_inline_ipsec_cfg_msg *req; + int ret = -ENOMEM; + + mutex_lock(&pf->mbox.lock); + req = 
otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(&pf->mbox); + if (!req) + goto unlock; + + req->dir = CPT_INLINE_OUTBOUND; + req->enable = 1; + req->nix_pf_func = pf->pcifunc; + ret = otx2_sync_mbox_msg(&pf->mbox); +unlock: + mutex_unlock(&pf->mbox.lock); + return ret; +} + +static void cn10k_outb_cptlf_iq_enable(struct otx2_nic *pf) +{ + u64 reg_val; + + /* Set Execution Enable of instruction queue */ + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG); + reg_val |= BIT_ULL(16); + otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val); + + /* Set iqueue's enqueuing */ + reg_val = otx2_read64(pf, CN10K_CPT_LF_CTL); + reg_val |= BIT_ULL(0); + otx2_write64(pf, CN10K_CPT_LF_CTL, reg_val); +} + +static void cn10k_outb_cptlf_iq_disable(struct otx2_nic *pf) +{ + u32 inflight, grb_cnt, gwb_cnt; + u32 nq_ptr, dq_ptr; + int timeout = 20; + u64 reg_val; + int cnt; + + /* Disable instructions enqueuing */ + otx2_write64(pf, CN10K_CPT_LF_CTL, 0ull); + + /* Wait for instruction queue to become empty. + * CPT_LF_INPROG.INFLIGHT count is zero + */ + do { + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG); + inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val); + if (!inflight) + break; + + usleep_range(10000, 20000); + if (timeout-- < 0) { + netdev_err(pf->netdev, "Timed out cleaning up CPT IQ\n"); + break; + } + } while (1); + + /* Disable executions in the LF's queue, + * the queue should be empty at this point + */ + reg_val &= ~BIT_ULL(16); + otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val); + + /* Wait for the enqueue (NQ) and dequeue (DQ) pointers to converge */ + cnt = 0; + do { + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG); + if (reg_val & BIT_ULL(31)) + cnt = 0; + else + cnt++; + reg_val = otx2_read64(pf, CN10K_CPT_LF_Q_GRP_PTR); + nq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_NQ_PTR, reg_val); + dq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val); + } while ((cnt < 10) && (nq_ptr != dq_ptr)); + + cnt = 0; + do { + reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG); + inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val); + grb_cnt = FIELD_GET(CPT_LF_INPROG_GRB_CNT, reg_val); + gwb_cnt = FIELD_GET(CPT_LF_INPROG_GWB_CNT, reg_val); + if (inflight == 0 && gwb_cnt < 40 && + (grb_cnt == 0 || grb_cnt == 40)) + cnt++; + else + cnt = 0; + } while (cnt < 10); +} + +/* Allocate memory for CPT outbound Instruction queue.
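+ * Note (inferred from CPT_LF_Q_BASE_ADDR in cn10k_ipsec.h): the queue + * base written to CPT_LF_Q_BASE holds only address bits [52:7], so the + * base must be 128-byte (OTX2_ALIGN) aligned; hence iq->size reserves + * an extra OTX2_ALIGN bytes and iq->vaddr/iq->dma_addr are PTR_ALIGNed + * below.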
+ * Instruction queue memory format is: + * ----------------------------- + * | Instruction Group memory | + * | (CPT_LF_Q_SIZE[SIZE_DIV40] | + * | x 16 Bytes) | + * | | + * ----------------------------- <-- CPT_LF_Q_BASE[ADDR] + * | Flow Control (128 Bytes) | + * | | + * ----------------------------- + * | Instruction Memory | + * | (CPT_LF_Q_SIZE[SIZE_DIV40] | + * | × 40 × 64 bytes) | + * | | + * ----------------------------- + */ +static int cn10k_outb_cptlf_iq_alloc(struct otx2_nic *pf) +{ + struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq; + + iq->size = CN10K_CPT_INST_QLEN_BYTES + CN10K_CPT_Q_FC_LEN + + CN10K_CPT_INST_GRP_QLEN_BYTES + OTX2_ALIGN; + + iq->real_vaddr = dma_alloc_coherent(pf->dev, iq->size, + &iq->real_dma_addr, GFP_KERNEL); + if (!iq->real_vaddr) + return -ENOMEM; + + /* iq->vaddr/dma_addr points to Flow Control location */ + iq->vaddr = iq->real_vaddr + CN10K_CPT_INST_GRP_QLEN_BYTES; + iq->dma_addr = iq->real_dma_addr + CN10K_CPT_INST_GRP_QLEN_BYTES; + + /* Align pointers */ + iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_ALIGN); + iq->dma_addr = PTR_ALIGN(iq->dma_addr, OTX2_ALIGN); + return 0; +} + +static void cn10k_outb_cptlf_iq_free(struct otx2_nic *pf) +{ + struct cn10k_cpt_inst_queue *iq = &pf->ipsec.iq; + + if (iq->real_vaddr) + dma_free_coherent(pf->dev, iq->size, iq->real_vaddr, + iq->real_dma_addr); + + iq->real_vaddr = NULL; + iq->vaddr = NULL; +} + +static int cn10k_outb_cptlf_iq_init(struct otx2_nic *pf) +{ + u64 reg_val; + int ret; + + /* Allocate Memory for CPT IQ */ + ret = cn10k_outb_cptlf_iq_alloc(pf); + if (ret) + return ret; + + /* Disable IQ */ + cn10k_outb_cptlf_iq_disable(pf); + + /* Set IQ base address */ + otx2_write64(pf, CN10K_CPT_LF_Q_BASE, pf->ipsec.iq.dma_addr); + + /* Set IQ size */ + reg_val = FIELD_PREP(CPT_LF_Q_SIZE_DIV40, CN10K_CPT_SIZE_DIV40 + + CN10K_CPT_EXTRA_SIZE_DIV40); + otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, reg_val); + + return 0; +} + +static int cn10k_outb_cptlf_init(struct otx2_nic *pf) +{ + int ret; + + /* Initialize CPTLF Instruction Queue (IQ) */ + ret = cn10k_outb_cptlf_iq_init(pf); + if (ret) + return ret; + + /* Configure CPTLF for outbound ipsec offload */ + ret = cn10k_outb_cptlf_config(pf); + if (ret) + goto iq_clean; + + /* Enable CPTLF IQ */ + cn10k_outb_cptlf_iq_enable(pf); + return 0; +iq_clean: + cn10k_outb_cptlf_iq_free(pf); + return ret; +} + +static int cn10k_outb_cpt_init(struct net_device *netdev) +{ + struct otx2_nic *pf = netdev_priv(netdev); + int ret; + + /* Attach a CPT LF for outbound ipsec offload */ + ret = cn10k_outb_cptlf_attach(pf); + if (ret) + return ret; + + /* Allocate a CPT LF for outbound ipsec offload */ + ret = cn10k_outb_cptlf_alloc(pf); + if (ret) + goto detach; + + /* Initialize the CPTLF for outbound ipsec offload */ + ret = cn10k_outb_cptlf_init(pf); + if (ret) + goto lf_free; + + pf->ipsec.io_addr = (__force u64)otx2_get_regaddr(pf, + CN10K_CPT_LF_NQX(0)); + + /* Set ipsec offload enabled for this device */ + pf->flags |= OTX2_FLAG_IPSEC_OFFLOAD_ENABLED; + + cn10k_cpt_device_set_available(pf); + return 0; + +lf_free: + cn10k_outb_cptlf_free(pf); +detach: + cn10k_outb_cptlf_detach(pf); + return ret; +} + +static int cn10k_outb_cpt_clean(struct otx2_nic *pf) +{ + int ret; + + if (!cn10k_cpt_device_set_inuse(pf)) { + netdev_err(pf->netdev, "CPT LF device unavailable\n"); + return -ENODEV; + } + + /* Set ipsec offload disabled for this device */ + pf->flags &= ~OTX2_FLAG_IPSEC_OFFLOAD_ENABLED; + + /* Disable CPTLF Instruction Queue (IQ) */ + cn10k_outb_cptlf_iq_disable(pf); + + /* Set IQ base 
address and size to 0 */ + otx2_write64(pf, CN10K_CPT_LF_Q_BASE, 0); + otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, 0); + + /* Free CPTLF IQ */ + cn10k_outb_cptlf_iq_free(pf); + + /* Free and detach CPT LF */ + cn10k_outb_cptlf_free(pf); + ret = cn10k_outb_cptlf_detach(pf); + if (ret) + netdev_err(pf->netdev, "Failed to detach CPT LF\n"); + + cn10k_cpt_device_set_unavailable(pf); + return ret; +} + +static void cn10k_cpt_inst_flush(struct otx2_nic *pf, struct cpt_inst_s *inst, + u64 size) +{ + struct otx2_lmt_info *lmt_info; + u64 val = 0, tar_addr = 0; + + lmt_info = per_cpu_ptr(pf->hw.lmt_info, smp_processor_id()); + /* FIXME: val[0:10] LMT_ID. + * [12:15] no of LMTST - 1 in the burst. + * [19:63] data size of each LMTST in the burst except first. + */ + val = (lmt_info->lmt_id & 0x7FF); + /* Target address for LMTST flush tells HW how many 128bit + * words are present. + * tar_addr[6:4] size of first LMTST - 1 in units of 128b. + */ + tar_addr |= pf->ipsec.io_addr | (((size / 16) - 1) & 0x7) << 4; + dma_wmb(); + memcpy((u64 *)lmt_info->lmt_addr, inst, size); + cn10k_lmt_flush(val, tar_addr); +} + +static int cn10k_wait_for_cpt_response(struct otx2_nic *pf, + struct cpt_res_s *res) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(100); + u64 *completion_ptr = (u64 *)res; + + do { + if (time_after(jiffies, timeout)) { + netdev_err(pf->netdev, "CPT response timeout\n"); + return -EBUSY; + } + } while ((READ_ONCE(*completion_ptr) & CN10K_CPT_COMP_E_MASK) == + CN10K_CPT_COMP_E_NOTDONE); + + if (!(res->compcode == CN10K_CPT_COMP_E_GOOD || + res->compcode == CN10K_CPT_COMP_E_WARN) || res->uc_compcode) { + netdev_err(pf->netdev, "compcode=%x doneint=%x\n", + res->compcode, res->doneint); + netdev_err(pf->netdev, "uc_compcode=%x uc_info=%llx esn=%llx\n", + res->uc_compcode, (u64)res->uc_info, res->esn); + } + return 0; +} + +static int cn10k_outb_write_sa(struct otx2_nic *pf, struct qmem *sa_info) +{ + dma_addr_t res_iova, dptr_iova, sa_iova; + struct cn10k_tx_sa_s *sa_dptr; + struct cpt_inst_s inst = {}; + struct cpt_res_s *res; + u32 sa_size, off; + u64 *sptr, *dptr; + u64 reg_val; + int ret; + + sa_iova = sa_info->iova; + if (!sa_iova) + return -EINVAL; + + res = dma_alloc_coherent(pf->dev, sizeof(struct cpt_res_s), + &res_iova, GFP_ATOMIC); + if (!res) + return -ENOMEM; + + sa_size = sizeof(struct cn10k_tx_sa_s); + sa_dptr = dma_alloc_coherent(pf->dev, sa_size, &dptr_iova, GFP_ATOMIC); + if (!sa_dptr) { + dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res, + res_iova); + return -ENOMEM; + } + + sptr = (__force u64 *)sa_info->base; + dptr = (__force u64 *)sa_dptr; + for (off = 0; off < (sa_size / 8); off++) + *(dptr + off) = (__force u64)cpu_to_be64(*(sptr + off)); + + res->compcode = CN10K_CPT_COMP_E_NOTDONE; + inst.res_addr = res_iova; + inst.dptr = (u64)dptr_iova; + inst.param2 = sa_size >> 3; + inst.dlen = sa_size; + inst.opcode_major = CN10K_IPSEC_MAJOR_OP_WRITE_SA; + inst.opcode_minor = CN10K_IPSEC_MINOR_OP_WRITE_SA; + inst.cptr = sa_iova; + inst.ctx_val = 1; + inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP; + + /* Check if the CPT LF is available */ + if (!cn10k_cpt_device_set_inuse(pf)) { + ret = -ENODEV; + goto free_mem; + } + + cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s)); + dma_wmb(); + ret = cn10k_wait_for_cpt_response(pf, res); + if (ret) + goto set_available; + + /* Trigger CTX flush to write dirty data back to DRAM */ + reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH, sa_iova >> 7); + otx2_write64(pf, CN10K_CPT_LF_CTX_FLUSH, reg_val); + +set_available: +
cn10k_cpt_device_set_available(pf); +free_mem: + dma_free_coherent(pf->dev, sa_size, sa_dptr, dptr_iova); + dma_free_coherent(pf->dev, sizeof(struct cpt_res_s), res, res_iova); + return ret; +} + +static int cn10k_ipsec_get_hw_ctx_offset(void) +{ + /* Offset of the hardware context, in 8-byte words */ + return (offsetof(struct cn10k_tx_sa_s, hw_ctx) / sizeof(u64)) & 0x7F; +} + +static int cn10k_ipsec_get_ctx_push_size(void) +{ + /* Context push size, rounded up to a multiple of 8 bytes */ + return (roundup(offsetof(struct cn10k_tx_sa_s, hw_ctx), 8) / 8) & 0x7F; +} + +static int cn10k_ipsec_get_aes_key_len(int key_len) +{ + /* key_len is aes key length in bytes */ + switch (key_len) { + case 16: + return CN10K_IPSEC_SA_AES_KEY_LEN_128; + case 24: + return CN10K_IPSEC_SA_AES_KEY_LEN_192; + default: + return CN10K_IPSEC_SA_AES_KEY_LEN_256; + } +} + +static void cn10k_outb_prepare_sa(struct xfrm_state *x, + struct cn10k_tx_sa_s *sa_entry) +{ + int key_len = (x->aead->alg_key_len + 7) / 8; + struct net_device *netdev = x->xso.dev; + u8 *key = x->aead->alg_key; + struct otx2_nic *pf; + u32 *tmp_salt; + u64 *tmp_key; + int idx; + + memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s)); + + /* Context size, in units of 128-byte blocks */ + pf = netdev_priv(netdev); + sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF; + sa_entry->hw_ctx_off = cn10k_ipsec_get_hw_ctx_offset(); + sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size(); + + /* Ucode to skip two words of CPT_CTX_HW_S */ + sa_entry->ctx_hdr_size = 1; + + /* Allow Atomic operation (AOP) */ + sa_entry->aop_valid = 1; + + /* Outbound, ESP TRANSPORT/TUNNEL mode, AES-GCM */ + sa_entry->sa_dir = CN10K_IPSEC_SA_DIR_OUTB; + sa_entry->ipsec_protocol = CN10K_IPSEC_SA_IPSEC_PROTO_ESP; + sa_entry->enc_type = CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM; + sa_entry->iv_src = CN10K_IPSEC_SA_IV_SRC_PACKET; + if (x->props.mode == XFRM_MODE_TUNNEL) + sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL; + else + sa_entry->ipsec_mode = CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT; + + /* Last 4 bytes are salt */ + key_len -= 4; + sa_entry->aes_key_len = cn10k_ipsec_get_aes_key_len(key_len); + memcpy(sa_entry->cipher_key, key, key_len); + tmp_key = (u64 *)sa_entry->cipher_key; + + for (idx = 0; idx < key_len / 8; idx++) + tmp_key[idx] = (__force u64)cpu_to_be64(tmp_key[idx]); + + memcpy(&sa_entry->iv_gcm_salt, key + key_len, 4); + tmp_salt = (u32 *)&sa_entry->iv_gcm_salt; + *tmp_salt = (__force u32)cpu_to_be32(*tmp_salt); + + /* Write SA context data to memory before enabling */ + wmb(); + + /* Enable SA */ + sa_entry->sa_valid = 1; +} + +static int cn10k_ipsec_validate_state(struct xfrm_state *x, + struct netlink_ext_ack *extack) +{ + if (x->props.aalgo != SADB_AALG_NONE) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload authenticated xfrm states"); + return -EINVAL; + } + if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) { + NL_SET_ERR_MSG_MOD(extack, + "Only AES-GCM-ICV16 xfrm state may be offloaded"); + return -EINVAL; + } + if (x->props.calgo != SADB_X_CALG_NONE) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload compressed xfrm states"); + return -EINVAL; + } + if (x->props.flags & XFRM_STATE_ESN) { + NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states"); + return -EINVAL; + } + if (x->props.family != AF_INET && x->props.family != AF_INET6) { + NL_SET_ERR_MSG_MOD(extack, + "Only IPv4/v6 xfrm states may be offloaded"); + return -EINVAL; + } + if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload other than crypto-mode"); +
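+ /* Only XFRM_DEV_OFFLOAD_CRYPTO is accepted here: the CPT engine + * performs the encryption/authentication while the xfrm stack + * still builds the ESP framing. Packet-mode offload, where the + * hardware would also do the encapsulation, is not implemented. + */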
return -EINVAL; + } + if (x->props.mode != XFRM_MODE_TRANSPORT && + x->props.mode != XFRM_MODE_TUNNEL) { + NL_SET_ERR_MSG_MOD(extack, + "Only tunnel/transport xfrm states may be offloaded"); + return -EINVAL; + } + if (x->id.proto != IPPROTO_ESP) { + NL_SET_ERR_MSG_MOD(extack, + "Only ESP xfrm state may be offloaded"); + return -EINVAL; + } + if (x->encap) { + NL_SET_ERR_MSG_MOD(extack, + "Encapsulated xfrm state may not be offloaded"); + return -EINVAL; + } + if (!x->aead) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states without aead"); + return -EINVAL; + } + + if (x->aead->alg_icv_len != 128) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states with AEAD ICV length other than 128bit"); + return -EINVAL; + } + if (x->aead->alg_key_len != 128 + 32 && + x->aead->alg_key_len != 192 + 32 && + x->aead->alg_key_len != 256 + 32) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states with AEAD key length other than 128/192/256bit"); + return -EINVAL; + } + if (x->tfcpad) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states with tfc padding"); + return -EINVAL; + } + if (!x->geniv) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states without geniv"); + return -EINVAL; + } + if (strcmp(x->geniv, "seqiv")) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload xfrm states with geniv other than seqiv"); + return -EINVAL; + } + return 0; +} + +static int cn10k_ipsec_inb_add_state(struct xfrm_state *x, + struct netlink_ext_ack *extack) +{ + NL_SET_ERR_MSG_MOD(extack, "xfrm inbound offload not supported"); + return -EOPNOTSUPP; +} + +static int cn10k_ipsec_outb_add_state(struct xfrm_state *x, + struct netlink_ext_ack *extack) +{ + struct net_device *netdev = x->xso.dev; + struct cn10k_tx_sa_s *sa_entry; + struct qmem *sa_info; + struct otx2_nic *pf; + int err; + + err = cn10k_ipsec_validate_state(x, extack); + if (err) + return err; + + pf = netdev_priv(netdev); + + err = qmem_alloc(pf->dev, &sa_info, pf->ipsec.sa_size, OTX2_ALIGN); + if (err) + return err; + + sa_entry = (struct cn10k_tx_sa_s *)sa_info->base; + cn10k_outb_prepare_sa(x, sa_entry); + + err = cn10k_outb_write_sa(pf, sa_info); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Error writing outbound SA"); + qmem_free(pf->dev, sa_info); + return err; + } + + x->xso.offload_handle = (unsigned long)sa_info; + /* Enable static branch when first SA setup */ + if (!pf->ipsec.outb_sa_count) + static_branch_enable(&cn10k_ipsec_sa_enabled); + pf->ipsec.outb_sa_count++; + return 0; +} + +static int cn10k_ipsec_add_state(struct xfrm_state *x, + struct netlink_ext_ack *extack) +{ + if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) + return cn10k_ipsec_inb_add_state(x, extack); + else + return cn10k_ipsec_outb_add_state(x, extack); +} + +static void cn10k_ipsec_del_state(struct xfrm_state *x) +{ + struct net_device *netdev = x->xso.dev; + struct cn10k_tx_sa_s *sa_entry; + struct qmem *sa_info; + struct otx2_nic *pf; + int err; + + if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) + return; + + pf = netdev_priv(netdev); + + sa_info = (struct qmem *)x->xso.offload_handle; + sa_entry = (struct cn10k_tx_sa_s *)sa_info->base; + memset(sa_entry, 0, sizeof(struct cn10k_tx_sa_s)); + /* Disable SA in CPT h/w */ + sa_entry->ctx_push_size = cn10k_ipsec_get_ctx_push_size(); + sa_entry->ctx_size = (pf->ipsec.sa_size / OTX2_ALIGN) & 0xF; + sa_entry->aop_valid = 1; + + err = cn10k_outb_write_sa(pf, sa_info); + if (err) + netdev_err(netdev, "Error (%d) deleting SA\n", err); + + x->xso.offload_handle = 0; + qmem_free(pf->dev, sa_info); + + /* If no more SA's 
then update netdev feature for potential change + * in NETIF_F_HW_ESP. + */ + if (!--pf->ipsec.outb_sa_count) + queue_work(pf->ipsec.sa_workq, &pf->ipsec.sa_work); +} + +static bool cn10k_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) +{ + if (x->props.family == AF_INET) { + /* Offload with IPv4 options is not supported yet */ + if (ip_hdr(skb)->ihl > 5) + return false; + } else { + /* Offload with IPv6 extension headers is not supported yet */ + if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) + return false; + } + return true; +} + +static const struct xfrmdev_ops cn10k_ipsec_xfrmdev_ops = { + .xdo_dev_state_add = cn10k_ipsec_add_state, + .xdo_dev_state_delete = cn10k_ipsec_del_state, + .xdo_dev_offload_ok = cn10k_ipsec_offload_ok, +}; + +static void cn10k_ipsec_sa_wq_handler(struct work_struct *work) +{ + struct cn10k_ipsec *ipsec = container_of(work, struct cn10k_ipsec, + sa_work); + struct otx2_nic *pf = container_of(ipsec, struct otx2_nic, ipsec); + + /* Disable static branch when no more SAs are enabled */ + static_branch_disable(&cn10k_ipsec_sa_enabled); + rtnl_lock(); + netdev_update_features(pf->netdev); + rtnl_unlock(); +} + +int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable) +{ + struct otx2_nic *pf = netdev_priv(netdev); + + /* IPsec offload supported on cn10k */ + if (!is_dev_support_ipsec_offload(pf->pdev)) + return -EOPNOTSUPP; + + /* Initialize CPT for outbound ipsec offload */ + if (enable) + return cn10k_outb_cpt_init(netdev); + + /* Don't do CPT cleanup if SAs are installed */ + if (pf->ipsec.outb_sa_count) { + netdev_err(pf->netdev, "SA installed on this device\n"); + return -EBUSY; + } + + return cn10k_outb_cpt_clean(pf); +} + +int cn10k_ipsec_init(struct net_device *netdev) +{ + struct otx2_nic *pf = netdev_priv(netdev); + u32 sa_size; + + if (!is_dev_support_ipsec_offload(pf->pdev)) + return 0; + + /* Each SA entry is rounded up to a multiple of 128 bytes */ + sa_size = sizeof(struct cn10k_tx_sa_s) % OTX2_ALIGN ? + (sizeof(struct cn10k_tx_sa_s) / OTX2_ALIGN + 1) * + OTX2_ALIGN : sizeof(struct cn10k_tx_sa_s); + pf->ipsec.sa_size = sa_size; + + INIT_WORK(&pf->ipsec.sa_work, cn10k_ipsec_sa_wq_handler); + pf->ipsec.sa_workq = alloc_workqueue("cn10k_ipsec_sa_workq", 0, 0); + if (!pf->ipsec.sa_workq) { + netdev_err(pf->netdev, "SA alloc workqueue failed\n"); + return -ENOMEM; + } + + /* Set xfrm device ops */ + netdev->xfrmdev_ops = &cn10k_ipsec_xfrmdev_ops; + netdev->hw_features |= NETIF_F_HW_ESP; + netdev->hw_enc_features |= NETIF_F_HW_ESP; + + cn10k_cpt_device_set_unavailable(pf); + return 0; +} +EXPORT_SYMBOL(cn10k_ipsec_init); + +void cn10k_ipsec_clean(struct otx2_nic *pf) +{ + if (!is_dev_support_ipsec_offload(pf->pdev)) + return; + + if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED)) + return; + + if (pf->ipsec.sa_workq) { + destroy_workqueue(pf->ipsec.sa_workq); + pf->ipsec.sa_workq = NULL; + } + + cn10k_outb_cpt_clean(pf); +} +EXPORT_SYMBOL(cn10k_ipsec_clean); + +static u16 cn10k_ipsec_get_ip_data_len(struct xfrm_state *x, + struct sk_buff *skb) +{ + struct ipv6hdr *ipv6h; + struct iphdr *iph; + u8 *src; + + src = (u8 *)skb->data + ETH_HLEN; + + if (x->props.family == AF_INET) { + iph = (struct iphdr *)src; + return ntohs(iph->tot_len); + } + + ipv6h = (struct ipv6hdr *)src; + return ntohs(ipv6h->payload_len) + sizeof(struct ipv6hdr); +} + +/* Prepare CPT and NIX SQE scatter/gather subdescriptor structure. + * SG of NIX and CPT are same in size.
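+ * Both lists live in one sq->sqe_ring slot (the ring is allocated at + * 2 * sqe_size per entry in otx2_sq_init()), with the CPT gather list + * placed immediately before the NIX SQE so a single dptr can hand the + * pair to the hardware; the same fragment addresses are written to + * each list.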
+ * Layout of a NIX SQE and CPT SG entry: + * ----------------------------- + * | CPT Scatter Gather | + * | (SQE SIZE) | + * | | + * ----------------------------- + * | NIX SQE | + * | (SQE SIZE) | + * | | + * ----------------------------- + */ +bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, + struct sk_buff *skb, int num_segs, int *offset) +{ + struct cpt_sg_s *cpt_sg = NULL; + struct nix_sqe_sg_s *sg = NULL; + u64 dma_addr, *iova = NULL; + u64 *cpt_iova = NULL; + u16 *sg_lens = NULL; + int seg, len; + + sq->sg[sq->head].num_segs = 0; + cpt_sg = (struct cpt_sg_s *)(sq->sqe_base - sq->sqe_size); + + for (seg = 0; seg < num_segs; seg++) { + if ((seg % MAX_SEGS_PER_SG) == 0) { + sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset); + sg->ld_type = NIX_SEND_LDTYPE_LDD; + sg->subdc = NIX_SUBDC_SG; + sg->segs = 0; + sg_lens = (void *)sg; + iova = (void *)sg + sizeof(*sg); + /* Next subdc always starts at a 16 byte boundary. + * So if sg->segs is 2 or 3, offset += 16 bytes. + */ + if ((num_segs - seg) >= (MAX_SEGS_PER_SG - 1)) + *offset += sizeof(*sg) + (3 * sizeof(u64)); + else + *offset += sizeof(*sg) + sizeof(u64); + + cpt_sg += (seg / MAX_SEGS_PER_SG) * 4; + cpt_iova = (void *)cpt_sg + sizeof(*cpt_sg); + } + dma_addr = otx2_dma_map_skb_frag(pfvf, skb, seg, &len); + if (dma_mapping_error(pfvf->dev, dma_addr)) + return false; + + sg_lens[seg % MAX_SEGS_PER_SG] = len; + sg->segs++; + *iova++ = dma_addr; + *cpt_iova++ = dma_addr; + + /* Save DMA mapping info for later unmapping */ + sq->sg[sq->head].dma_addr[seg] = dma_addr; + sq->sg[sq->head].size[seg] = len; + sq->sg[sq->head].num_segs++; + + *cpt_sg = *(struct cpt_sg_s *)sg; + cpt_sg->rsvd_63_50 = 0; + } + + sq->sg[sq->head].skb = (u64)skb; + return true; +} + +static u16 cn10k_ipsec_get_param1(u8 iv_offset) +{ + u16 param1_val; + + /* Set Crypto mode, disable L3/L4 checksum */ + param1_val = CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM | + CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM; + param1_val |= (u16)iv_offset << CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT; + return param1_val; +} + +bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq, + struct otx2_snd_queue *sq, struct sk_buff *skb, + int num_segs, int size) +{ + struct cpt_inst_s inst; + struct cpt_res_s *res; + struct xfrm_state *x; + struct qmem *sa_info; + dma_addr_t dptr_iova; + struct sec_path *sp; + u8 encap_offset; + u8 auth_offset; + u8 gthr_size; + u8 iv_offset; + u16 dlen; + + /* Check for IPSEC offload enabled */ + if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED)) + goto drop; + + sp = skb_sec_path(skb); + if (unlikely(!sp->len)) + goto drop; + + x = xfrm_input_state(skb); + if (unlikely(!x)) + goto drop; + + if (x->props.mode != XFRM_MODE_TRANSPORT && + x->props.mode != XFRM_MODE_TUNNEL) + goto drop; + + dlen = cn10k_ipsec_get_ip_data_len(x, skb); + if (dlen == 0 && netif_msg_tx_err(pf)) { + netdev_err(pf->netdev, "Invalid IP header, ip-length zero\n"); + goto drop; + } + + /* Check for valid SA context */ + sa_info = (struct qmem *)x->xso.offload_handle; + if (!sa_info) + goto drop; + + memset(&inst, 0, sizeof(struct cpt_inst_s)); + + /* Get authentication offset */ + if (x->props.family == AF_INET) + auth_offset = sizeof(struct iphdr); + else + auth_offset = sizeof(struct ipv6hdr); + + /* IV offset is after ESP header */ + iv_offset = auth_offset + sizeof(struct ip_esp_hdr); + /* Encap will start after IV */ + encap_offset = iv_offset + GCM_RFC4106_IV_SIZE; + + /* CPT Instruction word-1 */ + res = (struct cpt_res_s
*)(sq->cpt_resp->base + (64 * sq->head)); + res->compcode = 0; + inst.res_addr = sq->cpt_resp->iova + (64 * sq->head); + + /* CPT Instruction word-2 */ + inst.rvu_pf_func = pf->pcifunc; + + /* CPT Instruction word-3: + * Set QORD to force CPT_RES_S write completion + */ + inst.qord = 1; + + /* CPT Instruction word-4 */ + /* inst.dlen should not include ICV length */ + inst.dlen = dlen + ETH_HLEN - (x->aead->alg_icv_len / 8); + inst.opcode_major = CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC; + inst.param1 = cn10k_ipsec_get_param1(iv_offset); + + inst.param2 = encap_offset << + CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT; + inst.param2 |= (u16)auth_offset << + CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT; + + /* CPT Instruction word-5 */ + gthr_size = num_segs / MAX_SEGS_PER_SG; + gthr_size = (num_segs % MAX_SEGS_PER_SG) ? gthr_size + 1 : gthr_size; + + gthr_size &= 0xF; + dptr_iova = (sq->sqe_ring->iova + (sq->head * (sq->sqe_size * 2))); + inst.dptr = dptr_iova | ((u64)gthr_size << 60); + + /* CPT Instruction word-6 */ + inst.rptr = inst.dptr; + + /* CPT Instruction word-7 */ + inst.cptr = sa_info->iova; + inst.ctx_val = 1; + inst.egrp = CN10K_DEF_CPT_IPSEC_EGRP; + + /* CPT Instruction word-0 */ + inst.nixtxl = (size / 16) - 1; + inst.dat_offset = ETH_HLEN; + inst.nixtx_offset = sq->sqe_size; + + netdev_tx_sent_queue(txq, skb->len); + + /* Finally Flush the CPT instruction */ + sq->head++; + sq->head &= (sq->sqe_cnt - 1); + cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s)); + return true; +drop: + dev_kfree_skb_any(skb); + return false; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h new file mode 100644 index 000000000000..9965df0faa3e --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.h @@ -0,0 +1,265 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell IPSEC offload driver + * + * Copyright (C) 2024 Marvell. + */ + +#ifndef CN10K_IPSEC_H +#define CN10K_IPSEC_H + +#include <linux/types.h> + +DECLARE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled); + +/* CPT instruction size in bytes */ +#define CN10K_CPT_INST_SIZE 64 + +/* CPT instruction (CPT_INST_S) queue length */ +#define CN10K_CPT_INST_QLEN 8200 + +/* CPT instruction queue size passed to HW is in units of + * 40*CPT_INST_S messages. 
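+ * For example, CN10K_CPT_INST_QLEN of 8200 instructions gives + * CN10K_CPT_SIZE_DIV40 = 8200 / 40 = 205, and the 320 spare entries + * reserved below add CN10K_CPT_EXTRA_SIZE_DIV40 = 320 / 40 = 8.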
+ */ +#define CN10K_CPT_SIZE_DIV40 (CN10K_CPT_INST_QLEN / 40) + +/* CPT needs 320 free entries */ +#define CN10K_CPT_INST_QLEN_EXTRA_BYTES (320 * CN10K_CPT_INST_SIZE) +#define CN10K_CPT_EXTRA_SIZE_DIV40 (320 / 40) + +/* CPT instruction queue length in bytes */ +#define CN10K_CPT_INST_QLEN_BYTES \ + ((CN10K_CPT_SIZE_DIV40 * 40 * CN10K_CPT_INST_SIZE) + \ + CN10K_CPT_INST_QLEN_EXTRA_BYTES) + +/* CPT instruction group queue length in bytes */ +#define CN10K_CPT_INST_GRP_QLEN_BYTES \ + ((CN10K_CPT_SIZE_DIV40 + CN10K_CPT_EXTRA_SIZE_DIV40) * 16) + +/* CPT FC length in bytes */ +#define CN10K_CPT_Q_FC_LEN 128 + +/* Default CPT engine group for ipsec offload */ +#define CN10K_DEF_CPT_IPSEC_EGRP 1 + +/* CN10K CPT LF registers */ +#define CPT_LFBASE (BLKTYPE_CPT << RVU_FUNC_BLKADDR_SHIFT) +#define CN10K_CPT_LF_CTL (CPT_LFBASE | 0x10) +#define CN10K_CPT_LF_INPROG (CPT_LFBASE | 0x40) +#define CN10K_CPT_LF_Q_BASE (CPT_LFBASE | 0xf0) +#define CN10K_CPT_LF_Q_SIZE (CPT_LFBASE | 0x100) +#define CN10K_CPT_LF_Q_INST_PTR (CPT_LFBASE | 0x110) +#define CN10K_CPT_LF_Q_GRP_PTR (CPT_LFBASE | 0x120) +#define CN10K_CPT_LF_NQX(a) (CPT_LFBASE | 0x400 | (a) << 3) +#define CN10K_CPT_LF_CTX_FLUSH (CPT_LFBASE | 0x510) + +/* IPSEC Instruction opcodes */ +#define CN10K_IPSEC_MAJOR_OP_WRITE_SA 0x01UL +#define CN10K_IPSEC_MINOR_OP_WRITE_SA 0x09UL +#define CN10K_IPSEC_MAJOR_OP_OUTB_IPSEC 0x2AUL + +enum cn10k_cpt_comp_e { + CN10K_CPT_COMP_E_NOTDONE = 0x00, + CN10K_CPT_COMP_E_GOOD = 0x01, + CN10K_CPT_COMP_E_FAULT = 0x02, + CN10K_CPT_COMP_E_HWERR = 0x04, + CN10K_CPT_COMP_E_INSTERR = 0x05, + CN10K_CPT_COMP_E_WARN = 0x06, + CN10K_CPT_COMP_E_MASK = 0x3F +}; + +struct cn10k_cpt_inst_queue { + u8 *vaddr; + u8 *real_vaddr; + dma_addr_t dma_addr; + dma_addr_t real_dma_addr; + u32 size; +}; + +enum cn10k_cpt_hw_state_e { + CN10K_CPT_HW_UNAVAILABLE, + CN10K_CPT_HW_AVAILABLE, + CN10K_CPT_HW_IN_USE +}; + +struct cn10k_ipsec { + /* Outbound CPT */ + u64 io_addr; + atomic_t cpt_state; + struct cn10k_cpt_inst_queue iq; + + /* SA info */ + u32 sa_size; + u32 outb_sa_count; + struct work_struct sa_work; + struct workqueue_struct *sa_workq; +}; + +/* CN10K IPSEC Security Association (SA) */ +/* SA direction */ +#define CN10K_IPSEC_SA_DIR_INB 0 +#define CN10K_IPSEC_SA_DIR_OUTB 1 +/* SA protocol */ +#define CN10K_IPSEC_SA_IPSEC_PROTO_AH 0 +#define CN10K_IPSEC_SA_IPSEC_PROTO_ESP 1 +/* SA Encryption Type */ +#define CN10K_IPSEC_SA_ENCAP_TYPE_AES_GCM 5 +/* SA IPSEC mode Transport/Tunnel */ +#define CN10K_IPSEC_SA_IPSEC_MODE_TRANSPORT 0 +#define CN10K_IPSEC_SA_IPSEC_MODE_TUNNEL 1 +/* SA AES Key Length */ +#define CN10K_IPSEC_SA_AES_KEY_LEN_128 1 +#define CN10K_IPSEC_SA_AES_KEY_LEN_192 2 +#define CN10K_IPSEC_SA_AES_KEY_LEN_256 3 +/* IV Source */ +#define CN10K_IPSEC_SA_IV_SRC_COUNTER 0 +#define CN10K_IPSEC_SA_IV_SRC_PACKET 3 + +struct cn10k_tx_sa_s { + u64 esn_en : 1; /* W0 */ + u64 rsvd_w0_1_8 : 8; + u64 hw_ctx_off : 7; + u64 ctx_id : 16; + u64 rsvd_w0_32_47 : 16; + u64 ctx_push_size : 7; + u64 rsvd_w0_55 : 1; + u64 ctx_hdr_size : 2; + u64 aop_valid : 1; + u64 rsvd_w0_59 : 1; + u64 ctx_size : 4; + u64 w1; /* W1 */ + u64 sa_valid : 1; /* W2 */ + u64 sa_dir : 1; + u64 rsvd_w2_2_3 : 2; + u64 ipsec_mode : 1; + u64 ipsec_protocol : 1; + u64 aes_key_len : 2; + u64 enc_type : 3; + u64 rsvd_w2_11_19 : 9; + u64 iv_src : 2; + u64 rsvd_w2_22_31 : 10; + u64 rsvd_w2_32_63 : 32; + u64 w3; /* W3 */ + u8 cipher_key[32]; /* W4 - W7 */ + u32 rsvd_w8_0_31; /* W8 : IV */ + u32 iv_gcm_salt; + u64 rsvd_w9_w30[22]; /* W9 - W30 */ + u64 hw_ctx[6]; /* W31 - W36 */ +}; + +/* CPT 
instruction parameter-1 */ +#define CN10K_IPSEC_INST_PARAM1_DIS_L4_CSUM 0x1 +#define CN10K_IPSEC_INST_PARAM1_DIS_L3_CSUM 0x2 +#define CN10K_IPSEC_INST_PARAM1_CRYPTO_MODE 0x20 +#define CN10K_IPSEC_INST_PARAM1_IV_OFFSET_SHIFT 8 + +/* CPT instruction parameter-2 */ +#define CN10K_IPSEC_INST_PARAM2_ENC_DATA_OFFSET_SHIFT 0 +#define CN10K_IPSEC_INST_PARAM2_AUTH_DATA_OFFSET_SHIFT 8 + +/* CPT Instruction Structure */ +struct cpt_inst_s { + u64 nixtxl : 3; /* W0 */ + u64 doneint : 1; + u64 rsvd_w0_4_15 : 12; + u64 dat_offset : 8; + u64 ext_param1 : 8; + u64 nixtx_offset : 20; + u64 rsvd_w0_52_63 : 12; + u64 res_addr; /* W1 */ + u64 tag : 32; /* W2 */ + u64 tt : 2; + u64 grp : 10; + u64 rsvd_w2_44_47 : 4; + u64 rvu_pf_func : 16; + u64 qord : 1; /* W3 */ + u64 rsvd_w3_1_2 : 2; + u64 wqe_ptr : 61; + u64 dlen : 16; /* W4 */ + u64 param2 : 16; + u64 param1 : 16; + u64 opcode_major : 8; + u64 opcode_minor : 8; + u64 dptr; /* W5 */ + u64 rptr; /* W6 */ + u64 cptr : 60; /* W7 */ + u64 ctx_val : 1; + u64 egrp : 3; +}; + +/* CPT Instruction Result Structure */ +struct cpt_res_s { + u64 compcode : 7; /* W0 */ + u64 doneint : 1; + u64 uc_compcode : 8; + u64 uc_info : 48; + u64 esn; /* W1 */ +}; + +/* CPT SG structure */ +struct cpt_sg_s { + u64 seg1_size : 16; + u64 seg2_size : 16; + u64 seg3_size : 16; + u64 segs : 2; + u64 rsvd_63_50 : 14; +}; + +/* CPT LF_INPROG Register */ +#define CPT_LF_INPROG_INFLIGHT GENMASK_ULL(8, 0) +#define CPT_LF_INPROG_GRB_CNT GENMASK_ULL(39, 32) +#define CPT_LF_INPROG_GWB_CNT GENMASK_ULL(47, 40) + +/* CPT LF_Q_GRP_PTR Register */ +#define CPT_LF_Q_GRP_PTR_DQ_PTR GENMASK_ULL(14, 0) +#define CPT_LF_Q_GRP_PTR_NQ_PTR GENMASK_ULL(46, 32) + +/* CPT LF_Q_BASE Register */ +#define CPT_LF_Q_BASE_ADDR GENMASK_ULL(52, 7) + +/* CPT LF_Q_SIZE Register */ +#define CPT_LF_Q_SIZE_DIV40 GENMASK_ULL(14, 0) + +/* CPT LF CTX Flush Register */ +#define CPT_LF_CTX_FLUSH GENMASK_ULL(45, 0) + +#ifdef CONFIG_XFRM_OFFLOAD +int cn10k_ipsec_init(struct net_device *netdev); +void cn10k_ipsec_clean(struct otx2_nic *pf); +int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable); +bool otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, + struct sk_buff *skb, int num_segs, int *offset); +bool cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq, + struct otx2_snd_queue *sq, struct sk_buff *skb, + int num_segs, int size); +#else +static inline __maybe_unused int cn10k_ipsec_init(struct net_device *netdev) +{ + return 0; +} + +static inline __maybe_unused void cn10k_ipsec_clean(struct otx2_nic *pf) +{ +} + +static inline __maybe_unused +int cn10k_ipsec_ethtool_init(struct net_device *netdev, bool enable) +{ + return 0; +} + +static inline bool __maybe_unused +otx2_sqe_add_sg_ipsec(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, + struct sk_buff *skb, int num_segs, int *offset) +{ + return true; +} + +static inline bool __maybe_unused +cn10k_ipsec_transmit(struct otx2_nic *pf, struct netdev_queue *txq, + struct otx2_snd_queue *sq, struct sk_buff *skb, + int num_segs, int size) +{ + return true; +} +#endif +#endif // CN10K_IPSEC_H diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c index 6cc7a78968fc..f3b9daffaec3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c @@ -133,9 +133,7 @@ static const char *rsrc_name(enum mcs_rsrc_type rsrc_type) return "SA"; default: return "Unknown"; - }; - - return "Unknown";
+ } } static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir, diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 02d0b707aea5..2b49bfec7869 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -10,12 +10,19 @@ #include <net/page_pool/helpers.h> #include <net/tso.h> #include <linux/bitfield.h> +#include <linux/dcbnl.h> +#include <net/xfrm.h> #include "otx2_reg.h" #include "otx2_common.h" #include "otx2_struct.h" #include "cn10k.h" +static bool otx2_is_pfc_enabled(struct otx2_nic *pfvf) +{ + return IS_ENABLED(CONFIG_DCB) && !!pfvf->pfc_en; +} + static void otx2_nix_rq_op_stats(struct queue_stats *stats, struct otx2_nic *pfvf, int qidx) { @@ -83,6 +90,7 @@ int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx) otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx); return 1; } +EXPORT_SYMBOL(otx2_update_rq_stats); int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx) { @@ -99,6 +107,7 @@ int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx) otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx); return 1; } +EXPORT_SYMBOL(otx2_update_sq_stats); void otx2_get_dev_stats(struct otx2_nic *pfvf) { @@ -227,7 +236,7 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu) u16 maxlen; int err; - maxlen = otx2_get_max_mtu(pfvf) + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; + maxlen = pfvf->hw.max_mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox); @@ -236,7 +245,7 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu) return -ENOMEM; } - req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; + req->maxlen = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; /* Use max receive length supported by hardware for loopback devices */ if (is_otx2_lbkvf(pfvf->pdev)) @@ -246,13 +255,14 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu) mutex_unlock(&pfvf->mbox.lock); return err; } +EXPORT_SYMBOL(otx2_hw_set_mtu); int otx2_config_pause_frm(struct otx2_nic *pfvf) { struct cgx_pause_frm_cfg *req; int err; - if (is_otx2_lbkvf(pfvf->pdev)) + if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) return 0; mutex_lock(&pfvf->mbox.lock); @@ -646,20 +656,31 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq); req->regval[2] = dwrr_val; } else if (lvl == NIX_TXSCH_LVL_TL4) { + int sdp_chan = hw->tx_chan_base + prio; + + if (is_otx2_sdp_rep(pfvf->pdev)) + prio = 0; parent = schq_list[NIX_TXSCH_LVL_TL3][prio]; req->reg[0] = NIX_AF_TL4X_PARENT(schq); - req->regval[0] = parent << 16; + req->regval[0] = (u64)parent << 16; req->num_regs++; req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq); req->regval[1] = dwrr_val; + if (is_otx2_sdp_rep(pfvf->pdev)) { + req->num_regs++; + req->reg[2] = NIX_AF_TL4X_SDP_LINK_CFG(schq); + req->regval[2] = BIT_ULL(12) | BIT_ULL(13) | + (sdp_chan & 0xff); + } } else if (lvl == NIX_TXSCH_LVL_TL3) { parent = schq_list[NIX_TXSCH_LVL_TL2][prio]; req->reg[0] = NIX_AF_TL3X_PARENT(schq); - req->regval[0] = parent << 16; + req->regval[0] = (u64)parent << 16; req->num_regs++; req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq); req->regval[1] = dwrr_val; - if (lvl == hw->txschq_link_cfg_lvl) { + if (lvl == hw->txschq_link_cfg_lvl && + !is_otx2_sdp_rep(pfvf->pdev)) { req->num_regs++; req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link); /* Enable this queue and backpressure @@ 
-670,13 +691,14 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for } else if (lvl == NIX_TXSCH_LVL_TL2) { parent = schq_list[NIX_TXSCH_LVL_TL1][prio]; req->reg[0] = NIX_AF_TL2X_PARENT(schq); - req->regval[0] = parent << 16; + req->regval[0] = (u64)parent << 16; req->num_regs++; req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq); - req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val; + req->regval[1] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 | dwrr_val; - if (lvl == hw->txschq_link_cfg_lvl) { + if (lvl == hw->txschq_link_cfg_lvl && + !is_otx2_sdp_rep(pfvf->pdev)) { req->num_regs++; req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link); /* Enable this queue and backpressure @@ -698,7 +720,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for req->num_regs++; req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq); - req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1); + req->regval[1] = hw->txschq_aggr_lvl_rr_prio << 1; req->num_regs++; req->reg[2] = NIX_AF_TL1X_CIR(schq); @@ -735,6 +757,7 @@ EXPORT_SYMBOL(otx2_smq_flush); int otx2_txsch_alloc(struct otx2_nic *pfvf) { + int chan_cnt = pfvf->hw.tx_chan_cnt; struct nix_txsch_alloc_req *req; struct nix_txsch_alloc_rsp *rsp; int lvl, schq, rc; @@ -747,6 +770,12 @@ int otx2_txsch_alloc(struct otx2_nic *pfvf) /* Request one schq per level */ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) req->schq[lvl] = 1; + + if (is_otx2_sdp_rep(pfvf->pdev) && chan_cnt > 1) { + req->schq[NIX_TXSCH_LVL_SMQ] = chan_cnt; + req->schq[NIX_TXSCH_LVL_TL4] = chan_cnt; + } + rc = otx2_sync_mbox_msg(&pfvf->mbox); if (rc) return rc; @@ -757,10 +786,12 @@ int otx2_txsch_alloc(struct otx2_nic *pfvf) return PTR_ERR(rsp); /* Setup transmit scheduler list */ - for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + pfvf->hw.txschq_cnt[lvl] = rsp->schq[lvl]; for (schq = 0; schq < rsp->schq[lvl]; schq++) pfvf->hw.txschq_list[lvl][schq] = rsp->schq_list[lvl][schq]; + } pfvf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl; pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio; @@ -798,12 +829,15 @@ EXPORT_SYMBOL(otx2_txschq_free_one); void otx2_txschq_stop(struct otx2_nic *pfvf) { - int lvl, schq; + int lvl, schq, idx; /* free non QOS TLx nodes */ - for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) - otx2_txschq_free_one(pfvf, lvl, - pfvf->hw.txschq_list[lvl][0]); + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + for (idx = 0; idx < pfvf->hw.txschq_cnt[lvl]; idx++) { + otx2_txschq_free_one(pfvf, lvl, + pfvf->hw.txschq_list[lvl][idx]); + } + } /* Clear the txschq list */ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { @@ -883,7 +917,7 @@ static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura) return otx2_sync_mbox_msg(&pfvf->mbox); } -int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) +int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura) { struct otx2_nic *pfvf = dev; struct otx2_snd_queue *sq; @@ -902,7 +936,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) aq->sq.ena = 1; aq->sq.smq = otx2_get_smq_idx(pfvf, qidx); aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); - aq->sq.default_chan = pfvf->hw.tx_chan_base; + aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset; aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ aq->sq.sqb_aura = sqb_aura; aq->sq.sq_int_ena = NIX_SQINT_BITS; @@ -925,6 +959,7 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) struct otx2_qset *qset = &pfvf->qset; struct otx2_snd_queue *sq; struct 
otx2_pool *pool; + u8 chan_offset; int err; pool = &pfvf->qset.pool[sqb_aura]; @@ -936,6 +971,29 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) if (err) return err; + /* Allocate memory for NIX SQE (which includes NIX SG) and CPT SG. + * SG of NIX and CPT are same in size. Allocate memory for CPT SG + * same as NIX SQE for base address alignment. + * Layout of a NIX SQE and CPT SG entry: + * ----------------------------- + * | CPT Scatter Gather | + * | (SQE SIZE) | + * | | + * ----------------------------- + * | NIX SQE | + * | (SQE SIZE) | + * | | + * ----------------------------- + */ + err = qmem_alloc(pfvf->dev, &sq->sqe_ring, qset->sqe_cnt, + sq->sqe_size * 2); + if (err) + return err; + + err = qmem_alloc(pfvf->dev, &sq->cpt_resp, qset->sqe_cnt, 64); + if (err) + return err; + if (qidx < pfvf->hw.tx_queues) { err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt, TSO_HEADER_SIZE); @@ -971,7 +1029,8 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) sq->stats.bytes = 0; sq->stats.pkts = 0; - err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura); + chan_offset = qidx % pfvf->hw.tx_chan_cnt; + err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, chan_offset, sqb_aura); if (err) { kfree(sq->sg); sq->sg = NULL; @@ -1592,7 +1651,7 @@ int otx2_detach_resources(struct mbox *mbox) detach->partial = false; /* Send detach request to AF */ - otx2_mbox_msg_send(&mbox->mbox, 0); + otx2_sync_mbox_msg(mbox); mutex_unlock(&mbox->lock); return 0; } @@ -1693,18 +1752,43 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable) return -ENOMEM; req->chan_base = 0; -#ifdef CONFIG_DCB - req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1; - req->bpid_per_chan = pfvf->pfc_en ? 1 : 0; -#else - req->chan_cnt = 1; - req->bpid_per_chan = 0; -#endif + if (otx2_is_pfc_enabled(pfvf)) { + req->chan_cnt = IEEE_8021QAZ_MAX_TCS; + req->bpid_per_chan = 1; + } else { + req->chan_cnt = 1; + req->bpid_per_chan = 0; + } return otx2_sync_mbox_msg(&pfvf->mbox); } EXPORT_SYMBOL(otx2_nix_config_bp); +int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable) +{ + struct nix_bp_cfg_req *req; + + if (enable) + req = otx2_mbox_alloc_msg_nix_cpt_bp_enable(&pfvf->mbox); + else + req = otx2_mbox_alloc_msg_nix_cpt_bp_disable(&pfvf->mbox); + + if (!req) + return -ENOMEM; + + req->chan_base = 0; + if (otx2_is_pfc_enabled(pfvf)) { + req->chan_cnt = IEEE_8021QAZ_MAX_TCS; + req->bpid_per_chan = 1; + } else { + req->chan_cnt = 1; + req->bpid_per_chan = 0; + } + + return otx2_sync_mbox_msg(&pfvf->mbox); +} +EXPORT_SYMBOL(otx2_nix_cpt_config_bp); + /* Mbox message handlers */ void mbox_handler_cgx_stats(struct otx2_nic *pfvf, struct cgx_stats_rsp *rsp) @@ -1738,6 +1822,8 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf, pfvf->hw.sqb_size = rsp->sqb_size; pfvf->hw.rx_chan_base = rsp->rx_chan_base; pfvf->hw.tx_chan_base = rsp->tx_chan_base; + pfvf->hw.rx_chan_cnt = rsp->rx_chan_cnt; + pfvf->hw.tx_chan_cnt = rsp->tx_chan_cnt; pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx; pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx; pfvf->hw.cgx_links = rsp->cgx_links; @@ -1782,6 +1868,7 @@ void otx2_free_cints(struct otx2_nic *pfvf, int n) free_irq(vector, &qset->napi[qidx]); } } +EXPORT_SYMBOL(otx2_free_cints); void otx2_set_cints_affinity(struct otx2_nic *pfvf) { @@ -1837,6 +1924,10 @@ u16 otx2_get_max_mtu(struct otx2_nic *pfvf) if (!rc) { rsp = (struct nix_hw_info *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + rc = PTR_ERR(rsp); + goto out; + } /* HW counts VLAN insertion bytes (8 
for double tag) * irrespective of whether SQE is requesting to insert VLAN @@ -1911,3 +2002,48 @@ EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name); MBOX_UP_CGX_MESSAGES MBOX_UP_MCS_MESSAGES #undef M + +dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf, + struct sk_buff *skb, int seg, int *len) +{ + enum dma_data_direction dir = DMA_TO_DEVICE; + const skb_frag_t *frag; + struct page *page; + int offset; + + /* Crypto hardware need write permission for ipsec crypto offload */ + if (unlikely(xfrm_offload(skb))) { + dir = DMA_BIDIRECTIONAL; + skb = skb_unshare(skb, GFP_ATOMIC); + } + + /* First segment is always skb->data */ + if (!seg) { + page = virt_to_page(skb->data); + offset = offset_in_page(skb->data); + *len = skb_headlen(skb); + } else { + frag = &skb_shinfo(skb)->frags[seg - 1]; + page = skb_frag_page(frag); + offset = skb_frag_off(frag); + *len = skb_frag_size(frag); + } + return otx2_dma_map_page(pfvf, page, offset, *len, dir); +} + +void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg) +{ + enum dma_data_direction dir = DMA_TO_DEVICE; + struct sk_buff *skb = NULL; + int seg; + + skb = (struct sk_buff *)sg->skb; + if (unlikely(xfrm_offload(skb))) + dir = DMA_BIDIRECTIONAL; + + for (seg = 0; seg < sg->num_segs; seg++) { + otx2_dma_unmap_page(pfvf, sg->dma_addr[seg], + sg->size[seg], dir); + } + sg->num_segs = 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 06910307085e..65814e3dc93f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -29,6 +29,8 @@ #include "otx2_devlink.h" #include <rvu_trace.h> #include "qos.h" +#include "rep.h" +#include "cn10k_ipsec.h" /* IPv4 flag more fragment bit */ #define IPV4_FLAG_MORE 0x20 @@ -39,8 +41,11 @@ #define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8 #define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200 +#define PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF 0xB900 #define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00 +#define PCI_DEVID_OCTEONTX2_SDP_REP 0xA0F7 + /* PCI BAR nos */ #define PCI_CFG_REG_BAR_NUM 2 #define PCI_MBOX_BAR_NUM 4 @@ -52,6 +57,9 @@ #define NIX_PF_PFC_PRIO_MAX 8 #endif +/* Number of segments per SG structure */ +#define MAX_SEGS_PER_SG 3 + enum arua_mapped_qtypes { AURA_NIX_RQ, AURA_NIX_SQ, @@ -120,33 +128,6 @@ enum otx2_errcodes_re { ERRCODE_IL4_CSUM = 0x22, }; -/* NIX TX stats */ -enum nix_stat_lf_tx { - TX_UCAST = 0x0, - TX_BCAST = 0x1, - TX_MCAST = 0x2, - TX_DROP = 0x3, - TX_OCTS = 0x4, - TX_STATS_ENUM_LAST, -}; - -/* NIX RX stats */ -enum nix_stat_lf_rx { - RX_OCTS = 0x0, - RX_UCAST = 0x1, - RX_BCAST = 0x2, - RX_MCAST = 0x3, - RX_DROP = 0x4, - RX_DROP_OCTS = 0x5, - RX_FCS = 0x6, - RX_ERR = 0x7, - RX_DRP_BCAST = 0x8, - RX_DRP_MCAST = 0x9, - RX_DRP_L3BCAST = 0xa, - RX_DRP_L3MCAST = 0xb, - RX_STATS_ENUM_LAST, -}; - struct otx2_dev_stats { u64 rx_bytes; u64 rx_frames; @@ -224,15 +205,19 @@ struct otx2_hw { /* NIX */ u8 txschq_link_cfg_lvl; + u8 txschq_cnt[NIX_TXSCH_LVL_CNT]; u8 txschq_aggr_lvl_rr_prio; u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; u16 matchall_ipolicer; u32 dwrr_mtu; + u32 max_mtu; u8 smq_link_type; /* HW settings, coalescing etc */ u16 rx_chan_base; u16 tx_chan_base; + u8 rx_chan_cnt; + u8 tx_chan_cnt; u16 cq_qcount_wait; u16 cq_ecount_wait; u16 rq_skid; @@ -346,12 +331,9 @@ struct otx2_flow_config { u16 *def_ent; u16 nr_flows; #define OTX2_DEFAULT_FLOWCOUNT 16 -#define OTX2_MAX_UNICAST_FLOWS 8 +#define OTX2_DEFAULT_UNICAST_FLOWS 4 
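+/* OTX2_DEFAULT_UNICAST_FLOWS above replaces the fixed OTX2_MAX_UNICAST_FLOWS; + * the count is now tunable at runtime through the "unicast_filter_count" + * devlink parameter added in otx2_devlink.c later in this series. + */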
#define OTX2_MAX_VLAN_FLOWS 1 #define OTX2_MAX_TC_FLOWS OTX2_DEFAULT_FLOWCOUNT -#define OTX2_MCAM_COUNT (OTX2_DEFAULT_FLOWCOUNT + \ - OTX2_MAX_UNICAST_FLOWS + \ - OTX2_MAX_VLAN_FLOWS) u16 unicast_offset; u16 rx_vlan_offset; u16 vf_vlan_offset; @@ -363,12 +345,15 @@ struct otx2_flow_config { struct list_head flow_list; u32 dmacflt_max_flows; u16 max_flows; + refcount_t mark_flows; struct list_head flow_list_tc; + u8 ucast_flt_cnt; bool ntuple; }; struct dev_hw_ops { - int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura); + int (*sq_aq_init)(void *dev, u16 qidx, u8 chan_offset, + u16 sqb_aura); void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq, int size, int qidx); int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq); @@ -465,6 +450,10 @@ struct otx2_nic { #define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14) #define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15) #define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16) +#define OTX2_FLAG_TC_MARK_ENABLED BIT_ULL(17) +#define OTX2_FLAG_REP_MODE_ENABLED BIT_ULL(18) +#define OTX2_FLAG_PORT_UP BIT_ULL(19) +#define OTX2_FLAG_IPSEC_OFFLOAD_ENABLED BIT_ULL(20) u64 flags; u64 *cq_op_addr; @@ -516,9 +505,9 @@ struct otx2_nic { /* Devlink */ struct otx2_devlink *dl; -#ifdef CONFIG_DCB /* PFC */ u8 pfc_en; +#ifdef CONFIG_DCB u8 *queue_to_pfc_map; u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX]; @@ -532,11 +521,22 @@ struct otx2_nic { #if IS_ENABLED(CONFIG_MACSEC) struct cn10k_mcs_cfg *macsec_cfg; #endif + +#if IS_ENABLED(CONFIG_RVU_ESWITCH) + struct rep_dev **reps; + int rep_cnt; + u16 rep_pf_map[RVU_MAX_REP]; + u16 esw_mode; +#endif + + /* Inline ipsec */ + struct cn10k_ipsec ipsec; }; static inline bool is_otx2_lbkvf(struct pci_dev *pdev) { - return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF; + return (pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF) || + (pdev->device == PCI_DEVID_RVU_REP); } static inline bool is_96xx_A0(struct pci_dev *pdev) @@ -551,6 +551,11 @@ static inline bool is_96xx_B0(struct pci_dev *pdev) (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF); } +static inline bool is_otx2_sdp_rep(struct pci_dev *pdev) +{ + return pdev->device == PCI_DEVID_OCTEONTX2_SDP_REP; +} + /* REVID for PCIe devices. 
* Bits 0..1: minor pass, bit 3..2: major pass * bits 7..4: midr id @@ -576,6 +581,15 @@ static inline bool is_dev_cn10kb(struct pci_dev *pdev) return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF; } +static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev) +{ + if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_RVU_PFVF && + (pdev->revision & 0xFF) == 0x54) + return true; + + return false; +} + static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf) { struct otx2_hw *hw = &pfvf->hw; @@ -625,6 +639,9 @@ static inline void __iomem *otx2_get_regaddr(struct otx2_nic *nic, u64 offset) case BLKTYPE_NPA: blkaddr = BLKADDR_NPA; break; + case BLKTYPE_CPT: + blkaddr = BLKADDR_CPT0; + break; default: blkaddr = BLKADDR_RVUM; break; @@ -815,7 +832,7 @@ static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid) if (!otx2_mbox_nonempty(&mbox->mbox_up, devid)) return 0; - otx2_mbox_msg_send(&mbox->mbox_up, devid); + otx2_mbox_msg_send_up(&mbox->mbox_up, devid); err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid); if (err) return err; @@ -913,15 +930,19 @@ static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf, static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx) { u16 smq; + int idx; + #ifdef CONFIG_DCB if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx]) return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx]; #endif /* check if qidx falls under QOS queues */ - if (qidx >= pfvf->hw.non_qos_queues) + if (qidx >= pfvf->hw.non_qos_queues) { smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues]; - else - smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0]; + } else { + idx = qidx % pfvf->hw.txschq_cnt[NIX_TXSCH_LVL_SMQ]; + smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][idx]; + } return smq; } @@ -961,6 +982,7 @@ void otx2_get_mac_from_af(struct net_device *netdev); void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx); int otx2_config_pause_frm(struct otx2_nic *pfvf); void otx2_setup_segmentation(struct otx2_nic *pfvf); +int otx2_reset_mac_stats(struct otx2_nic *pfvf); /* RVU block related APIs */ int otx2_attach_npa_nix(struct otx2_nic *pfvf); @@ -984,17 +1006,32 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable); void otx2_ctx_disable(struct mbox *mbox, int type, bool npa); int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable); +int otx2_nix_cpt_config_bp(struct otx2_nic *pfvf, bool enable); void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx); void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq); int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura); -int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); -int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); +int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura); +int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura); int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, dma_addr_t *dma); int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, int stack_pages, int numptrs, int buf_size, int type); int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, int pool_id, int numptrs); +int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf); +void otx2_free_queue_mem(struct otx2_qset *qset); +int otx2_alloc_queue_mem(struct otx2_nic *pf); +int otx2_init_hw_resources(struct otx2_nic *pfvf); +void otx2_free_hw_resources(struct otx2_nic *pf); +int otx2_wq_init(struct otx2_nic 
*pf); +int otx2_check_pf_usable(struct otx2_nic *pf); +int otx2_pfaf_mbox_init(struct otx2_nic *pf); +int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af); +int otx2_realloc_msix_vectors(struct otx2_nic *pf); +void otx2_pfaf_mbox_destroy(struct otx2_nic *pf); +void otx2_disable_mbox_intr(struct otx2_nic *pf); +void otx2_disable_napi(struct otx2_nic *pf); +irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq); /* RSS configuration APIs*/ int otx2_rss_init(struct otx2_nic *pfvf); @@ -1064,6 +1101,7 @@ int otx2_handle_ntuple_tc_features(struct net_device *netdev, int otx2_smq_flush(struct otx2_nic *pfvf, int smq); void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool, u64 iova, int size); +int otx2_mcam_entry_init(struct otx2_nic *pfvf); /* tc support */ int otx2_init_tc(struct otx2_nic *nic); @@ -1125,4 +1163,16 @@ u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb, int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid); void otx2_qos_config_txschq(struct otx2_nic *pfvf); void otx2_clean_qos_queues(struct otx2_nic *pfvf); +int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info); +int otx2_setup_tc_cls_flower(struct otx2_nic *nic, + struct flow_cls_offload *cls_flower); + +static inline int mcam_entry_cmp(const void *a, const void *b) +{ + return *(u16 *)a - *(u16 *)b; +} + +dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf, + struct sk_buff *skb, int seg, int *len); +void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg); #endif /* OTX2_COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c index 28fb643d2917..f110dfa42360 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c @@ -54,6 +54,7 @@ int otx2_pfc_txschq_config(struct otx2_nic *pfvf) return 0; } +EXPORT_SYMBOL(otx2_pfc_txschq_config); static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio) { @@ -122,6 +123,7 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf) return 0; } +EXPORT_SYMBOL(otx2_pfc_txschq_alloc); static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio) { @@ -260,6 +262,7 @@ update_sq_smq_map: return 0; } +EXPORT_SYMBOL(otx2_pfc_txschq_update); int otx2_pfc_txschq_stop(struct otx2_nic *pfvf) { @@ -282,6 +285,7 @@ int otx2_pfc_txschq_stop(struct otx2_nic *pfvf) return 0; } +EXPORT_SYMBOL(otx2_pfc_txschq_stop); int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf) { @@ -311,6 +315,11 @@ int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf) if (!otx2_sync_mbox_msg(&pfvf->mbox)) { rsp = (struct cgx_pfc_rsp *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + err = PTR_ERR(rsp); + goto unlock; + } + if (req->rx_pause != rsp->rx_pause || req->tx_pause != rsp->tx_pause) { dev_warn(pfvf->dev, "Failed to config PFC\n"); @@ -321,6 +330,7 @@ unlock: mutex_unlock(&pfvf->mbox.lock); return err; } +EXPORT_SYMBOL(otx2_config_priority_flow_ctrl); void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable) @@ -385,6 +395,7 @@ out: "Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n", qidx, err); } +EXPORT_SYMBOL(otx2_update_bpid_in_rqctx); static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc) { @@ -424,6 +435,9 @@ process_pfc: return err; } + /* Default disable backpressure on NIX-CPT */ + otx2_nix_cpt_config_bp(pfvf, false); + /* Request Per channel Bpids */ 
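+ /* With PFC enabled, otx2_nix_config_bp() below requests one BPID per + * priority channel (IEEE_8021QAZ_MAX_TCS of them) so that backpressure + * can be asserted per traffic class rather than per interface. + */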
if (pfc->pfc_en) otx2_nix_config_bp(pfvf, true); @@ -472,3 +486,4 @@ int otx2_dcbnl_set_ops(struct net_device *dev) return 0; } +EXPORT_SYMBOL(otx2_dcbnl_set_ops); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c index 4e1130496573..33ec9a7f7c03 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c @@ -32,7 +32,8 @@ static int otx2_dl_mcam_count_validate(struct devlink *devlink, u32 id, } static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct otx2_devlink *otx2_dl = devlink_priv(devlink); struct otx2_nic *pfvf = otx2_dl->pfvf; @@ -63,9 +64,68 @@ static int otx2_dl_mcam_count_get(struct devlink *devlink, u32 id, return 0; } +static int otx2_dl_ucast_flt_cnt_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) +{ + struct otx2_devlink *otx2_dl = devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + int err; + + pfvf->flow_cfg->ucast_flt_cnt = ctx->val.vu8; + + otx2_mcam_flow_del(pfvf); + err = otx2_mcam_entry_init(pfvf); + if (err) + return err; + + return 0; +} + +static int otx2_dl_ucast_flt_cnt_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct otx2_devlink *otx2_dl = devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + + ctx->val.vu8 = pfvf->flow_cfg ? pfvf->flow_cfg->ucast_flt_cnt : 0; + + return 0; +} + +static int otx2_dl_ucast_flt_cnt_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct otx2_devlink *otx2_dl = devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + + /* Check for UNICAST filter support*/ + if (!(pfvf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)) { + NL_SET_ERR_MSG_MOD(extack, + "Unicast filter not enabled"); + return -EINVAL; + } + + if (!pfvf->flow_cfg) { + NL_SET_ERR_MSG_MOD(extack, + "pfvf->flow_cfg not initialized"); + return -EINVAL; + } + + if (pfvf->flow_cfg->nr_flows) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot modify count when there are active rules"); + return -EINVAL; + } + + return 0; +} + enum otx2_dl_param_id { OTX2_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, OTX2_DEVLINK_PARAM_ID_MCAM_COUNT, + OTX2_DEVLINK_PARAM_ID_UCAST_FLT_CNT, }; static const struct devlink_param otx2_dl_params[] = { @@ -74,9 +134,63 @@ static const struct devlink_param otx2_dl_params[] = { BIT(DEVLINK_PARAM_CMODE_RUNTIME), otx2_dl_mcam_count_get, otx2_dl_mcam_count_set, otx2_dl_mcam_count_validate), + DEVLINK_PARAM_DRIVER(OTX2_DEVLINK_PARAM_ID_UCAST_FLT_CNT, + "unicast_filter_count", DEVLINK_PARAM_TYPE_U8, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + otx2_dl_ucast_flt_cnt_get, otx2_dl_ucast_flt_cnt_set, + otx2_dl_ucast_flt_cnt_validate), }; +#ifdef CONFIG_RVU_ESWITCH +static int otx2_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + struct otx2_devlink *otx2_dl = devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + + if (!otx2_rep_dev(pfvf->pdev)) + return -EOPNOTSUPP; + + *mode = pfvf->esw_mode; + + return 0; +} + +static int otx2_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) +{ + struct otx2_devlink *otx2_dl = devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + int ret = 0; + + if (!otx2_rep_dev(pfvf->pdev)) + 
return -EOPNOTSUPP; + + if (pfvf->esw_mode == mode) + return 0; + + switch (mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + rvu_rep_destroy(pfvf); + break; + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + ret = rvu_rep_create(pfvf, extack); + break; + default: + return -EINVAL; + } + + if (!ret) + pfvf->esw_mode = mode; + + return ret; +} +#endif + static const struct devlink_ops otx2_devlink_ops = { +#ifdef CONFIG_RVU_ESWITCH + .eswitch_mode_get = otx2_devlink_eswitch_mode_get, + .eswitch_mode_set = otx2_devlink_eswitch_mode_set, +#endif }; int otx2_register_dl(struct otx2_nic *pfvf) @@ -112,6 +226,7 @@ err_dl: devlink_free(dl); return err; } +EXPORT_SYMBOL(otx2_register_dl); void otx2_unregister_dl(struct otx2_nic *pfvf) { @@ -123,3 +238,4 @@ void otx2_unregister_dl(struct otx2_nic *pfvf) ARRAY_SIZE(otx2_dl_params)); devlink_free(dl); } +EXPORT_SYMBOL(otx2_unregister_dl); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c index 80d853b343f9..2046dd0da00d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c @@ -28,6 +28,11 @@ static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac, if (!err) { rsp = (struct cgx_mac_addr_add_rsp *) otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + mutex_unlock(&pf->mbox.lock); + return PTR_ERR(rsp); + } + *dmac_index = rsp->index; } @@ -200,6 +205,10 @@ int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos) rsp = (struct cgx_mac_addr_update_rsp *) otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + rc = PTR_ERR(rsp); + goto out; + } pf->flow_cfg->bmap_to_dmacindex[bit_pos] = rsp->index; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 7f786de61014..2d53dc77ef1e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -85,26 +85,22 @@ static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset) int start_qidx = qset * pfvf->hw.rx_queues; int qidx, stats; - for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) { - for (stats = 0; stats < otx2_n_queue_stats; stats++) { - sprintf(*data, "rxq%d: %s", qidx + start_qidx, - otx2_queue_stats[stats].name); - *data += ETH_GSTRING_LEN; - } - } + for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) + for (stats = 0; stats < otx2_n_queue_stats; stats++) + ethtool_sprintf(data, "rxq%d: %s", qidx + start_qidx, + otx2_queue_stats[stats].name); - for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) { - for (stats = 0; stats < otx2_n_queue_stats; stats++) { + for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) + for (stats = 0; stats < otx2_n_queue_stats; stats++) if (qidx >= pfvf->hw.non_qos_queues) - sprintf(*data, "txq_qos%d: %s", - qidx + start_qidx - pfvf->hw.non_qos_queues, - otx2_queue_stats[stats].name); + ethtool_sprintf(data, "txq_qos%d: %s", + qidx + start_qidx - + pfvf->hw.non_qos_queues, + otx2_queue_stats[stats].name); else - sprintf(*data, "txq%d: %s", qidx + start_qidx, - otx2_queue_stats[stats].name); - *data += ETH_GSTRING_LEN; - } - } + ethtool_sprintf(data, "txq%d: %s", + qidx + start_qidx, + otx2_queue_stats[stats].name); } static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data) @@ -115,36 +111,25 @@ static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data) 
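/*
 * The ethtool string-table conversion in these hunks replaces open-coded
 * "copy the name, then data += ETH_GSTRING_LEN" sequences with
 * ethtool_puts() and ethtool_sprintf(), which advance the cursor
 * themselves. A standalone model of that fixed-slot string table
 * (puts_slot() is a hypothetical stand-in for ethtool_puts()):
 */
#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN 32	/* one fixed-width slot per stat name */

static void puts_slot(char **data, const char *s)
{
	strncpy(*data, s, ETH_GSTRING_LEN - 1);
	(*data)[ETH_GSTRING_LEN - 1] = '\0';
	*data += ETH_GSTRING_LEN;	/* the helper bumps the cursor */
}

int main(void)
{
	char table[3 * ETH_GSTRING_LEN] = { 0 };
	char *data = table;

	puts_slot(&data, "reset_count");
	puts_slot(&data, "rxq0: frames");
	puts_slot(&data, "txq0: frames");
	printf("%s | %s | %s\n", table, table + ETH_GSTRING_LEN,
	       table + 2 * ETH_GSTRING_LEN);
	return 0;
}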
if (sset != ETH_SS_STATS) return; - for (stats = 0; stats < otx2_n_dev_stats; stats++) { - memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; - } + for (stats = 0; stats < otx2_n_dev_stats; stats++) + ethtool_puts(&data, otx2_dev_stats[stats].name); - for (stats = 0; stats < otx2_n_drv_stats; stats++) { - memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; - } + for (stats = 0; stats < otx2_n_drv_stats; stats++) + ethtool_puts(&data, otx2_drv_stats[stats].name); otx2_get_qset_strings(pfvf, &data, 0); if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) { - for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) { - sprintf(data, "cgx_rxstat%d: ", stats); - data += ETH_GSTRING_LEN; - } + for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) + ethtool_sprintf(&data, "cgx_rxstat%d: ", stats); - for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) { - sprintf(data, "cgx_txstat%d: ", stats); - data += ETH_GSTRING_LEN; - } + for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) + ethtool_sprintf(&data, "cgx_txstat%d: ", stats); } - strcpy(data, "reset_count"); - data += ETH_GSTRING_LEN; - sprintf(data, "Fec Corrected Errors: "); - data += ETH_GSTRING_LEN; - sprintf(data, "Fec Uncorrected Errors: "); - data += ETH_GSTRING_LEN; + ethtool_puts(&data, "reset_count"); + ethtool_puts(&data, "Fec Corrected Errors: "); + ethtool_puts(&data, "Fec Uncorrected Errors: "); } static void otx2_get_qset_stats(struct otx2_nic *pfvf, @@ -343,6 +328,11 @@ static void otx2_get_pauseparam(struct net_device *netdev, if (!otx2_sync_mbox_msg(&pfvf->mbox)) { rsp = (struct cgx_pause_frm_cfg *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + mutex_unlock(&pfvf->mbox.lock); + return; + } + pause->rx_pause = rsp->rx_pause; pause->tx_pause = rsp->tx_pause; } @@ -954,7 +944,7 @@ static u32 otx2_get_link(struct net_device *netdev) } static int otx2_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct otx2_nic *pfvf = netdev_priv(netdev); @@ -962,8 +952,6 @@ static int otx2_get_ts_info(struct net_device *netdev, return ethtool_op_get_ts_info(netdev, info); info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; @@ -1074,6 +1062,11 @@ static int otx2_set_fecparam(struct net_device *netdev, rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + err = PTR_ERR(rsp); + goto end; + } + if (rsp->fec >= 0) pfvf->linfo.fec = rsp->fec; else @@ -1367,20 +1360,15 @@ static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data) if (sset != ETH_SS_STATS) return; - for (stats = 0; stats < otx2_n_dev_stats; stats++) { - memcpy(data, otx2_dev_stats[stats].name, ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; - } + for (stats = 0; stats < otx2_n_dev_stats; stats++) + ethtool_puts(&data, otx2_dev_stats[stats].name); - for (stats = 0; stats < otx2_n_drv_stats; stats++) { - memcpy(data, otx2_drv_stats[stats].name, ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; - } + for (stats = 0; stats < otx2_n_drv_stats; stats++) + ethtool_puts(&data, otx2_drv_stats[stats].name); otx2_get_qset_strings(vf, &data, 0); - strcpy(data, "reset_count"); - data += ETH_GSTRING_LEN; + ethtool_puts(&data, "reset_count"); } static void otx2vf_get_ethtool_stats(struct net_device *netdev, diff --git 
a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 97a71e9b8563..47bfd1fb37d4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -12,8 +12,6 @@ #define OTX2_DEFAULT_ACTION 0x1 -static int otx2_mcam_entry_init(struct otx2_nic *pfvf); - struct otx2_flow { struct ethtool_rx_flow_spec flow_spec; struct list_head list; @@ -66,11 +64,6 @@ static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf) return 0; } -static int mcam_entry_cmp(const void *a, const void *b) -{ - return *(u16 *)a - *(u16 *)b; -} - int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; @@ -121,6 +114,8 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count) rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp (&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) + goto exit; for (ent = 0; ent < rsp->count; ent++) flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent]; @@ -161,7 +156,7 @@ exit: } EXPORT_SYMBOL(otx2_alloc_mcam_entries); -static int otx2_mcam_entry_init(struct otx2_nic *pfvf) +int otx2_mcam_entry_init(struct otx2_nic *pfvf) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct npc_get_field_status_req *freq; @@ -172,7 +167,7 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf) int ent, count; vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS; - count = OTX2_MAX_UNICAST_FLOWS + + count = flow_cfg->ucast_flt_cnt + OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows; flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count, @@ -199,6 +194,10 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf) rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp (&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + mutex_unlock(&pfvf->mbox.lock); + return PTR_ERR(rsp); + } if (rsp->count != req->count) { netdev_info(pfvf->netdev, @@ -214,7 +213,7 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf) flow_cfg->vf_vlan_offset = 0; flow_cfg->unicast_offset = vf_vlan_max_flows; flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset + - OTX2_MAX_UNICAST_FLOWS; + flow_cfg->ucast_flt_cnt; pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT; /* Check if NPC_DMAC field is supported @@ -234,6 +233,10 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf) frsp = (struct npc_get_field_status_rsp *)otx2_mbox_get_rsp (&pfvf->mbox.mbox, 0, &freq->hdr); + if (IS_ERR(frsp)) { + mutex_unlock(&pfvf->mbox.lock); + return PTR_ERR(frsp); + } if (frsp->enable) { pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT; @@ -252,8 +255,10 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf) pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT; + refcount_set(&flow_cfg->mark_flows, 1); return 0; } +EXPORT_SYMBOL(otx2_mcam_entry_init); /* TODO : revisit on size */ #define OTX2_DMAC_FLTR_BITMAP_SZ (4 * 2048 + 32) @@ -301,6 +306,8 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) INIT_LIST_HEAD(&pf->flow_cfg->flow_list); INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc); + pf->flow_cfg->ucast_flt_cnt = OTX2_DEFAULT_UNICAST_FLOWS; + /* Allocate bare minimum number of MCAM entries needed for * unicast and ntuple filters. 
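/*
 * otx2_mcam_entry_init() above now sizes the default MCAM entry block
 * from the runtime-configurable ucast_flt_cnt (the "unicast_filter_count"
 * devlink parameter added earlier in this series) instead of the fixed
 * OTX2_MAX_UNICAST_FLOWS. A standalone sketch of the resulting layout --
 * VF-VLAN entries first, then unicast filters, then RX-VLAN; all counts
 * below are assumed example values, not taken from the driver:
 */
#include <stdio.h>

int main(void)
{
	unsigned int per_vf_vlan = 2;	/* assumed OTX2_PER_VF_VLAN_FLOWS */
	unsigned int max_vlan = 1;	/* assumed OTX2_MAX_VLAN_FLOWS */
	unsigned int total_vfs = 8;	/* hypothetical VF count */
	unsigned int ucast_flt_cnt = 8;	/* devlink unicast_filter_count */
	unsigned int vf_vlan = total_vfs * per_vf_vlan;

	printf("vf_vlan_offset=0 unicast_offset=%u rx_vlan_offset=%u total=%u\n",
	       vf_vlan, vf_vlan + ucast_flt_cnt,
	       ucast_flt_cnt + max_vlan + vf_vlan);
	return 0;
}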
*/ @@ -313,7 +320,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) return 0; pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table) - * OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL); + * pf->flow_cfg->ucast_flt_cnt, GFP_KERNEL); if (!pf->mac_table) return -ENOMEM; @@ -355,7 +362,7 @@ static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac) return -ENOMEM; /* dont have free mcam entries or uc list is greater than alloted */ - if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS) + if (netdev_uc_count(pf->netdev) > pf->flow_cfg->ucast_flt_cnt) return -ENOMEM; mutex_lock(&pf->mbox.lock); @@ -366,7 +373,7 @@ static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac) } /* unicast offset starts with 32 0..31 for ntuple */ - for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) { + for (i = 0; i < pf->flow_cfg->ucast_flt_cnt; i++) { if (pf->mac_table[i].inuse) continue; ether_addr_copy(pf->mac_table[i].addr, mac); @@ -409,7 +416,7 @@ static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac, { int i; - for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) { + for (i = 0; i < pf->flow_cfg->ucast_flt_cnt; i++) { if (!pf->mac_table[i].inuse) continue; @@ -1393,6 +1400,7 @@ int otx2_destroy_mcam_flows(struct otx2_nic *pfvf) } pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC; + flow_cfg->max_flows = 0; mutex_unlock(&pfvf->mbox.lock); return 0; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index e5fe67e73865..e1dde93e8af8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -26,6 +26,7 @@ #include "cn10k.h" #include "qos.h" #include <rvu_trace.h> +#include "cn10k_ipsec.h" #define DRV_NAME "rvu_nicpf" #define DRV_STRING "Marvell RVU NIC Physical Function Driver" @@ -67,7 +68,7 @@ static int otx2_change_mtu(struct net_device *netdev, int new_mtu) netdev_info(netdev, "Changing MTU from %d to %d\n", netdev->mtu, new_mtu); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); if (if_up) err = otx2_open(netdev); @@ -292,8 +293,8 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs) return 0; } -static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq, - int first, int mdevs, u64 intr, int type) +static void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq, + int first, int mdevs, u64 intr) { struct otx2_mbox_dev *mdev; struct otx2_mbox *mbox; @@ -307,40 +308,26 @@ static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq, mbox = &mw->mbox; mdev = &mbox->dev[i]; - if (type == TYPE_PFAF) - otx2_sync_mbox_bbuf(mbox, i); hdr = mdev->mbase + mbox->rx_start; /* The hdr->num_msgs is set to zero immediately in the interrupt - * handler to ensure that it holds a correct value next time - * when the interrupt handler is called. - * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler - * pf>mbox.up_num_msgs holds the data for use in - * pfaf_mbox_up_handler. + * handler to ensure that it holds a correct value next time + * when the interrupt handler is called. pf->mw[i].num_msgs + * holds the data for use in otx2_pfvf_mbox_handler and + * pf->mw[i].up_num_msgs holds the data for use in + * otx2_pfvf_mbox_up_handler. 
*/ if (hdr->num_msgs) { mw[i].num_msgs = hdr->num_msgs; hdr->num_msgs = 0; - if (type == TYPE_PFAF) - memset(mbox->hwbase + mbox->rx_start, 0, - ALIGN(sizeof(struct mbox_hdr), - sizeof(u64))); - queue_work(mbox_wq, &mw[i].mbox_wrk); } mbox = &mw->mbox_up; mdev = &mbox->dev[i]; - if (type == TYPE_PFAF) - otx2_sync_mbox_bbuf(mbox, i); hdr = mdev->mbase + mbox->rx_start; if (hdr->num_msgs) { mw[i].up_num_msgs = hdr->num_msgs; hdr->num_msgs = 0; - if (type == TYPE_PFAF) - memset(mbox->hwbase + mbox->rx_start, 0, - ALIGN(sizeof(struct mbox_hdr), - sizeof(u64))); - queue_work(mbox_wq, &mw[i].mbox_up_wrk); } } @@ -356,8 +343,10 @@ static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev, /* Msgs are already copied, trigger VF's mbox irq */ smp_wmb(); + otx2_mbox_wait_for_zero(pfvf_mbox, devid); + offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift); - writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset); + writeq(MBOX_DOWN_MSG, (void __iomem *)pfvf_mbox->reg_base + offset); /* Restore VF's mbox bounce buffer region address */ src_mdev->mbase = bbuf_base; @@ -462,7 +451,6 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work) struct mbox_msghdr *msg = NULL; int offset, vf_idx, id, err; struct otx2_mbox_dev *mdev; - struct mbox_hdr *req_hdr; struct otx2_mbox *mbox; struct mbox *vf_mbox; struct otx2_nic *pf; @@ -473,9 +461,8 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work) mbox = &pf->mbox_pfvf[0].mbox; mdev = &mbox->dev[vf_idx]; - req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); - offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN); + offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); for (id = 0; id < vf_mbox->num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start + @@ -506,7 +493,6 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work) struct otx2_nic *pf = vf_mbox->pfvf; struct otx2_mbox_dev *mdev; int offset, id, vf_idx = 0; - struct mbox_hdr *rsp_hdr; struct mbox_msghdr *msg; struct otx2_mbox *mbox; @@ -514,8 +500,7 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work) mbox = &pf->mbox_pfvf[0].mbox_up; mdev = &mbox->dev[vf_idx]; - rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); - offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); + offset = mbox->rx_start + ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); for (id = 0; id < vf_mbox->up_num_msgs; id++) { msg = mdev->mbase + offset; @@ -535,6 +520,7 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work) switch (msg->id) { case MBOX_MSG_CGX_LINK_EVENT: + case MBOX_MSG_REP_EVENT_UP_NOTIFY: break; default: if (msg->rc) @@ -547,7 +533,7 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work) end: offset = mbox->rx_start + msg->next_msgoff; if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1)) - __otx2_mbox_reset(mbox, 0); + __otx2_mbox_reset(mbox, vf_idx); mdev->msgs_acked++; } } @@ -564,8 +550,7 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq) if (vfs > 64) { intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1)); otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr); - otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr, - TYPE_PFVF); + otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr); if (intr) trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr); vfs = 64; @@ -574,7 +559,7 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq) intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0)); otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr); - otx2_queue_work(mbox, 
pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF); + otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr); if (intr) trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr); @@ -597,8 +582,9 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs) if (!pf->mbox_pfvf) return -ENOMEM; - pf->mbox_pfvf_wq = alloc_ordered_workqueue("otx2_pfvf_mailbox", - WQ_HIGHPRI | WQ_MEM_RECLAIM); + pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox", + WQ_UNBOUND | WQ_HIGHPRI | + WQ_MEM_RECLAIM, 0); if (!pf->mbox_pfvf_wq) return -ENOMEM; @@ -821,20 +807,22 @@ static void otx2_pfaf_mbox_handler(struct work_struct *work) struct mbox *af_mbox; struct otx2_nic *pf; int offset, id; + u16 num_msgs; af_mbox = container_of(work, struct mbox, mbox_wrk); mbox = &af_mbox->mbox; mdev = &mbox->dev[0]; rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + num_msgs = rsp_hdr->num_msgs; offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); pf = af_mbox->pfvf; - for (id = 0; id < af_mbox->num_msgs; id++) { + for (id = 0; id < num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + offset); otx2_process_pfaf_mbox_msg(pf, msg); offset = mbox->rx_start + msg->next_msgoff; - if (mdev->msgs_acked == (af_mbox->num_msgs - 1)) + if (mdev->msgs_acked == (num_msgs - 1)) __otx2_mbox_reset(mbox, 0); mdev->msgs_acked++; } @@ -846,6 +834,9 @@ static void otx2_handle_link_event(struct otx2_nic *pf) struct cgx_link_user_info *linfo = &pf->linfo; struct net_device *netdev = pf->netdev; + if (pf->flags & OTX2_FLAG_PORT_UP) + return; + pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name, linfo->link_up ? "UP" : "DOWN", linfo->speed, linfo->full_duplex ? "Full" : "Half"); @@ -858,6 +849,35 @@ static void otx2_handle_link_event(struct otx2_nic *pf) } } +static int otx2_mbox_up_handler_rep_event_up_notify(struct otx2_nic *pf, + struct rep_event *info, + struct msg_rsp *rsp) +{ + struct net_device *netdev = pf->netdev; + + if (info->event == RVU_EVENT_MTU_CHANGE) { + netdev->mtu = info->evt_data.mtu; + return 0; + } + + if (info->event == RVU_EVENT_PORT_STATE) { + if (info->evt_data.port_state) { + pf->flags |= OTX2_FLAG_PORT_UP; + netif_carrier_on(netdev); + netif_tx_start_all_queues(netdev); + } else { + pf->flags &= ~OTX2_FLAG_PORT_UP; + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + } + return 0; + } +#ifdef CONFIG_RVU_ESWITCH + rvu_event_up_notify(pf, info); +#endif + return 0; +} + int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf, struct mcs_intr_info *event, struct msg_rsp *rsp) @@ -927,6 +947,7 @@ static int otx2_process_mbox_msg_up(struct otx2_nic *pf, } MBOX_UP_CGX_MESSAGES MBOX_UP_MCS_MESSAGES +MBOX_UP_REP_MESSAGES #undef M break; default: @@ -945,12 +966,14 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work) int offset, id, devid = 0; struct mbox_hdr *rsp_hdr; struct mbox_msghdr *msg; + u16 num_msgs; rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + num_msgs = rsp_hdr->num_msgs; offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); - for (id = 0; id < af_mbox->up_num_msgs; id++) { + for (id = 0; id < num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + offset); devid = msg->pcifunc & RVU_PFVF_FUNC_MASK; @@ -959,10 +982,11 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work) otx2_process_mbox_msg_up(pf, msg); offset = mbox->rx_start + msg->next_msgoff; } - if (devid) { + /* Forward to VF iff VFs are really present */ + if (devid && pci_num_vf(pf->pdev)) { otx2_forward_vf_mbox_msgs(pf, 
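/*
 * otx2_pfaf_mbox_handler() above now latches rsp_hdr->num_msgs into a
 * local before walking the queue: the interrupt path can rewrite the
 * header while the work item is still draining the previous batch. A
 * self-contained model of the snapshot pattern:
 */
#include <stdio.h>

struct mbox_hdr {
	volatile unsigned short num_msgs;	/* written by the IRQ path */
};

static void mbox_work(struct mbox_hdr *hdr)
{
	unsigned short num_msgs = hdr->num_msgs;	/* snapshot once */
	unsigned short id;

	for (id = 0; id < num_msgs; id++)
		printf("process msg %u/%u\n", id + 1, num_msgs);
}

int main(void)
{
	struct mbox_hdr hdr = { .num_msgs = 3 };

	mbox_work(&hdr);
	return 0;
}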
&pf->mbox.mbox_up, MBOX_DIR_PFVF_UP, devid - 1, - af_mbox->up_num_msgs); + num_msgs); return; } @@ -972,21 +996,54 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work) static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq) { struct otx2_nic *pf = (struct otx2_nic *)pf_irq; - struct mbox *mbox; + struct mbox *mw = &pf->mbox; + struct otx2_mbox_dev *mdev; + struct otx2_mbox *mbox; + struct mbox_hdr *hdr; + u64 mbox_data; /* Clear the IRQ */ otx2_write64(pf, RVU_PF_INT, BIT_ULL(0)); - mbox = &pf->mbox; - trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0)); + mbox_data = otx2_read64(pf, RVU_PF_PFAF_MBOX0); + + if (mbox_data & MBOX_UP_MSG) { + mbox_data &= ~MBOX_UP_MSG; + otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data); + + mbox = &mw->mbox_up; + mdev = &mbox->dev[0]; + otx2_sync_mbox_bbuf(mbox, 0); + + hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (hdr->num_msgs) + queue_work(pf->mbox_wq, &mw->mbox_up_wrk); + + trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF", + BIT_ULL(0)); + } + + if (mbox_data & MBOX_DOWN_MSG) { + mbox_data &= ~MBOX_DOWN_MSG; + otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data); + + mbox = &mw->mbox; + mdev = &mbox->dev[0]; + otx2_sync_mbox_bbuf(mbox, 0); - otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF); + hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (hdr->num_msgs) + queue_work(pf->mbox_wq, &mw->mbox_wrk); + + trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF", + BIT_ULL(0)); + } return IRQ_HANDLED; } -static void otx2_disable_mbox_intr(struct otx2_nic *pf) +void otx2_disable_mbox_intr(struct otx2_nic *pf) { int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX); @@ -994,8 +1051,9 @@ static void otx2_disable_mbox_intr(struct otx2_nic *pf) otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0)); free_irq(vector, pf); } +EXPORT_SYMBOL(otx2_disable_mbox_intr); -static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af) +int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af) { struct otx2_hw *hw = &pf->hw; struct msg_req *req; @@ -1039,7 +1097,7 @@ static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af) return 0; } -static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf) +void otx2_pfaf_mbox_destroy(struct otx2_nic *pf) { struct mbox *mbox = &pf->mbox; @@ -1054,8 +1112,9 @@ static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf) otx2_mbox_destroy(&mbox->mbox); otx2_mbox_destroy(&mbox->mbox_up); } +EXPORT_SYMBOL(otx2_pfaf_mbox_destroy); -static int otx2_pfaf_mbox_init(struct otx2_nic *pf) +int otx2_pfaf_mbox_init(struct otx2_nic *pf) { struct mbox *mbox = &pf->mbox; void __iomem *hwbase; @@ -1124,6 +1183,23 @@ static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable) return err; } +int otx2_reset_mac_stats(struct otx2_nic *pfvf) +{ + struct msg_req *req; + int err; + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_cgx_stats_rst(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + return err; +} + static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable) { struct msg_req *msg; @@ -1340,7 +1416,7 @@ done: return IRQ_HANDLED; } -static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq) +irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq) { struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq; struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev; @@ -1359,20 +1435,25 @@ static 
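/*
 * The rewritten otx2_pfaf_mbox_intr_handler() above demultiplexes one
 * interrupt into separate UP and DOWN work items by testing flag bits in
 * the shared mailbox register and acknowledging each bit as it is
 * consumed. A self-contained model (the bit positions are assumed, and a
 * plain variable stands in for the hardware register):
 */
#include <stdio.h>

#define MBOX_DOWN_MSG	(1UL << 0)	/* assumed bit layout */
#define MBOX_UP_MSG	(1UL << 1)

static unsigned long mbox_reg = MBOX_DOWN_MSG | MBOX_UP_MSG;

static void mbox_intr_handler(void)
{
	unsigned long data = mbox_reg;

	if (data & MBOX_UP_MSG) {
		data &= ~MBOX_UP_MSG;
		mbox_reg = data;		/* ack the UP bit */
		printf("queue mbox_up_wrk\n");
	}
	if (data & MBOX_DOWN_MSG) {
		data &= ~MBOX_DOWN_MSG;
		mbox_reg = data;		/* ack the DOWN bit */
		printf("queue mbox_wrk\n");
	}
}

int main(void)
{
	mbox_intr_handler();
	return 0;
}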
irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq) return IRQ_HANDLED; } +EXPORT_SYMBOL(otx2_cq_intr_handler); -static void otx2_disable_napi(struct otx2_nic *pf) +void otx2_disable_napi(struct otx2_nic *pf) { struct otx2_qset *qset = &pf->qset; struct otx2_cq_poll *cq_poll; + struct work_struct *work; int qidx; for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { cq_poll = &qset->napi[qidx]; - cancel_work_sync(&cq_poll->dim.work); + work = &cq_poll->dim.work; + if (work->func) + cancel_work_sync(work); napi_disable(&cq_poll->napi); netif_napi_del(&cq_poll->napi); } } +EXPORT_SYMBOL(otx2_disable_napi); static void otx2_free_cq_res(struct otx2_nic *pf) { @@ -1404,6 +1485,8 @@ static void otx2_free_sq_res(struct otx2_nic *pf) if (!sq->sqe) continue; qmem_free(pf->dev, sq->sqe); + qmem_free(pf->dev, sq->sqe_ring); + qmem_free(pf->dev, sq->cpt_resp); qmem_free(pf->dev, sq->tso_hdrs); kfree(sq->sg); kfree(sq->sqb_ptrs); @@ -1438,7 +1521,7 @@ static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu) return ALIGN(rbuf_size, 2048); } -static int otx2_init_hw_resources(struct otx2_nic *pf) +int otx2_init_hw_resources(struct otx2_nic *pf) { struct nix_lf_free_req *free_req; struct mbox *mbox = &pf->mbox; @@ -1454,10 +1537,11 @@ static int otx2_init_hw_resources(struct otx2_nic *pf) hw->sqpool_cnt = otx2_get_total_tx_queues(pf); hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt; - /* Maximum hardware supported transmit length */ - pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN; - - pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu); + if (!otx2_rep_dev(pf->pdev)) { + /* Maximum hardware supported transmit length */ + pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN; + pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu); + } mutex_lock(&mbox->lock); /* NPA init */ @@ -1470,6 +1554,9 @@ static int otx2_init_hw_resources(struct otx2_nic *pf) if (err) goto err_free_npa_lf; + /* Default disable backpressure on NIX-CPT */ + otx2_nix_cpt_config_bp(pf, false); + /* Enable backpressure for CGX mapped PF/VFs */ if (!is_otx2_lbkvf(pf->pdev)) otx2_nix_config_bp(pf, true); @@ -1510,10 +1597,15 @@ static int otx2_init_hw_resources(struct otx2_nic *pf) } for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { - err = otx2_txschq_config(pf, lvl, 0, false); - if (err) { - mutex_unlock(&mbox->lock); - goto err_free_nix_queues; + int idx; + + for (idx = 0; idx < pf->hw.txschq_cnt[lvl]; idx++) { + err = otx2_txschq_config(pf, lvl, idx, false); + if (err) { + dev_err(pf->dev, "Failed to config TXSCH\n"); + mutex_unlock(&mbox->lock); + goto err_free_nix_queues; + } } } @@ -1562,8 +1654,9 @@ exit: mutex_unlock(&mbox->lock); return err; } +EXPORT_SYMBOL(otx2_init_hw_resources); -static void otx2_free_hw_resources(struct otx2_nic *pf) +void otx2_free_hw_resources(struct otx2_nic *pf) { struct otx2_qset *qset = &pf->qset; struct nix_lf_free_req *free_req; @@ -1585,11 +1678,12 @@ static void otx2_free_hw_resources(struct otx2_nic *pf) otx2_pfc_txschq_stop(pf); #endif - otx2_clean_qos_queues(pf); + if (!otx2_rep_dev(pf->pdev)) + otx2_clean_qos_queues(pf); mutex_lock(&mbox->lock); /* Disable backpressure */ - if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK)) + if (!is_otx2_lbkvf(pf->pdev)) otx2_nix_config_bp(pf, false); mutex_unlock(&mbox->lock); @@ -1621,7 +1715,8 @@ static void otx2_free_hw_resources(struct otx2_nic *pf) otx2_free_cq_res(pf); /* Free all ingress bandwidth profiles allocated */ - cn10k_free_all_ipolicers(pf); + if (!otx2_rep_dev(pf->pdev)) + cn10k_free_all_ipolicers(pf); mutex_lock(&mbox->lock); /* Reset NIX 
LF */ @@ -1649,6 +1744,7 @@ static void otx2_free_hw_resources(struct otx2_nic *pf) } mutex_unlock(&mbox->lock); } +EXPORT_SYMBOL(otx2_free_hw_resources); static bool otx2_promisc_use_mce_list(struct otx2_nic *pfvf) { @@ -1675,7 +1771,7 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf) return; if ((netdev->flags & IFF_PROMISC) || - (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) { + (netdev_uc_count(netdev) > pf->flow_cfg->ucast_flt_cnt)) { promisc = true; } @@ -1731,15 +1827,24 @@ static void otx2_dim_work(struct work_struct *w) dim->state = DIM_START_MEASURE; } -int otx2_open(struct net_device *netdev) +void otx2_free_queue_mem(struct otx2_qset *qset) +{ + kfree(qset->sq); + qset->sq = NULL; + kfree(qset->cq); + qset->cq = NULL; + kfree(qset->rq); + qset->rq = NULL; + kfree(qset->napi); + qset->napi = NULL; +} +EXPORT_SYMBOL(otx2_free_queue_mem); + +int otx2_alloc_queue_mem(struct otx2_nic *pf) { - struct otx2_nic *pf = netdev_priv(netdev); - struct otx2_cq_poll *cq_poll = NULL; struct otx2_qset *qset = &pf->qset; - int err = 0, qidx, vec; - char *irq_name; + struct otx2_cq_poll *cq_poll; - netif_carrier_off(netdev); /* RQ and SQs are mapped to different CQs, * so find out max CQ IRQs (i.e CINTs) needed. @@ -1759,7 +1864,6 @@ int otx2_open(struct net_device *netdev) /* CQ size of SQ */ qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K); - err = -ENOMEM; qset->cq = kcalloc(pf->qset.cq_cnt, sizeof(struct otx2_cq_queue), GFP_KERNEL); if (!qset->cq) @@ -1775,6 +1879,28 @@ int otx2_open(struct net_device *netdev) if (!qset->rq) goto err_free_mem; + return 0; + +err_free_mem: + otx2_free_queue_mem(qset); + return -ENOMEM; +} +EXPORT_SYMBOL(otx2_alloc_queue_mem); + +int otx2_open(struct net_device *netdev) +{ + struct otx2_nic *pf = netdev_priv(netdev); + struct otx2_cq_poll *cq_poll = NULL; + struct otx2_qset *qset = &pf->qset; + int err = 0, qidx, vec; + char *irq_name; + + netif_carrier_off(netdev); + + err = otx2_alloc_queue_mem(pf); + if (err) + return err; + err = otx2_init_hw_resources(pf); if (err) goto err_free_mem; @@ -1847,9 +1973,17 @@ int otx2_open(struct net_device *netdev) vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START; for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) { irq_name = &pf->hw.irq_name[vec * NAME_SIZE]; + int name_len; - snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name, - qidx); + name_len = snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", + pf->netdev->name, qidx); + if (name_len >= NAME_SIZE) { + dev_err(pf->dev, + "RVUPF%d: IRQ registration failed for CQ%d, irq name is too long\n", + rvu_get_pf(pf->pcifunc), qidx); + err = -EINVAL; + goto err_free_cints; + } err = request_irq(pci_irq_vector(pf->pdev, vec), otx2_cq_intr_handler, 0, irq_name, @@ -1885,6 +2019,7 @@ int otx2_open(struct net_device *netdev) } pf->flags &= ~OTX2_FLAG_INTF_DOWN; + pf->flags &= ~OTX2_FLAG_PORT_UP; /* 'intf_down' may be checked on any cpu */ smp_wmb(); @@ -1907,7 +2042,7 @@ int otx2_open(struct net_device *netdev) * mcam entries are enabled to receive the packets. Hence disable the * packet I/O. 
*/ - if (err == EIO) + if (err == -EIO) goto err_disable_rxtx; else if (err) goto err_tx_stop_queues; @@ -1932,10 +2067,7 @@ err_disable_napi: otx2_disable_napi(pf); otx2_free_hw_resources(pf); err_free_mem: - kfree(qset->sq); - kfree(qset->cq); - kfree(qset->rq); - kfree(qset->napi); + otx2_free_queue_mem(qset); return err; } EXPORT_SYMBOL(otx2_open); @@ -2000,11 +2132,7 @@ int otx2_stop(struct net_device *netdev) for (qidx = 0; qidx < netdev->num_tx_queues; qidx++) netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx)); - - kfree(qset->sq); - kfree(qset->cq); - kfree(qset->rq); - kfree(qset->napi); + otx2_free_queue_mem(qset); /* Do not clear RQ/SQ ringsize settings */ memset_startat(qset, 0, sqe_cnt); return 0; @@ -2034,7 +2162,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev) sq = &pf->qset.sq[sq_idx]; txq = netdev_get_tx_queue(netdev, qidx); - if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) { + if (!otx2_sq_append_skb(pf, txq, sq, skb, qidx)) { netif_tx_stop_queue(txq); /* Check again, incase SQBs got freed up */ @@ -2151,6 +2279,10 @@ static int otx2_set_features(struct net_device *netdev, return otx2_enable_rxvlan(pf, features & NETIF_F_HW_VLAN_CTAG_RX); + if (changed & NETIF_F_HW_ESP) + return cn10k_ipsec_ethtool_init(netdev, + features & NETIF_F_HW_ESP); + return otx2_handle_ntuple_tc_features(netdev, features); } @@ -2739,7 +2871,7 @@ static const struct net_device_ops otx2_netdev_ops = { .ndo_set_vf_trust = otx2_ndo_set_vf_trust, }; -static int otx2_wq_init(struct otx2_nic *pf) +int otx2_wq_init(struct otx2_nic *pf) { pf->otx2_wq = create_singlethread_workqueue("otx2_wq"); if (!pf->otx2_wq) @@ -2750,7 +2882,7 @@ static int otx2_wq_init(struct otx2_nic *pf) return 0; } -static int otx2_check_pf_usable(struct otx2_nic *nic) +int otx2_check_pf_usable(struct otx2_nic *nic) { u64 rev; @@ -2768,7 +2900,7 @@ static int otx2_check_pf_usable(struct otx2_nic *nic) return 0; } -static int otx2_realloc_msix_vectors(struct otx2_nic *pf) +int otx2_realloc_msix_vectors(struct otx2_nic *pf) { struct otx2_hw *hw = &pf->hw; int num_vec, err; @@ -2790,6 +2922,7 @@ static int otx2_realloc_msix_vectors(struct otx2_nic *pf) return otx2_register_mbox_intr(pf, false); } +EXPORT_SYMBOL(otx2_realloc_msix_vectors); static int otx2_sriov_vfcfg_init(struct otx2_nic *pf) { @@ -2825,6 +2958,88 @@ static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf) } } +int otx2_init_rsrc(struct pci_dev *pdev, struct otx2_nic *pf) +{ + struct device *dev = &pdev->dev; + struct otx2_hw *hw = &pf->hw; + int num_vec, err; + + num_vec = pci_msix_vec_count(pdev); + hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE, + GFP_KERNEL); + if (!hw->irq_name) + return -ENOMEM; + + hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec, + sizeof(cpumask_var_t), GFP_KERNEL); + if (!hw->affinity_mask) + return -ENOMEM; + + /* Map CSRs */ + pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); + if (!pf->reg_base) { + dev_err(dev, "Unable to map physical function CSRs, aborting\n"); + return -ENOMEM; + } + + err = otx2_check_pf_usable(pf); + if (err) + return err; + + err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT, + RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX); + if (err < 0) { + dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n", + __func__, num_vec); + return err; + } + + otx2_setup_dev_hw_settings(pf); + + /* Init PF <=> AF mailbox stuff */ + err = otx2_pfaf_mbox_init(pf); + if (err) + goto err_free_irq_vectors; + + /* Register mailbox interrupt */ + err = 
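/*
 * The otx2_open() hunk above now rejects truncated per-CQ IRQ names:
 * snprintf() returns the length it would have written, so a return value
 * >= the buffer size means the name was cut short. A standalone
 * illustration (NAME_SIZE here is an assumed example value):
 */
#include <stdio.h>

#define NAME_SIZE 16

int main(void)
{
	char irq_name[NAME_SIZE];
	int name_len;

	name_len = snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d",
			    "a-very-long-netdev-name", 0);
	if (name_len >= NAME_SIZE) {
		printf("name truncated (%d >= %d), fail registration\n",
		       name_len, NAME_SIZE);
		return 1;
	}
	return 0;
}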
otx2_register_mbox_intr(pf, true); + if (err) + goto err_mbox_destroy; + + /* Request AF to attach NPA and NIX LFs to this PF. + * NIX and NPA LFs are needed for this PF to function as a NIC. + */ + err = otx2_attach_npa_nix(pf); + if (err) + goto err_disable_mbox_intr; + + err = otx2_realloc_msix_vectors(pf); + if (err) + goto err_detach_rsrc; + + err = cn10k_lmtst_init(pf); + if (err) + goto err_detach_rsrc; + + return 0; + +err_detach_rsrc: + if (pf->hw.lmt_info) + free_percpu(pf->hw.lmt_info); + if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) + qmem_free(pf->dev, pf->dync_lmt); + otx2_detach_resources(&pf->mbox); +err_disable_mbox_intr: + otx2_disable_mbox_intr(pf); +err_mbox_destroy: + otx2_pfaf_mbox_destroy(pf); +err_free_irq_vectors: + pci_free_irq_vectors(hw->pdev); + + return err; +} +EXPORT_SYMBOL(otx2_init_rsrc); + static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct device *dev = &pdev->dev; @@ -2832,7 +3047,6 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) struct net_device *netdev; struct otx2_nic *pf; struct otx2_hw *hw; - int num_vec; err = pcim_enable_device(pdev); if (err) { @@ -2883,72 +3097,14 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* Use CQE of 128 byte descriptor size by default */ hw->xqe_size = 128; - num_vec = pci_msix_vec_count(pdev); - hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE, - GFP_KERNEL); - if (!hw->irq_name) { - err = -ENOMEM; - goto err_free_netdev; - } - - hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec, - sizeof(cpumask_var_t), GFP_KERNEL); - if (!hw->affinity_mask) { - err = -ENOMEM; - goto err_free_netdev; - } - - /* Map CSRs */ - pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); - if (!pf->reg_base) { - dev_err(dev, "Unable to map physical function CSRs, aborting\n"); - err = -ENOMEM; - goto err_free_netdev; - } - - err = otx2_check_pf_usable(pf); + err = otx2_init_rsrc(pdev, pf); if (err) goto err_free_netdev; - err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT, - RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX); - if (err < 0) { - dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n", - __func__, num_vec); - goto err_free_netdev; - } - - otx2_setup_dev_hw_settings(pf); - - /* Init PF <=> AF mailbox stuff */ - err = otx2_pfaf_mbox_init(pf); - if (err) - goto err_free_irq_vectors; - - /* Register mailbox interrupt */ - err = otx2_register_mbox_intr(pf, true); - if (err) - goto err_mbox_destroy; - - /* Request AF to attach NPA and NIX LFs to this PF. - * NIX and NPA LFs are needed for this PF to function as a NIC. 
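/*
 * otx2_init_rsrc() above folds the probe-time bring-up into one helper
 * with the usual goto unwind ladder: each failing step jumps to a label
 * that tears down only what was already set up, in reverse order. A
 * self-contained model of the idiom (step names are arbitrary):
 */
#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("init %s\n", name);
	return fail ? -1 : 0;
}

static int init_rsrc(void)
{
	int err;

	err = step("mbox", 0);
	if (err)
		return err;

	err = step("mbox irq", 0);
	if (err)
		goto err_mbox_destroy;

	err = step("attach npa/nix", 1);	/* forced failure */
	if (err)
		goto err_disable_mbox_intr;

	return 0;

err_disable_mbox_intr:
	printf("undo mbox irq\n");
err_mbox_destroy:
	printf("undo mbox\n");
	return err;
}

int main(void)
{
	return init_rsrc() ? 1 : 0;
}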
- */ - err = otx2_attach_npa_nix(pf); - if (err) - goto err_disable_mbox_intr; - - err = otx2_realloc_msix_vectors(pf); - if (err) - goto err_detach_rsrc; - err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues); if (err) goto err_detach_rsrc; - err = cn10k_lmtst_init(pf); - if (err) - goto err_detach_rsrc; - /* Assign default mac address */ otx2_get_mac_from_af(netdev); @@ -3011,11 +3167,19 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) netdev->min_mtu = OTX2_MIN_MTU; netdev->max_mtu = otx2_get_max_mtu(pf); + hw->max_mtu = netdev->max_mtu; + + /* reset CGX/RPM MAC stats */ + otx2_reset_mac_stats(pf); + + err = cn10k_ipsec_init(netdev); + if (err) + goto err_mcs_free; err = register_netdev(netdev); if (err) { dev_err(dev, "Failed to register netdevice\n"); - goto err_mcs_free; + goto err_ipsec_clean; } err = otx2_wq_init(pf); @@ -3056,6 +3220,8 @@ err_mcam_flow_del: otx2_mcam_flow_del(pf); err_unreg_netdev: unregister_netdev(netdev); +err_ipsec_clean: + cn10k_ipsec_clean(pf); err_mcs_free: cn10k_mcs_free(pf); err_del_mcam_entries: @@ -3068,11 +3234,8 @@ err_detach_rsrc: if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) qmem_free(pf->dev, pf->dync_lmt); otx2_detach_resources(&pf->mbox); -err_disable_mbox_intr: otx2_disable_mbox_intr(pf); -err_mbox_destroy: otx2_pfaf_mbox_destroy(pf); -err_free_irq_vectors: pci_free_irq_vectors(hw->pdev); err_free_netdev: pci_set_drvdata(pdev, NULL); @@ -3087,6 +3250,7 @@ static void otx2_vf_link_event_task(struct work_struct *work) struct otx2_vf_config *config; struct cgx_link_info_msg *req; struct mbox_msghdr *msghdr; + struct delayed_work *dwork; struct otx2_nic *pf; int vf_idx; @@ -3095,10 +3259,24 @@ static void otx2_vf_link_event_task(struct work_struct *work) vf_idx = config - config->pf->vf_configs; pf = config->pf; + if (config->intf_down) + return; + + mutex_lock(&pf->mbox.lock); + + dwork = &config->link_event_work; + + if (!otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx)) { + schedule_delayed_work(dwork, msecs_to_jiffies(100)); + mutex_unlock(&pf->mbox.lock); + return; + } + msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx, sizeof(*req), sizeof(struct msg_rsp)); if (!msghdr) { dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx); + mutex_unlock(&pf->mbox.lock); return; } @@ -3107,7 +3285,11 @@ static void otx2_vf_link_event_task(struct work_struct *work) req->hdr.sig = OTX2_MBOX_REQ_SIG; memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info)); + otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx); + otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx); + + mutex_unlock(&pf->mbox.lock); } static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs) @@ -3176,6 +3358,29 @@ static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs) return otx2_sriov_enable(pdev, numvfs); } +static void otx2_ndc_sync(struct otx2_nic *pf) +{ + struct mbox *mbox = &pf->mbox; + struct ndc_sync_op *req; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_ndc_sync_op(mbox); + if (!req) { + mutex_unlock(&mbox->lock); + return; + } + + req->nix_lf_tx_sync = 1; + req->nix_lf_rx_sync = 1; + req->npa_lf_sync = 1; + + if (!otx2_sync_mbox_msg(mbox)) + dev_err(pf->dev, "NDC sync operation failed\n"); + + mutex_unlock(&mbox->lock); +} + static void otx2_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -3214,6 +3419,7 @@ static void otx2_remove(struct pci_dev *pdev) otx2_unregister_dl(pf); unregister_netdev(netdev); + cn10k_ipsec_clean(pf); 
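/*
 * otx2_vf_link_event_task() above now verifies that the previous UP
 * message has been consumed before writing a new one, and reschedules
 * itself 100 ms later when the mailbox is still busy. A self-contained
 * model of that retry flow (the countdown fakes a mailbox that frees up
 * after two polls; in the driver the recheck happens via delayed work,
 * not recursion):
 */
#include <stdio.h>

static int polls_until_free = 2;

static int mbox_is_free(void)
{
	return polls_until_free-- <= 0;
}

static void link_event_task(void)
{
	if (!mbox_is_free()) {
		printf("mbox busy, retry in 100 ms\n");
		link_event_task();	/* stands in for the rescheduled work */
		return;
	}
	printf("send link event to VF\n");
}

int main(void)
{
	link_event_task();
	return 0;
}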
cn10k_mcs_free(pf); otx2_sriov_disable(pf->pdev); otx2_sriov_vfcfg_cleanup(pf); @@ -3224,6 +3430,7 @@ static void otx2_remove(struct pci_dev *pdev) otx2_mcam_flow_del(pf); otx2_shutdown_tc(pf); otx2_shutdown_qos(pf); + otx2_ndc_sync(pf); otx2_detach_resources(&pf->mbox); if (pf->hw.lmt_info) free_percpu(pf->hw.lmt_info); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h index 45a32e4b49d1..e3aee6e36215 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h @@ -139,33 +139,34 @@ #define NIX_LF_CINTX_ENA_W1C(a) (NIX_LFBASE | 0xD50 | (a) << 12) /* NIX AF transmit scheduler registers */ -#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16) -#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16) -#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16) -#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16) -#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16) -#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16) -#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (a) << 16) -#define NIX_AF_TL2X_CIR(a) (0xE20 | (a) << 16) -#define NIX_AF_TL2X_PIR(a) (0xE30 | (a) << 16) -#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16) -#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16) -#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (a) << 16) -#define NIX_AF_TL3X_CIR(a) (0x1020 | (a) << 16) -#define NIX_AF_TL3X_PIR(a) (0x1030 | (a) << 16) -#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (a) << 16) -#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16) -#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16) -#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (a) << 16) -#define NIX_AF_TL4X_CIR(a) (0x1220 | (a) << 16) -#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16) -#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (a) << 16) -#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16) -#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (a) << 16) -#define NIX_AF_MDQX_CIR(a) (0x1420 | (a) << 16) -#define NIX_AF_MDQX_PIR(a) (0x1430 | (a) << 16) -#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16) -#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3) +#define NIX_AF_SMQX_CFG(a) (0x700 | (u64)(a) << 16) +#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (u64)(a) << 16) +#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (u64)(a) << 16) +#define NIX_AF_TL1X_CIR(a) (0xC20 | (u64)(a) << 16) +#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (u64)(a) << 16) +#define NIX_AF_TL2X_PARENT(a) (0xE88 | (u64)(a) << 16) +#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (u64)(a) << 16) +#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (u64)(a) << 16) +#define NIX_AF_TL2X_CIR(a) (0xE20 | (u64)(a) << 16) +#define NIX_AF_TL2X_PIR(a) (0xE30 | (u64)(a) << 16) +#define NIX_AF_TL3X_PARENT(a) (0x1088 | (u64)(a) << 16) +#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (u64)(a) << 16) +#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (u64)(a) << 16) +#define NIX_AF_TL3X_CIR(a) (0x1020 | (u64)(a) << 16) +#define NIX_AF_TL3X_PIR(a) (0x1030 | (u64)(a) << 16) +#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (u64)(a) << 16) +#define NIX_AF_TL4X_PARENT(a) (0x1288 | (u64)(a) << 16) +#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (u64)(a) << 16) +#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (u64)(a) << 16) +#define NIX_AF_TL4X_CIR(a) (0x1220 | (u64)(a) << 16) +#define NIX_AF_TL4X_PIR(a) (0x1230 | (u64)(a) << 16) +#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (u64)(a) << 16) +#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (u64)(a) << 16) +#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (u64)(a) << 16) +#define NIX_AF_MDQX_CIR(a) (0x1420 | (u64)(a) << 16) +#define 
NIX_AF_MDQX_PIR(a) (0x1430 | (u64)(a) << 16) +#define NIX_AF_MDQX_PARENT(a) (0x1480 | (u64)(a) << 16) +#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (u64)(a) << 16 | (b) << 3) /* LMT LF registers */ #define LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index 4fd44b6eecea..9a226ca74425 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -443,6 +443,7 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic, struct flow_action_entry *act; struct net_device *target; struct otx2_nic *priv; + struct rep_dev *rdev; u32 burst, mark = 0; u8 nr_police = 0; u8 num_intf = 1; @@ -464,14 +465,18 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic, return 0; case FLOW_ACTION_REDIRECT_INGRESS: target = act->dev; - priv = netdev_priv(target); - /* npc_install_flow_req doesn't support passing a target pcifunc */ - if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) { - NL_SET_ERR_MSG_MOD(extack, - "can't redirect to other pf/vf"); - return -EOPNOTSUPP; + if (target->dev.parent) { + priv = netdev_priv(target); + if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) { + NL_SET_ERR_MSG_MOD(extack, + "can't redirect to other pf/vf"); + return -EOPNOTSUPP; + } + req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK; + } else { + rdev = netdev_priv(target); + req->vf = rdev->pcifunc & RVU_PFVF_FUNC_MASK; } - req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK; /* if op is already set; avoid overwriting the same */ if (!req->op) @@ -511,7 +516,15 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic, nr_police++; break; case FLOW_ACTION_MARK: + if (act->mark & ~OTX2_RX_MATCH_ID_MASK) { + NL_SET_ERR_MSG_MOD(extack, "Bad flow mark, only 16 bit supported"); + return -EOPNOTSUPP; + } mark = act->mark; + req->match_id = mark & OTX2_RX_MATCH_ID_MASK; + req->op = NIX_RX_ACTION_DEFAULT; + nic->flags |= OTX2_FLAG_TC_MARK_ENABLED; + refcount_inc(&nic->flow_cfg->mark_flows); break; case FLOW_ACTION_RX_QUEUE_MAPPING: @@ -638,6 +651,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, BIT(FLOW_DISSECTOR_KEY_IPSEC) | BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) | BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) | + BIT_ULL(FLOW_DISSECTOR_KEY_TCP) | BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) { netdev_info(nic->netdev, "unsupported flow used key 0x%llx", dissector->used_keys); @@ -688,20 +702,19 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { struct flow_match_control match; + u32 val; flow_rule_match_control(rule, &match); - if (match.mask->flags & FLOW_DIS_FIRST_FRAG) { - NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later"); - return -EOPNOTSUPP; - } if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) { + val = match.key->flags & FLOW_DIS_IS_FRAGMENT; if (ntohs(flow_spec->etype) == ETH_P_IP) { - flow_spec->ip_flag = IPV4_FLAG_MORE; + flow_spec->ip_flag = val ? IPV4_FLAG_MORE : 0; flow_mask->ip_flag = IPV4_FLAG_MORE; req->features |= BIT_ULL(NPC_IPFRAG_IPV4); } else if (ntohs(flow_spec->etype) == ETH_P_IPV6) { - flow_spec->next_header = IPPROTO_FRAGMENT; + flow_spec->next_header = val ? 
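/*
 * The otx2_reg.h hunk above adds (u64) casts so that "(a) << 16" is
 * evaluated in 64-bit arithmetic: done in 32 bits, a large index can have
 * its high bits shifted out (or hit the sign bit) before the result is
 * widened into a 64-bit register offset. A standalone demonstration with
 * an exaggerated index:
 */
#include <stdio.h>
#include <stdint.h>

#define OFF_32(a) (0x1400u | (a) << 16)            /* 32-bit intermediate */
#define OFF_64(a) (0x1400 | (uint64_t)(a) << 16)   /* shift done in 64 bit */

int main(void)
{
	unsigned int a = 0x10000u;	/* oversized on purpose */

	printf("32-bit: %#llx\n", (unsigned long long)OFF_32(a)); /* 0x1400 */
	printf("64-bit: %#llx\n", (unsigned long long)OFF_64(a)); /* 0x100001400 */
	return 0;
}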
+ IPPROTO_FRAGMENT : 0; flow_mask->next_header = 0xff; req->features |= BIT_ULL(NPC_IPFRAG_IPV6); } else { @@ -709,6 +722,10 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, return -EOPNOTSUPP; } } + + if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT, + match.mask->flags, extack)) + return -EOPNOTSUPP; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { @@ -857,6 +874,16 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, } } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) { + struct flow_match_tcp match; + + flow_rule_match_tcp(rule, &match); + + flow_spec->tcp_flags = match.key->flags; + flow_mask->tcp_flags = match.mask->flags; + req->features |= BIT_ULL(NPC_TCP_FLAGS); + } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) { struct flow_match_mpls match; u8 bit; @@ -1173,6 +1200,11 @@ static int otx2_tc_del_flow(struct otx2_nic *nic, return -EINVAL; } + /* Disable TC MARK flag if they are no rules with skbedit mark action */ + if (flow_node->req.match_id) + if (!refcount_dec_and_test(&flow_cfg->mark_flows)) + nic->flags &= ~OTX2_FLAG_TC_MARK_ENABLED; + if (flow_node->is_act_police) { __clear_bit(flow_node->rq, &nic->rq_bmap); @@ -1273,6 +1305,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, req->channel = nic->hw.rx_chan_base; req->entry = flow_cfg->flow_ent[mcam_idx]; req->intf = NIX_INTF_RX; + req->vf = nic->pcifunc; req->set_cntr = 1; new_node->entry = req->entry; @@ -1373,8 +1406,8 @@ static int otx2_tc_get_flow_stats(struct otx2_nic *nic, return 0; } -static int otx2_setup_tc_cls_flower(struct otx2_nic *nic, - struct flow_cls_offload *cls_flower) +int otx2_setup_tc_cls_flower(struct otx2_nic *nic, + struct flow_cls_offload *cls_flower) { switch (cls_flower->command) { case FLOW_CLS_REPLACE: @@ -1387,6 +1420,7 @@ static int otx2_setup_tc_cls_flower(struct otx2_nic *nic, return -EOPNOTSUPP; } } +EXPORT_SYMBOL(otx2_setup_tc_cls_flower); static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic, struct tc_cls_matchall_offload *cls) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index f828d32737af..224cef938927 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -11,6 +11,7 @@ #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <net/ip6_checksum.h> +#include <net/xfrm.h> #include "otx2_reg.h" #include "otx2_common.h" @@ -26,12 +27,25 @@ */ #define PTP_SYNC_SEC_OFFSET 34 +DEFINE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled); + static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, struct bpf_prog *prog, struct nix_cqe_rx_s *cqe, struct otx2_cq_queue *cq, bool *need_xdp_flush); +static void otx2_sq_set_sqe_base(struct otx2_snd_queue *sq, + struct sk_buff *skb) +{ + if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) && + (xfrm_offload(skb))) + sq->sqe_base = sq->sqe_ring->base + sq->sqe_size + + (sq->head * (sq->sqe_size * 2)); + else + sq->sqe_base = sq->sqe->base; +} + static int otx2_nix_cq_op_status(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) { @@ -80,38 +94,6 @@ static unsigned int frag_num(unsigned int i) #endif } -static dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf, - struct sk_buff *skb, int seg, int *len) -{ - const skb_frag_t *frag; - struct page *page; - int offset; - - /* First segment is always skb->data */ - if (!seg) { - page = virt_to_page(skb->data); - offset = 
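/*
 * With inline IPsec enabled, otx2_sq_set_sqe_base() above points the SQE
 * build area into sq->sqe_ring, where every slot is twice the SQE size
 * and the xfrm-offloaded path uses the second half of its slot. A sketch
 * of just that offset arithmetic (SQE_SIZE is a hypothetical sqe_size;
 * what the first half of each slot carries is not shown in this hunk):
 */
#include <stdio.h>

#define SQE_SIZE 128u

static unsigned int ipsec_sqe_off(unsigned int head)
{
	/* mirrors: sqe_size + head * (sqe_size * 2) */
	return SQE_SIZE + head * (SQE_SIZE * 2);
}

int main(void)
{
	unsigned int head;

	for (head = 0; head < 3; head++)
		printf("head %u -> ipsec sqe offset %u\n",
		       head, ipsec_sqe_off(head));
	return 0;
}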
offset_in_page(skb->data); - *len = skb_headlen(skb); - } else { - frag = &skb_shinfo(skb)->frags[seg - 1]; - page = skb_frag_page(frag); - offset = skb_frag_off(frag); - *len = skb_frag_size(frag); - } - return otx2_dma_map_page(pfvf, page, offset, *len, DMA_TO_DEVICE); -} - -static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg) -{ - int seg; - - for (seg = 0; seg < sg->num_segs; seg++) { - otx2_dma_unmap_page(pfvf, sg->dma_addr[seg], - sg->size[seg], DMA_TO_DEVICE); - } - sg->num_segs = 0; -} - static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, struct nix_cqe_tx_s *cqe) @@ -376,9 +358,14 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, } otx2_set_rxhash(pfvf, cqe, skb); - skb_record_rx_queue(skb, cq->cq_idx); - if (pfvf->netdev->features & NETIF_F_RXCSUM) - skb->ip_summed = CHECKSUM_UNNECESSARY; + if (!(pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED)) { + skb_record_rx_queue(skb, cq->cq_idx); + if (pfvf->netdev->features & NETIF_F_RXCSUM) + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + + if (pfvf->flags & OTX2_FLAG_TC_MARK_ENABLED) + skb->mark = parse->match_id; skb_mark_for_recycle(skb); @@ -450,6 +437,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf, int tx_pkts = 0, tx_bytes = 0, qidx; struct otx2_snd_queue *sq; struct nix_cqe_tx_s *cqe; + struct net_device *ndev; int processed_cqe = 0; if (cq->pend_cqe >= budget) @@ -490,6 +478,13 @@ process_cqe: otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, ((u64)cq->cq_idx << 32) | processed_cqe); +#if IS_ENABLED(CONFIG_RVU_ESWITCH) + if (pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED) + ndev = pfvf->reps[qidx]->netdev; + else +#endif + ndev = pfvf->netdev; + if (likely(tx_pkts)) { struct netdev_queue *txq; @@ -497,12 +492,14 @@ process_cqe: if (qidx >= pfvf->hw.tx_queues) qidx -= pfvf->hw.xdp_queues; - txq = netdev_get_tx_queue(pfvf->netdev, qidx); + if (pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED) + qidx = 0; + txq = netdev_get_tx_queue(ndev, qidx); netdev_tx_completed_queue(txq, tx_pkts, tx_bytes); /* Check if queue was stopped earlier due to ring full */ smp_mb(); if (netif_tx_queue_stopped(txq) && - netif_carrier_ok(pfvf->netdev)) + netif_carrier_ok(ndev)) netif_tx_wake_queue(txq); } return 0; @@ -510,7 +507,7 @@ process_cqe: static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll) { - struct dim_sample dim_sample; + struct dim_sample dim_sample = { 0 }; u64 rx_frames, rx_bytes; u64 tx_frames, tx_bytes; @@ -524,7 +521,7 @@ static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_p rx_frames + tx_frames, rx_bytes + tx_bytes, &dim_sample); - net_dim(&cq_poll->dim, dim_sample); + net_dim(&cq_poll->dim, &dim_sample); } int otx2_napi_handler(struct napi_struct *napi, int budget) @@ -591,6 +588,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) } return workdone; } +EXPORT_SYMBOL(otx2_napi_handler); void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx) @@ -609,7 +607,6 @@ void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq, sq->head &= (sq->sqe_cnt - 1); } -#define MAX_SEGS_PER_SG 3 /* Add SQE scatter/gather subdescriptor structure */ static bool otx2_sqe_add_sg(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, struct sk_buff *skb, int num_segs, int *offset) @@ -684,7 +681,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { __be16 l3_proto = vlan_get_protocol(skb); struct udphdr *udph = udp_hdr(skb); - 
u16 iplen; + __be16 iplen; ext->lso_sb = skb_transport_offset(skb) + sizeof(struct udphdr); @@ -1138,13 +1135,14 @@ static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb, } } -bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, +bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq, + struct otx2_snd_queue *sq, struct sk_buff *skb, u16 qidx) { - struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx); - struct otx2_nic *pfvf = netdev_priv(netdev); int offset, num_segs, free_desc; struct nix_sqe_hdr_s *sqe_hdr; + struct otx2_nic *pfvf = dev; + bool ret; /* Check if there is enough room between producer * and consumer index. @@ -1161,6 +1159,7 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, /* If SKB doesn't fit in a single SQE, linearize it. * TODO: Consider adding JUMP descriptor instead. */ + if (unlikely(num_segs > OTX2_MAX_FRAGS_IN_SQE)) { if (__skb_linearize(skb)) { dev_kfree_skb_any(skb); @@ -1171,12 +1170,18 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) { /* Insert vlan tag before giving pkt to tso */ - if (skb_vlan_tag_present(skb)) + if (skb_vlan_tag_present(skb)) { skb = __vlan_hwaccel_push_inside(skb); + if (!skb) + return true; + } otx2_sq_append_tso(pfvf, sq, skb, qidx); return true; } + /* Set sqe base address */ + otx2_sq_set_sqe_base(sq, skb); + /* Set SQE's SEND_HDR. * Do not clear the first 64bit as it contains constant info. */ @@ -1189,7 +1194,13 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, otx2_sqe_add_ext(pfvf, sq, skb, &offset); /* Add SG subdesc with data frags */ - if (!otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset)) { + if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) && + (xfrm_offload(skb))) + ret = otx2_sqe_add_sg_ipsec(pfvf, sq, skb, num_segs, &offset); + else + ret = otx2_sqe_add_sg(pfvf, sq, skb, num_segs, &offset); + + if (!ret) { otx2_dma_unmap_skb_frags(pfvf, &sq->sg[sq->head]); return false; } @@ -1198,11 +1209,15 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, sqe_hdr->sizem1 = (offset / 16) - 1; + if (static_branch_unlikely(&cn10k_ipsec_sa_enabled) && + (xfrm_offload(skb))) + return cn10k_ipsec_transmit(pfvf, txq, sq, skb, num_segs, + offset); + netdev_tx_sent_queue(txq, skb->len); /* Flush SQE to HW */ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); - return true; } EXPORT_SYMBOL(otx2_sq_append_skb); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index a82ffca8ce1b..d23810963fdb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -62,6 +62,9 @@ #define CQ_OP_STAT_OP_ERR 63 #define CQ_OP_STAT_CQ_ERR 46 +/* Packet mark mask */ +#define OTX2_RX_MATCH_ID_MASK 0x0000ffff + struct queue_stats { u64 bytes; u64 pkts; @@ -98,6 +101,9 @@ struct otx2_snd_queue { struct queue_stats stats; u16 sqb_count; u64 *sqb_ptrs; + /* SQE ring and CPT response queue for Inline IPSEC */ + struct qmem *sqe_ring; + struct qmem *cpt_resp; } ____cacheline_aligned_in_smp; enum cq_type { @@ -164,7 +170,8 @@ static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr) } int otx2_napi_handler(struct napi_struct *napi, int budget); -bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, +bool otx2_sq_append_skb(void *dev, struct 
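/* Editor's aside (illustrative sketch, not part of the diff): the software
 * TSO path above now checks the result of __vlan_hwaccel_push_inside().
 * That helper consumes the skb and returns NULL when it cannot reinsert
 * the tag, so the caller must treat NULL as "packet already freed" and
 * never touch the skb again. The rule in isolation (hypothetical function
 * name):
 */
#include <linux/if_vlan.h>

static struct sk_buff *push_tag_before_tso(struct sk_buff *skb)
{
	if (!skb_vlan_tag_present(skb))
		return skb;

	/* On failure the skb is freed internally and NULL comes back */
	return __vlan_hwaccel_push_inside(skb);
}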
netdev_queue *txq, + struct otx2_snd_queue *sq, struct sk_buff *skb, u16 qidx); void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 35e06048356f..e926c6ce96cf 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -14,6 +14,7 @@ #include "otx2_reg.h" #include "otx2_ptp.h" #include "cn10k.h" +#include "cn10k_ipsec.h" #define DRV_NAME "rvu_nicvf" #define DRV_STRING "Marvell RVU NIC Virtual Function Driver" @@ -21,6 +22,7 @@ static const struct pci_device_id otx2_vf_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) }, { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) }, + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_SDP_REP) }, { } }; @@ -89,16 +91,20 @@ static void otx2vf_vfaf_mbox_handler(struct work_struct *work) struct otx2_mbox *mbox; struct mbox *af_mbox; int offset, id; + u16 num_msgs; af_mbox = container_of(work, struct mbox, mbox_wrk); mbox = &af_mbox->mbox; mdev = &mbox->dev[0]; rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); - if (af_mbox->num_msgs == 0) + num_msgs = rsp_hdr->num_msgs; + + if (num_msgs == 0) return; + offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); - for (id = 0; id < af_mbox->num_msgs; id++) { + for (id = 0; id < num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + offset); otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg); offset = mbox->rx_start + msg->next_msgoff; @@ -151,6 +157,7 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work) struct mbox *vf_mbox; struct otx2_nic *vf; int offset, id; + u16 num_msgs; vf_mbox = container_of(work, struct mbox, mbox_up_wrk); vf = vf_mbox->pfvf; @@ -158,12 +165,14 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work) mdev = &mbox->dev[0]; rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); - if (vf_mbox->up_num_msgs == 0) + num_msgs = rsp_hdr->num_msgs; + + if (num_msgs == 0) return; offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN); - for (id = 0; id < vf_mbox->up_num_msgs; id++) { + for (id = 0; id < num_msgs; id++) { msg = (struct mbox_msghdr *)(mdev->mbase + offset); otx2vf_process_mbox_msg_up(vf, msg); offset = mbox->rx_start + msg->next_msgoff; @@ -178,40 +187,48 @@ static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq) struct otx2_mbox_dev *mdev; struct otx2_mbox *mbox; struct mbox_hdr *hdr; + u64 mbox_data; /* Clear the IRQ */ otx2_write64(vf, RVU_VF_INT, BIT_ULL(0)); + mbox_data = otx2_read64(vf, RVU_VF_VFPF_MBOX0); + /* Read latest mbox data */ smp_rmb(); - /* Check for PF => VF response messages */ - mbox = &vf->mbox.mbox; - mdev = &mbox->dev[0]; - otx2_sync_mbox_bbuf(mbox, 0); + if (mbox_data & MBOX_DOWN_MSG) { + mbox_data &= ~MBOX_DOWN_MSG; + otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data); + + /* Check for PF => VF response messages */ + mbox = &vf->mbox.mbox; + mdev = &mbox->dev[0]; + otx2_sync_mbox_bbuf(mbox, 0); - trace_otx2_msg_interrupt(mbox->pdev, "PF to VF", BIT_ULL(0)); + hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); + if (hdr->num_msgs) + queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk); - hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start); - if (hdr->num_msgs) { - vf->mbox.num_msgs = hdr->num_msgs; - hdr->num_msgs = 0; - memset(mbox->hwbase + mbox->rx_start, 0, - ALIGN(sizeof(struct mbox_hdr), 
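/* Editor's aside (illustrative sketch, not part of the diff): the interrupt
 * handler rework around this point demultiplexes one doorbell register into
 * independent DOWN (request/response) and UP (notification) streams, acking
 * each bit before queueing its work item. Schematic form with hypothetical
 * accessors and bit names (the driver uses MBOX_DOWN_MSG/MBOX_UP_MSG in
 * RVU_VF_VFPF_MBOX0):
 */
#include <linux/bits.h>
#include <linux/io.h>

#define DOWN_MSG	BIT_ULL(0)
#define UP_MSG		BIT_ULL(1)

static void demux_mbox_doorbell(void __iomem *doorbell,
				void (*down_work)(void),
				void (*up_work)(void))
{
	u64 bits = readq(doorbell);

	if (bits & DOWN_MSG) {
		bits &= ~DOWN_MSG;
		writeq(bits, doorbell);	/* ack DOWN before handling it */
		down_work();
	}
	if (bits & UP_MSG) {
		bits &= ~UP_MSG;
		writeq(bits, doorbell);	/* ack UP likewise */
		up_work();
	}
}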
sizeof(u64)));
-		queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
+		trace_otx2_msg_interrupt(mbox->pdev, "DOWN reply from PF to VF",
+					 BIT_ULL(0));
 	}
 
-	/* Check for PF => VF notification messages */
-	mbox = &vf->mbox.mbox_up;
-	mdev = &mbox->dev[0];
-	otx2_sync_mbox_bbuf(mbox, 0);
-	hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
-	if (hdr->num_msgs) {
-		vf->mbox.up_num_msgs = hdr->num_msgs;
-		hdr->num_msgs = 0;
-		memset(mbox->hwbase + mbox->rx_start, 0,
-		       ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
-		queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
+	if (mbox_data & MBOX_UP_MSG) {
+		mbox_data &= ~MBOX_UP_MSG;
+		otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
+
+		/* Check for PF => VF notification messages */
+		mbox = &vf->mbox.mbox_up;
+		mdev = &mbox->dev[0];
+		otx2_sync_mbox_bbuf(mbox, 0);
+
+		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+		if (hdr->num_msgs)
+			queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
+
+		trace_otx2_msg_interrupt(mbox->pdev, "UP message from PF to VF",
+					 BIT_ULL(0));
 	}
 
 	return IRQ_HANDLED;
@@ -356,7 +373,7 @@ static int otx2vf_open(struct net_device *netdev)
 
 	/* LBKs do not receive link events so tell everyone we are up here */
 	vf = netdev_priv(netdev);
-	if (is_otx2_lbkvf(vf->pdev)) {
+	if (is_otx2_lbkvf(vf->pdev) || is_otx2_sdp_rep(vf->pdev)) {
 		pr_info("%s NIC Link is UP\n", netdev->name);
 		netif_carrier_on(netdev);
 		netif_tx_start_all_queues(netdev);
@@ -380,7 +397,7 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
 	sq = &vf->qset.sq[qidx];
 	txq = netdev_get_tx_queue(netdev, qidx);
 
-	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
+	if (!otx2_sq_append_skb(vf, txq, sq, skb, qidx)) {
 		netif_tx_stop_queue(txq);
 
 		/* Check again, in case SQBs got freed up */
@@ -441,7 +458,7 @@ static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
 	netdev_info(netdev, "Changing MTU from %d to %d\n",
 		    netdev->mtu, new_mtu);
-	netdev->mtu = new_mtu;
+	WRITE_ONCE(netdev->mtu, new_mtu);
 
 	if (if_up)
 		err = otx2vf_open(netdev);
@@ -485,7 +502,7 @@ static const struct net_device_ops otx2vf_netdev_ops = {
 	.ndo_setup_tc = otx2_setup_tc,
 };
 
-static int otx2_wq_init(struct otx2_nic *vf)
+static int otx2_vf_wq_init(struct otx2_nic *vf)
 {
 	vf->otx2_wq = create_singlethread_workqueue("otx2vf_wq");
 	if (!vf->otx2_wq)
@@ -656,6 +673,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	netdev->min_mtu = OTX2_MIN_MTU;
 	netdev->max_mtu = otx2_get_max_mtu(vf);
+	hw->max_mtu = netdev->max_mtu;
 
 	/* To distinguish, for LBK VFs set netdev name explicitly */
 	if (is_otx2_lbkvf(vf->pdev)) {
 		snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n);
 	}
 
+	if (is_otx2_sdp_rep(vf->pdev)) {
+		int n;
+
+		n = vf->pcifunc & RVU_PFVF_FUNC_MASK;
+		n -= 1;
+		snprintf(netdev->name, sizeof(netdev->name), "sdp%d-%d",
+			 pdev->bus->number, n);
+	}
+
+	err = cn10k_ipsec_init(netdev);
+	if (err)
+		goto err_ptp_destroy;
+
 	err = register_netdev(netdev);
 	if (err) {
 		dev_err(dev, "Failed to register netdevice\n");
-		goto err_ptp_destroy;
+		goto err_ipsec_clean;
 	}
 
-	err = otx2_wq_init(vf);
+	err = otx2_vf_wq_init(vf);
 	if (err)
 		goto err_unreg_netdev;
 
@@ -704,6 +735,8 @@ err_shutdown_tc:
 	otx2_shutdown_tc(vf);
 err_unreg_netdev:
 	unregister_netdev(netdev);
+err_ipsec_clean:
+	cn10k_ipsec_clean(vf);
 err_ptp_destroy:
 	otx2_ptp_destroy(vf);
 err_detach_rsrc:
@@ -756,12 +789,13 @@ static void otx2vf_remove(struct pci_dev *pdev)
 
 	unregister_netdev(netdev);
 	if
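/* Editor's aside (illustrative sketch, not part of the diff): otx2vf_xmit()
 * above uses the usual lockless stop/wake protocol: stop the queue when the
 * SQ is full, then re-check occupancy after a full barrier so a completion
 * racing with the stop cannot leave the queue stalled. Distilled shape with
 * hypothetical callbacks:
 */
#include <linux/netdevice.h>

static netdev_tx_t xmit_or_stop(struct netdev_queue *txq,
				bool (*enqueue)(void), bool (*has_room)(void))
{
	if (enqueue())
		return NETDEV_TX_OK;

	netif_tx_stop_queue(txq);
	smp_mb();		/* order the stop vs. the completion path */
	if (has_room())		/* SQBs may have been freed meanwhile */
		netif_tx_wake_queue(txq);

	return NETDEV_TX_BUSY;
}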
(vf->otx2_wq) destroy_workqueue(vf->otx2_wq); + cn10k_ipsec_clean(vf); otx2_ptp_destroy(vf); otx2_mcam_flow_del(vf); otx2_shutdown_tc(vf); otx2_shutdown_qos(vf); - otx2vf_disable_mbox_intr(vf); otx2_detach_resources(&vf->mbox); + otx2vf_disable_mbox_intr(vf); free_percpu(vf->hw.lmt_info); if (test_bit(CN10K_LMTST, &vf->hw.cap_flag)) qmem_free(vf->dev, vf->dync_lmt); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c index 1e77bbf5d22a..0f844c14485a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c @@ -153,7 +153,6 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf, num_regs++; otx2_config_sched_shaping(pfvf, node, cfg, &num_regs); - } else if (level == NIX_TXSCH_LVL_TL4) { otx2_config_sched_shaping(pfvf, node, cfg, &num_regs); } else if (level == NIX_TXSCH_LVL_TL3) { @@ -176,7 +175,7 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf, /* check if node is root */ if (node->qid == OTX2_QOS_QID_INNER && !node->parent) { cfg->reg[num_regs] = NIX_AF_TL2X_SCHEDULE(node->schq); - cfg->regval[num_regs] = TXSCH_TL1_DFLT_RR_PRIO << 24 | + cfg->regval[num_regs] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 | mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); num_regs++; @@ -382,6 +381,7 @@ static void otx2_qos_read_txschq_cfg_tl(struct otx2_qos_node *parent, otx2_qos_read_txschq_cfg_tl(node, cfg); cnt = cfg->static_node_pos[node->level]; cfg->schq_contig_list[node->level][cnt] = node->schq; + cfg->schq_index_used[node->level][cnt] = true; cfg->schq_contig[node->level]++; cfg->static_node_pos[node->level]++; otx2_qos_read_txschq_cfg_schq(node, cfg); @@ -544,6 +544,20 @@ otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf, return node; } +static struct otx2_qos_node +*otx2_sw_node_find_by_qid(struct otx2_nic *pfvf, u16 qid) +{ + struct otx2_qos_node *node = NULL; + int bkt; + + hash_for_each(pfvf->qos.qos_hlist, bkt, node, hlist) { + if (node->qid == qid) + break; + } + + return node; +} + static struct otx2_qos_node * otx2_sw_node_find(struct otx2_nic *pfvf, u32 classid) { @@ -916,6 +930,7 @@ static void otx2_qos_enadis_sq(struct otx2_nic *pfvf, otx2_qos_disable_sq(pfvf, qid); pfvf->qos.qid_to_sqmap[qid] = node->schq; + otx2_qos_txschq_config(pfvf, node); otx2_qos_enable_sq(pfvf, qid); } @@ -1406,7 +1421,10 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid, otx2_qos_read_txschq_cfg(pfvf, node, old_cfg); /* delete the txschq nodes allocated for this node */ + otx2_qos_disable_sq(pfvf, qid); + otx2_qos_free_hw_node_schq(pfvf, node); otx2_qos_free_sw_node_schq(pfvf, node); + pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ; /* mark this node as htb inner node */ WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER); @@ -1474,13 +1492,45 @@ out: return ret; } +static int otx2_qos_cur_leaf_nodes(struct otx2_nic *pfvf) +{ + int last = find_last_bit(pfvf->qos.qos_sq_bmap, pfvf->hw.tc_tx_queues); + + return last == pfvf->hw.tc_tx_queues ? 
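/* Editor's aside (illustrative sketch, not part of the diff): the helper
 * being defined here turns the qos SQ bitmap into a count of in-use leaf
 * queues; find_last_bit() returns the bitmap size when no bit is set, hence
 * the "== size ? 0 : last + 1" translation. The deleted leaf's slot is then
 * refilled by migrating the highest in-use queue so qids stay dense.
 * Standalone equivalent:
 */
#include <linux/bitops.h>

static int used_queue_count(const unsigned long *bmap, unsigned long size)
{
	unsigned long last = find_last_bit(bmap, size);

	return last == size ? 0 : (int)last + 1;
}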
0 : last + 1; +} + +static void otx2_reset_qdisc(struct net_device *dev, u16 qid) +{ + struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid); + struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); + + if (!qdisc) + return; + + spin_lock_bh(qdisc_lock(qdisc)); + qdisc_reset(qdisc); + spin_unlock_bh(qdisc_lock(qdisc)); +} + +static void otx2_cfg_smq(struct otx2_nic *pfvf, struct otx2_qos_node *node, + int qid) +{ + struct otx2_qos_node *tmp; + + list_for_each_entry(tmp, &node->child_schq_list, list) + if (tmp->level == NIX_TXSCH_LVL_MDQ) { + otx2_qos_txschq_config(pfvf, tmp); + pfvf->qos.qid_to_sqmap[qid] = tmp->schq; + } +} + static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid, struct netlink_ext_ack *extack) { struct otx2_qos_node *node, *parent; int dwrr_del_node = false; + u16 qid, moved_qid; u64 prio; - u16 qid; netdev_dbg(pfvf->netdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid); @@ -1516,6 +1566,37 @@ static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid, if (!parent->child_static_cnt) parent->max_static_prio = 0; + moved_qid = otx2_qos_cur_leaf_nodes(pfvf); + + /* last node just deleted */ + if (moved_qid == 0 || moved_qid == qid) + return 0; + + moved_qid--; + + node = otx2_sw_node_find_by_qid(pfvf, moved_qid); + if (!node) + return 0; + + /* stop traffic to the old queue and disable + * SQ associated with it + */ + node->qid = OTX2_QOS_QID_INNER; + __clear_bit(moved_qid, pfvf->qos.qos_sq_bmap); + otx2_qos_disable_sq(pfvf, moved_qid); + + otx2_reset_qdisc(pfvf->netdev, pfvf->hw.tx_queues + moved_qid); + + /* enable SQ associated with qid and + * update the node + */ + otx2_cfg_smq(pfvf, node, qid); + + otx2_qos_enable_sq(pfvf, qid); + __set_bit(qid, pfvf->qos.qos_sq_bmap); + node->qid = qid; + + *classid = node->classid; return 0; } @@ -1553,6 +1634,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force dwrr_del_node = true; /* destroy the leaf node */ + otx2_qos_disable_sq(pfvf, qid); otx2_qos_destroy_node(pfvf, node); pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c new file mode 100644 index 000000000000..04e08e06f30f --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c @@ -0,0 +1,867 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU representor driver + * + * Copyright (C) 2024 Marvell. 
+ * + */ + +#include <linux/etherdevice.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/net_tstamp.h> +#include <linux/sort.h> + +#include "otx2_common.h" +#include "cn10k.h" +#include "otx2_reg.h" +#include "rep.h" + +#define DRV_NAME "rvu_rep" +#define DRV_STRING "Marvell RVU Representor Driver" + +static const struct pci_device_id rvu_rep_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_RVU_REP) }, + { } +}; + +MODULE_AUTHOR("Marvell International Ltd."); +MODULE_DESCRIPTION(DRV_STRING); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, rvu_rep_id_table); + +static int rvu_rep_notify_pfvf(struct otx2_nic *priv, u16 event, + struct rep_event *data); + +static int rvu_rep_mcam_flow_init(struct rep_dev *rep) +{ + struct npc_mcam_alloc_entry_req *req; + struct npc_mcam_alloc_entry_rsp *rsp; + struct otx2_nic *priv = rep->mdev; + int ent, allocated = 0; + int count; + + rep->flow_cfg = kcalloc(1, sizeof(struct otx2_flow_config), GFP_KERNEL); + + if (!rep->flow_cfg) + return -ENOMEM; + + count = OTX2_DEFAULT_FLOWCOUNT; + + rep->flow_cfg->flow_ent = kcalloc(count, sizeof(u16), GFP_KERNEL); + if (!rep->flow_cfg->flow_ent) + return -ENOMEM; + + while (allocated < count) { + req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&priv->mbox); + if (!req) + goto exit; + + req->hdr.pcifunc = rep->pcifunc; + req->contig = false; + req->ref_entry = 0; + req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ? + NPC_MAX_NONCONTIG_ENTRIES : count - allocated; + + if (otx2_sync_mbox_msg(&priv->mbox)) + goto exit; + + rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp + (&priv->mbox.mbox, 0, &req->hdr); + + for (ent = 0; ent < rsp->count; ent++) + rep->flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent]; + + allocated += rsp->count; + + if (rsp->count != req->count) + break; + } +exit: + /* Multiple MCAM entry alloc requests could result in non-sequential + * MCAM entries in the flow_ent[] array. Sort them in an ascending + * order, otherwise user installed ntuple filter index and MCAM entry + * index will not be in sync. 
+ */ + if (allocated) + sort(&rep->flow_cfg->flow_ent[0], allocated, + sizeof(rep->flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL); + + mutex_unlock(&priv->mbox.lock); + + rep->flow_cfg->max_flows = allocated; + + if (allocated) { + rep->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC; + rep->flags |= OTX2_FLAG_NTUPLE_SUPPORT; + rep->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT; + } + + INIT_LIST_HEAD(&rep->flow_cfg->flow_list); + INIT_LIST_HEAD(&rep->flow_cfg->flow_list_tc); + return 0; +} + +static int rvu_rep_setup_tc_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + struct rep_dev *rep = cb_priv; + struct otx2_nic *priv = rep->mdev; + + if (!(rep->flags & RVU_REP_VF_INITIALIZED)) + return -EINVAL; + + if (!(rep->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)) + rvu_rep_mcam_flow_init(rep); + + priv->netdev = rep->netdev; + priv->flags = rep->flags; + priv->pcifunc = rep->pcifunc; + priv->flow_cfg = rep->flow_cfg; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return otx2_setup_tc_cls_flower(priv, type_data); + default: + return -EOPNOTSUPP; + } +} + +static LIST_HEAD(rvu_rep_block_cb_list); +static int rvu_rep_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) +{ + struct rvu_rep *rep = netdev_priv(netdev); + + switch (type) { + case TC_SETUP_BLOCK: + return flow_block_cb_setup_simple(type_data, + &rvu_rep_block_cb_list, + rvu_rep_setup_tc_cb, + rep, rep, true); + default: + return -EOPNOTSUPP; + } +} + +static int +rvu_rep_sp_stats64(const struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct rep_dev *rep = netdev_priv(dev); + struct otx2_nic *priv = rep->mdev; + struct otx2_rcv_queue *rq; + struct otx2_snd_queue *sq; + u16 qidx = rep->rep_id; + + otx2_update_rq_stats(priv, qidx); + rq = &priv->qset.rq[qidx]; + + otx2_update_sq_stats(priv, qidx); + sq = &priv->qset.sq[qidx]; + + stats->tx_bytes = sq->stats.bytes; + stats->tx_packets = sq->stats.pkts; + stats->rx_bytes = rq->stats.bytes; + stats->rx_packets = rq->stats.pkts; + return 0; +} + +static bool +rvu_rep_has_offload_stats(const struct net_device *dev, int attr_id) +{ + return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT; +} + +static int +rvu_rep_get_offload_stats(int attr_id, const struct net_device *dev, + void *sp) +{ + if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT) + return rvu_rep_sp_stats64(dev, (struct rtnl_link_stats64 *)sp); + + return -EINVAL; +} + +static int rvu_rep_dl_port_fn_hw_addr_get(struct devlink_port *port, + u8 *hw_addr, int *hw_addr_len, + struct netlink_ext_ack *extack) +{ + struct rep_dev *rep = container_of(port, struct rep_dev, dl_port); + + ether_addr_copy(hw_addr, rep->mac); + *hw_addr_len = ETH_ALEN; + return 0; +} + +static int rvu_rep_dl_port_fn_hw_addr_set(struct devlink_port *port, + const u8 *hw_addr, int hw_addr_len, + struct netlink_ext_ack *extack) +{ + struct rep_dev *rep = container_of(port, struct rep_dev, dl_port); + struct otx2_nic *priv = rep->mdev; + struct rep_event evt = {0}; + + eth_hw_addr_set(rep->netdev, hw_addr); + ether_addr_copy(rep->mac, hw_addr); + + ether_addr_copy(evt.evt_data.mac, hw_addr); + evt.pcifunc = rep->pcifunc; + rvu_rep_notify_pfvf(priv, RVU_EVENT_MAC_ADDR_CHANGE, &evt); + return 0; +} + +static const struct devlink_port_ops rvu_rep_dl_port_ops = { + .port_fn_hw_addr_get = rvu_rep_dl_port_fn_hw_addr_get, + .port_fn_hw_addr_set = rvu_rep_dl_port_fn_hw_addr_set, +}; + +static void +rvu_rep_devlink_set_switch_id(struct otx2_nic *priv, + struct netdev_phys_item_id *ppid) +{ + struct pci_dev *pdev = priv->pdev; + u64 id; + + id = 
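/* Editor's aside (illustrative sketch, not part of the diff): the function
 * being defined here derives one switch id for every representor from the
 * PCI Device Serial Number, so userspace can group all ports of a single
 * e-switch. Minimal standalone equivalent (hypothetical helper name):
 */
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <asm/unaligned.h>

static void fill_switch_id(struct pci_dev *pdev,
			   struct netdev_phys_item_id *ppid)
{
	u64 dsn = pci_get_dsn(pdev);	/* 0 if no DSN capability */

	ppid->id_len = sizeof(dsn);
	put_unaligned_be64(dsn, &ppid->id);
}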
pci_get_dsn(pdev); + + ppid->id_len = sizeof(id); + put_unaligned_be64(id, &ppid->id); +} + +static void rvu_rep_devlink_port_unregister(struct rep_dev *rep) +{ + devlink_port_unregister(&rep->dl_port); +} + +static int rvu_rep_devlink_port_register(struct rep_dev *rep) +{ + struct devlink_port_attrs attrs = {}; + struct otx2_nic *priv = rep->mdev; + struct devlink *dl = priv->dl->dl; + int err; + + if (!(rep->pcifunc & RVU_PFVF_FUNC_MASK)) { + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = rvu_get_pf(rep->pcifunc); + } else { + attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF; + attrs.pci_vf.pf = rvu_get_pf(rep->pcifunc); + attrs.pci_vf.vf = rep->pcifunc & RVU_PFVF_FUNC_MASK; + } + + rvu_rep_devlink_set_switch_id(priv, &attrs.switch_id); + devlink_port_attrs_set(&rep->dl_port, &attrs); + + err = devl_port_register_with_ops(dl, &rep->dl_port, rep->rep_id, + &rvu_rep_dl_port_ops); + if (err) { + dev_err(rep->mdev->dev, "devlink_port_register failed: %d\n", + err); + return err; + } + return 0; +} + +static int rvu_rep_get_repid(struct otx2_nic *priv, u16 pcifunc) +{ + int rep_id; + + for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++) + if (priv->rep_pf_map[rep_id] == pcifunc) + return rep_id; + return -EINVAL; +} + +static int rvu_rep_notify_pfvf(struct otx2_nic *priv, u16 event, + struct rep_event *data) +{ + struct rep_event *req; + + mutex_lock(&priv->mbox.lock); + req = otx2_mbox_alloc_msg_rep_event_notify(&priv->mbox); + if (!req) { + mutex_unlock(&priv->mbox.lock); + return -ENOMEM; + } + req->event = event; + req->pcifunc = data->pcifunc; + + memcpy(&req->evt_data, &data->evt_data, sizeof(struct rep_evt_data)); + otx2_sync_mbox_msg(&priv->mbox); + mutex_unlock(&priv->mbox.lock); + return 0; +} + +static void rvu_rep_state_evt_handler(struct otx2_nic *priv, + struct rep_event *info) +{ + struct rep_dev *rep; + int rep_id; + + rep_id = rvu_rep_get_repid(priv, info->pcifunc); + rep = priv->reps[rep_id]; + if (info->evt_data.vf_state) + rep->flags |= RVU_REP_VF_INITIALIZED; + else + rep->flags &= ~RVU_REP_VF_INITIALIZED; +} + +int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info) +{ + if (info->event & RVU_EVENT_PFVF_STATE) + rvu_rep_state_evt_handler(pf, info); + return 0; +} + +static int rvu_rep_change_mtu(struct net_device *dev, int new_mtu) +{ + struct rep_dev *rep = netdev_priv(dev); + struct otx2_nic *priv = rep->mdev; + struct rep_event evt = {0}; + + netdev_info(dev, "Changing MTU from %d to %d\n", + dev->mtu, new_mtu); + dev->mtu = new_mtu; + + evt.evt_data.mtu = new_mtu; + evt.pcifunc = rep->pcifunc; + rvu_rep_notify_pfvf(priv, RVU_EVENT_MTU_CHANGE, &evt); + return 0; +} + +static void rvu_rep_get_stats(struct work_struct *work) +{ + struct delayed_work *del_work = to_delayed_work(work); + struct nix_stats_req *req; + struct nix_stats_rsp *rsp; + struct rep_stats *stats; + struct otx2_nic *priv; + struct rep_dev *rep; + int err; + + rep = container_of(del_work, struct rep_dev, stats_wrk); + priv = rep->mdev; + + mutex_lock(&priv->mbox.lock); + req = otx2_mbox_alloc_msg_nix_lf_stats(&priv->mbox); + if (!req) { + mutex_unlock(&priv->mbox.lock); + return; + } + req->pcifunc = rep->pcifunc; + err = otx2_sync_mbox_msg_busy_poll(&priv->mbox); + if (err) + goto exit; + + rsp = (struct nix_stats_rsp *) + otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr); + + if (IS_ERR(rsp)) { + err = PTR_ERR(rsp); + goto exit; + } + + stats = &rep->stats; + stats->rx_bytes = rsp->rx.octs; + stats->rx_frames = rsp->rx.ucast + rsp->rx.bcast + + rsp->rx.mcast; + 
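/* Editor's aside (illustrative sketch, not part of the diff):
 * rvu_rep_notify_pfvf() above is the single funnel through which
 * representor-side changes (MAC, MTU, port state) reach the owning PF/VF:
 * allocate a message under the mailbox mutex, fill it, send synchronously.
 * Schematic version with a hypothetical message type and callbacks:
 */
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct evt_msg { u16 event; u16 pcifunc; };

static int send_event(struct mutex *mbox_lock,
		      struct evt_msg *(*alloc_msg)(void),
		      int (*sync_send)(void), u16 event, u16 pcifunc)
{
	struct evt_msg *req;
	int err = 0;

	mutex_lock(mbox_lock);
	req = alloc_msg();	/* carves a message out of the mbox region */
	if (!req) {
		err = -ENOMEM;
		goto out;
	}
	req->event = event;
	req->pcifunc = pcifunc;
	err = sync_send();	/* blocks until the peer responds */
out:
	mutex_unlock(mbox_lock);
	return err;
}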
stats->rx_drops = rsp->rx.drop; + stats->rx_mcast_frames = rsp->rx.mcast; + stats->tx_bytes = rsp->tx.octs; + stats->tx_frames = rsp->tx.ucast + rsp->tx.bcast + rsp->tx.mcast; + stats->tx_drops = rsp->tx.drop; +exit: + mutex_unlock(&priv->mbox.lock); +} + +static void rvu_rep_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct rep_dev *rep = netdev_priv(dev); + + if (!(rep->flags & RVU_REP_VF_INITIALIZED)) + return; + + stats->rx_packets = rep->stats.rx_frames; + stats->rx_bytes = rep->stats.rx_bytes; + stats->rx_dropped = rep->stats.rx_drops; + stats->multicast = rep->stats.rx_mcast_frames; + + stats->tx_packets = rep->stats.tx_frames; + stats->tx_bytes = rep->stats.tx_bytes; + stats->tx_dropped = rep->stats.tx_drops; + + schedule_delayed_work(&rep->stats_wrk, msecs_to_jiffies(100)); +} + +static int rvu_eswitch_config(struct otx2_nic *priv, u8 ena) +{ + struct esw_cfg_req *req; + + mutex_lock(&priv->mbox.lock); + req = otx2_mbox_alloc_msg_esw_cfg(&priv->mbox); + if (!req) { + mutex_unlock(&priv->mbox.lock); + return -ENOMEM; + } + req->ena = ena; + otx2_sync_mbox_msg(&priv->mbox); + mutex_unlock(&priv->mbox.lock); + return 0; +} + +static netdev_tx_t rvu_rep_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct rep_dev *rep = netdev_priv(dev); + struct otx2_nic *pf = rep->mdev; + struct otx2_snd_queue *sq; + struct netdev_queue *txq; + + sq = &pf->qset.sq[rep->rep_id]; + txq = netdev_get_tx_queue(dev, 0); + + if (!otx2_sq_append_skb(pf, txq, sq, skb, rep->rep_id)) { + netif_tx_stop_queue(txq); + + /* Check again, in case SQBs got freed up */ + smp_mb(); + if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb) + > sq->sqe_thresh) + netif_tx_wake_queue(txq); + + return NETDEV_TX_BUSY; + } + return NETDEV_TX_OK; +} + +static int rvu_rep_open(struct net_device *dev) +{ + struct rep_dev *rep = netdev_priv(dev); + struct otx2_nic *priv = rep->mdev; + struct rep_event evt = {0}; + + if (!(rep->flags & RVU_REP_VF_INITIALIZED)) + return 0; + + netif_carrier_on(dev); + netif_tx_start_all_queues(dev); + + evt.event = RVU_EVENT_PORT_STATE; + evt.evt_data.port_state = 1; + evt.pcifunc = rep->pcifunc; + rvu_rep_notify_pfvf(priv, RVU_EVENT_PORT_STATE, &evt); + return 0; +} + +static int rvu_rep_stop(struct net_device *dev) +{ + struct rep_dev *rep = netdev_priv(dev); + struct otx2_nic *priv = rep->mdev; + struct rep_event evt = {0}; + + if (!(rep->flags & RVU_REP_VF_INITIALIZED)) + return 0; + + netif_carrier_off(dev); + netif_tx_disable(dev); + + evt.event = RVU_EVENT_PORT_STATE; + evt.pcifunc = rep->pcifunc; + rvu_rep_notify_pfvf(priv, RVU_EVENT_PORT_STATE, &evt); + return 0; +} + +static const struct net_device_ops rvu_rep_netdev_ops = { + .ndo_open = rvu_rep_open, + .ndo_stop = rvu_rep_stop, + .ndo_start_xmit = rvu_rep_xmit, + .ndo_get_stats64 = rvu_rep_get_stats64, + .ndo_change_mtu = rvu_rep_change_mtu, + .ndo_has_offload_stats = rvu_rep_has_offload_stats, + .ndo_get_offload_stats = rvu_rep_get_offload_stats, + .ndo_setup_tc = rvu_rep_setup_tc, +}; + +static int rvu_rep_napi_init(struct otx2_nic *priv, + struct netlink_ext_ack *extack) +{ + struct otx2_qset *qset = &priv->qset; + struct otx2_cq_poll *cq_poll = NULL; + struct otx2_hw *hw = &priv->hw; + int err = 0, qidx, vec; + char *irq_name; + + qset->napi = kcalloc(hw->cint_cnt, sizeof(*cq_poll), GFP_KERNEL); + if (!qset->napi) + return -ENOMEM; + + /* Register NAPI handler */ + for (qidx = 0; qidx < hw->cint_cnt; qidx++) { + cq_poll = &qset->napi[qidx]; + cq_poll->cint_idx = qidx; + 
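/* Editor's aside (illustrative sketch, not part of the diff):
 * .ndo_get_stats64 runs in atomic context and must not sleep, while reading
 * the NIX counters takes a blocking mailbox round trip. The representor
 * therefore serves cached values and schedules delayed work (the
 * rvu_rep_get_stats() routine above) to refresh them. The pattern in
 * isolation:
 */
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct stats_cache {
	u64 rx_packets;			/* last values fetched from HW */
	struct delayed_work refresh;	/* may sleep, talks to firmware */
};

static void read_stats_fast(struct stats_cache *c, u64 *out)
{
	*out = c->rx_packets;		/* no sleeping on this path */
	schedule_delayed_work(&c->refresh, msecs_to_jiffies(100));
}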
cq_poll->cq_ids[CQ_RX] = + (qidx < hw->rx_queues) ? qidx : CINT_INVALID_CQ; + cq_poll->cq_ids[CQ_TX] = (qidx < hw->tx_queues) ? + qidx + hw->rx_queues : + CINT_INVALID_CQ; + cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ; + cq_poll->cq_ids[CQ_QOS] = CINT_INVALID_CQ; + + cq_poll->dev = (void *)priv; + netif_napi_add(priv->reps[qidx]->netdev, &cq_poll->napi, + otx2_napi_handler); + napi_enable(&cq_poll->napi); + } + /* Register CQ IRQ handlers */ + vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START; + for (qidx = 0; qidx < hw->cint_cnt; qidx++) { + irq_name = &hw->irq_name[vec * NAME_SIZE]; + + snprintf(irq_name, NAME_SIZE, "rep%d-rxtx-%d", qidx, qidx); + + err = request_irq(pci_irq_vector(priv->pdev, vec), + otx2_cq_intr_handler, 0, irq_name, + &qset->napi[qidx]); + if (err) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "RVU REP IRQ registration failed for CQ%d", + qidx); + goto err_free_cints; + } + vec++; + + /* Enable CQ IRQ */ + otx2_write64(priv, NIX_LF_CINTX_INT(qidx), BIT_ULL(0)); + otx2_write64(priv, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0)); + } + priv->flags &= ~OTX2_FLAG_INTF_DOWN; + return 0; + +err_free_cints: + otx2_free_cints(priv, qidx); + otx2_disable_napi(priv); + return err; +} + +static void rvu_rep_free_cq_rsrc(struct otx2_nic *priv) +{ + struct otx2_qset *qset = &priv->qset; + struct otx2_cq_poll *cq_poll = NULL; + int qidx, vec; + + /* Cleanup CQ NAPI and IRQ */ + vec = priv->hw.nix_msixoff + NIX_LF_CINT_VEC_START; + for (qidx = 0; qidx < priv->hw.cint_cnt; qidx++) { + /* Disable interrupt */ + otx2_write64(priv, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0)); + + synchronize_irq(pci_irq_vector(priv->pdev, vec)); + + cq_poll = &qset->napi[qidx]; + napi_synchronize(&cq_poll->napi); + vec++; + } + otx2_free_cints(priv, priv->hw.cint_cnt); + otx2_disable_napi(priv); +} + +static void rvu_rep_rsrc_free(struct otx2_nic *priv) +{ + struct otx2_qset *qset = &priv->qset; + struct delayed_work *work; + int wrk; + + for (wrk = 0; wrk < priv->qset.cq_cnt; wrk++) { + work = &priv->refill_wrk[wrk].pool_refill_work; + cancel_delayed_work_sync(work); + } + devm_kfree(priv->dev, priv->refill_wrk); + + otx2_free_hw_resources(priv); + otx2_free_queue_mem(qset); +} + +static int rvu_rep_rsrc_init(struct otx2_nic *priv) +{ + struct otx2_qset *qset = &priv->qset; + int err; + + err = otx2_alloc_queue_mem(priv); + if (err) + return err; + + priv->hw.max_mtu = otx2_get_max_mtu(priv); + priv->tx_max_pktlen = priv->hw.max_mtu + OTX2_ETH_HLEN; + priv->rbsize = ALIGN(priv->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM; + + err = otx2_init_hw_resources(priv); + if (err) + goto err_free_rsrc; + + /* Set maximum frame size allowed in HW */ + err = otx2_hw_set_mtu(priv, priv->hw.max_mtu); + if (err) { + dev_err(priv->dev, "Failed to set HW MTU\n"); + goto err_free_rsrc; + } + return 0; + +err_free_rsrc: + otx2_free_hw_resources(priv); + otx2_free_queue_mem(qset); + return err; +} + +void rvu_rep_destroy(struct otx2_nic *priv) +{ + struct rep_dev *rep; + int rep_id; + + rvu_eswitch_config(priv, false); + priv->flags |= OTX2_FLAG_INTF_DOWN; + rvu_rep_free_cq_rsrc(priv); + for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++) { + rep = priv->reps[rep_id]; + unregister_netdev(rep->netdev); + rvu_rep_devlink_port_unregister(rep); + free_netdev(rep->netdev); + kfree(rep->flow_cfg); + } + kfree(priv->reps); + rvu_rep_rsrc_free(priv); +} + +int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack) +{ + int rep_cnt = priv->rep_cnt; + struct net_device *ndev; + struct rep_dev *rep; + int rep_id, err; + u16 pcifunc; + + err = 
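/* Editor's aside (illustrative sketch, not part of the diff): the NAPI
 * setup being walked here binds each completion interrupt (CINT) to at most
 * one RX and one TX CQ; representors have no XDP or QoS queues, so those
 * slots are parked on an invalid id. The index scheme, with a hypothetical
 * sentinel:
 */
#include <linux/types.h>

#define INVALID_CQ 0xffff	/* stand-in for CINT_INVALID_CQ */

struct cint_map { u16 rx_cq, tx_cq, xdp_cq, qos_cq; };

static struct cint_map map_cint(u16 qidx, u16 rx_queues, u16 tx_queues)
{
	struct cint_map m = {
		.rx_cq	= qidx < rx_queues ? qidx : INVALID_CQ,
		.tx_cq	= qidx < tx_queues ? qidx + rx_queues : INVALID_CQ,
		.xdp_cq	= INVALID_CQ,
		.qos_cq	= INVALID_CQ,
	};

	return m;
}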
rvu_rep_rsrc_init(priv); + if (err) + return -ENOMEM; + + priv->reps = kcalloc(rep_cnt, sizeof(struct rep_dev *), GFP_KERNEL); + if (!priv->reps) + return -ENOMEM; + + for (rep_id = 0; rep_id < rep_cnt; rep_id++) { + ndev = alloc_etherdev(sizeof(*rep)); + if (!ndev) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "PFVF representor:%d creation failed", + rep_id); + err = -ENOMEM; + goto exit; + } + + rep = netdev_priv(ndev); + priv->reps[rep_id] = rep; + rep->mdev = priv; + rep->netdev = ndev; + rep->rep_id = rep_id; + + ndev->min_mtu = OTX2_MIN_MTU; + ndev->max_mtu = priv->hw.max_mtu; + ndev->netdev_ops = &rvu_rep_netdev_ops; + pcifunc = priv->rep_pf_map[rep_id]; + rep->pcifunc = pcifunc; + + snprintf(ndev->name, sizeof(ndev->name), "Rpf%dvf%d", + rvu_get_pf(pcifunc), (pcifunc & RVU_PFVF_FUNC_MASK)); + + ndev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | NETIF_F_RXHASH | + NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6); + + ndev->hw_features |= NETIF_F_HW_TC; + ndev->features |= ndev->hw_features; + eth_hw_addr_random(ndev); + err = rvu_rep_devlink_port_register(rep); + if (err) { + free_netdev(ndev); + goto exit; + } + + SET_NETDEV_DEVLINK_PORT(ndev, &rep->dl_port); + err = register_netdev(ndev); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "PFVF representor registration failed"); + rvu_rep_devlink_port_unregister(rep); + free_netdev(ndev); + goto exit; + } + + INIT_DELAYED_WORK(&rep->stats_wrk, rvu_rep_get_stats); + } + err = rvu_rep_napi_init(priv, extack); + if (err) + goto exit; + + rvu_eswitch_config(priv, true); + return 0; +exit: + while (--rep_id >= 0) { + rep = priv->reps[rep_id]; + unregister_netdev(rep->netdev); + rvu_rep_devlink_port_unregister(rep); + free_netdev(rep->netdev); + } + kfree(priv->reps); + rvu_rep_rsrc_free(priv); + return err; +} + +static int rvu_get_rep_cnt(struct otx2_nic *priv) +{ + struct get_rep_cnt_rsp *rsp; + struct mbox_msghdr *msghdr; + struct msg_req *req; + int err, rep; + + mutex_lock(&priv->mbox.lock); + req = otx2_mbox_alloc_msg_get_rep_cnt(&priv->mbox); + if (!req) { + mutex_unlock(&priv->mbox.lock); + return -ENOMEM; + } + err = otx2_sync_mbox_msg(&priv->mbox); + if (err) + goto exit; + + msghdr = otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr); + if (IS_ERR(msghdr)) { + err = PTR_ERR(msghdr); + goto exit; + } + + rsp = (struct get_rep_cnt_rsp *)msghdr; + priv->hw.tx_queues = rsp->rep_cnt; + priv->hw.rx_queues = rsp->rep_cnt; + priv->rep_cnt = rsp->rep_cnt; + for (rep = 0; rep < priv->rep_cnt; rep++) + priv->rep_pf_map[rep] = rsp->rep_pf_map[rep]; + +exit: + mutex_unlock(&priv->mbox.lock); + return err; +} + +static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct otx2_nic *priv; + struct otx2_hw *hw; + int err; + + err = pcim_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + return err; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "PCI request regions failed 0x%x\n", err); + return err; + } + + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "DMA mask config failed, abort\n"); + goto err_release_regions; + } + + pci_set_master(pdev); + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + err = -ENOMEM; + goto err_release_regions; + } + + pci_set_drvdata(pdev, priv); + priv->pdev = pdev; + priv->dev = dev; + priv->flags |= OTX2_FLAG_INTF_DOWN; + priv->flags |= OTX2_FLAG_REP_MODE_ENABLED; + + hw = &priv->hw; + hw->pdev = pdev; + hw->max_queues 
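/* Editor's aside (illustrative sketch, not part of the diff):
 * rvu_rep_create() above unwinds a partial failure with
 * "while (--rep_id >= 0)", so only fully constructed representors are torn
 * down, in reverse creation order and mirroring the setup steps. The
 * generic shape:
 */
static int create_all(int count, int (*create_one)(int idx),
		      void (*destroy_one)(int idx))
{
	int i, err = 0;

	for (i = 0; i < count; i++) {
		err = create_one(i);
		if (err)
			break;
	}
	if (!err)
		return 0;

	while (--i >= 0)	/* tear down in reverse order */
		destroy_one(i);
	return err;
}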
= OTX2_MAX_CQ_CNT;
+	hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
+	hw->xqe_size = 128;
+
+	err = otx2_init_rsrc(pdev, priv);
+	if (err)
+		goto err_release_regions;
+
+	priv->iommu_domain = iommu_get_domain_for_dev(dev);
+
+	err = rvu_get_rep_cnt(priv);
+	if (err)
+		goto err_detach_rsrc;
+
+	err = otx2_register_dl(priv);
+	if (err)
+		goto err_detach_rsrc;
+
+	return 0;
+
+err_detach_rsrc:
+	if (priv->hw.lmt_info)
+		free_percpu(priv->hw.lmt_info);
+	if (test_bit(CN10K_LMTST, &priv->hw.cap_flag))
+		qmem_free(priv->dev, priv->dync_lmt);
+	otx2_detach_resources(&priv->mbox);
+	otx2_disable_mbox_intr(priv);
+	otx2_pfaf_mbox_destroy(priv);
+	pci_free_irq_vectors(pdev);
+err_release_regions:
+	pci_set_drvdata(pdev, NULL);
+	pci_release_regions(pdev);
+	return err;
+}
+
+static void rvu_rep_remove(struct pci_dev *pdev)
+{
+	struct otx2_nic *priv = pci_get_drvdata(pdev);
+
+	otx2_unregister_dl(priv);
+	if (!(priv->flags & OTX2_FLAG_INTF_DOWN))
+		rvu_rep_destroy(priv);
+	otx2_detach_resources(&priv->mbox);
+	if (priv->hw.lmt_info)
+		free_percpu(priv->hw.lmt_info);
+	if (test_bit(CN10K_LMTST, &priv->hw.cap_flag))
+		qmem_free(priv->dev, priv->dync_lmt);
+	otx2_disable_mbox_intr(priv);
+	otx2_pfaf_mbox_destroy(priv);
+	pci_free_irq_vectors(priv->pdev);
+	pci_set_drvdata(pdev, NULL);
+	pci_release_regions(pdev);
+}
+
+static struct pci_driver rvu_rep_driver = {
+	.name = DRV_NAME,
+	.id_table = rvu_rep_id_table,
+	.probe = rvu_rep_probe,
+	.remove = rvu_rep_remove,
+	.shutdown = rvu_rep_remove,
+};
+
+static int __init rvu_rep_init_module(void)
+{
+	return pci_register_driver(&rvu_rep_driver);
+}
+
+static void __exit rvu_rep_cleanup_module(void)
+{
+	pci_unregister_driver(&rvu_rep_driver);
+}
+
+module_init(rvu_rep_init_module);
+module_exit(rvu_rep_cleanup_module);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.h b/drivers/net/ethernet/marvell/octeontx2/nic/rep.h
new file mode 100644
index 000000000000..38446b3e4f13
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU REPRESENTOR driver
+ *
+ * Copyright (C) 2024 Marvell.
+ *
+ */
+
+#ifndef REP_H
+#define REP_H
+
+#include <linux/pci.h>
+
+#include "otx2_reg.h"
+#include "otx2_txrx.h"
+#include "otx2_common.h"
+
+#define PCI_DEVID_RVU_REP	0xA0E0
+
+#define RVU_MAX_REP	OTX2_MAX_CQ_CNT
+
+struct rep_stats {
+	u64 rx_bytes;
+	u64 rx_frames;
+	u64 rx_drops;
+	u64 rx_mcast_frames;
+
+	u64 tx_bytes;
+	u64 tx_frames;
+	u64 tx_drops;
+};
+
+struct rep_dev {
+	struct otx2_nic *mdev;
+	struct net_device *netdev;
+	struct rep_stats stats;
+	struct delayed_work stats_wrk;
+	struct devlink_port dl_port;
+	struct otx2_flow_config *flow_cfg;
+#define RVU_REP_VF_INITIALIZED BIT_ULL(0)
+	u64 flags;
+	u16 rep_id;
+	u16 pcifunc;
+	u8 mac[ETH_ALEN];
+};
+
+static inline bool otx2_rep_dev(struct pci_dev *pdev)
+{
+	return pdev->device == PCI_DEVID_RVU_REP;
+}
+
+int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack);
+void rvu_rep_destroy(struct otx2_nic *priv);
+int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info);
+#endif /* REP_H */
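Editor's closing note: the representor driver above binds to PCI device id 0xA0E0 (PCI_DEVID_RVU_REP) and instantiates one representor netdev per entry the AF reports through the get_rep_cnt mailbox message. Based on the rvu_rep_create()/rvu_rep_destroy() entry points, representor lifetime appears to be driven by the PF's devlink e-switch mode handling, which lives outside this section; on such a setup a command along the lines of "devlink dev eswitch set pci/0002:1c:00.0 mode switchdev" (device address illustrative) would be the expected way to spawn the representors.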