Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/af')
25 files changed, 3481 insertions, 709 deletions
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile index eb535c98ca38..1a3455620b38 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -4,10 +4,10 @@ # ccflags-y += -I$(src) -obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o -obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o +obj-$(CONFIG_OCTEONTX2_MBOX) += rvu_mbox.o +obj-$(CONFIG_OCTEONTX2_AF) += rvu_af.o -octeontx2_mbox-y := mbox.o rvu_trace.o -octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ +rvu_mbox-y := mbox.o rvu_trace.o +rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ - rvu_cpt.o rvu_devlink.o + rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 1a8f5a039d50..9caa375d01b1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -14,53 +14,18 @@ #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> +#include <linux/ethtool.h> #include <linux/phy.h> #include <linux/of.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include "cgx.h" +#include "rvu.h" +#include "lmac_common.h" -#define DRV_NAME "octeontx2-cgx" -#define DRV_STRING "Marvell OcteonTX2 CGX/MAC Driver" - -/** - * struct lmac - * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion - * @cmd_lock: Lock to serialize the command interface - * @resp: command response - * @link_info: link related information - * @event_cb: callback for linkchange events - * @event_cb_lock: lock for serializing callback with unregister - * @cmd_pend: flag set before new command is started - * flag cleared after command response is received - * @cgx: parent cgx port - * @lmac_id: lmac port id - * @name: lmac port name - */ -struct lmac { - wait_queue_head_t wq_cmd_cmplt; - struct mutex cmd_lock; - u64 resp; - struct cgx_link_user_info link_info; - struct cgx_event_cb event_cb; - spinlock_t event_cb_lock; - bool cmd_pend; - struct cgx *cgx; - u8 lmac_id; - char *name; -}; - -struct cgx { - void __iomem *reg_base; - struct pci_dev *pdev; - u8 cgx_id; - u8 lmac_count; - struct lmac *lmac_idmap[MAX_LMAC_PER_CGX]; - struct work_struct cgx_cmd_work; - struct workqueue_struct *cgx_cmd_workq; - struct list_head cgx_list; -}; +#define DRV_NAME "Marvell-CGX/RPM" +#define DRV_STRING "Marvell CGX/RPM Driver" static LIST_HEAD(cgx_list); @@ -76,22 +41,45 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en); /* Supported devices */ static const struct pci_device_id cgx_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) }, + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) }, { 0, } /* end of table */ }; MODULE_DEVICE_TABLE(pci, cgx_id_table); -static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val) +static bool is_dev_rpm(void *cgxd) { - writeq(val, cgx->reg_base + (lmac << 18) + offset); + struct cgx *cgx = cgxd; + + return (cgx->pdev->device == PCI_DEVID_CN10K_RPM); +} + +bool is_lmac_valid(struct cgx *cgx, int lmac_id) +{ + return cgx && test_bit(lmac_id, &cgx->lmac_bmap); } -static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset) +struct mac_ops *get_mac_ops(void *cgxd) { - return readq(cgx->reg_base + (lmac << 18) + offset); + if (!cgxd) + return cgxd; + + return ((struct cgx *)cgxd)->mac_ops; } -static inline struct 
lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx) +void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val) +{ + writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) + + offset); +} + +u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset) +{ + return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) + + offset); +} + +struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx) { if (!cgx || lmac_id >= MAX_LMAC_PER_CGX) return NULL; @@ -135,6 +123,20 @@ void *cgx_get_pdata(int cgx_id) return NULL; } +void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + + cgx_write(cgx_dev, lmac_id, offset, val); +} + +u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + + return cgx_read(cgx_dev, lmac_id, offset); +} + int cgx_get_cgxid(void *cgxd) { struct cgx *cgx = cgxd; @@ -185,8 +187,10 @@ static u64 mac2u64 (u8 *mac_addr) int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct mac_ops *mac_ops; u64 cfg; + mac_ops = cgx_dev->mac_ops; /* copy 6bytes from macaddr */ /* memcpy(&cfg, mac_addr, 6); */ @@ -205,8 +209,11 @@ int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr) u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct mac_ops *mac_ops; u64 cfg; + mac_ops = cgx_dev->mac_ops; + cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8); return cfg & CGX_RX_DMAC_ADR_MASK; } @@ -215,15 +222,16 @@ int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind) { struct cgx *cgx = cgxd; - if (!cgx || lmac_id >= cgx->lmac_count) + if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F)); return 0; } -static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id) +static u8 cgx_get_lmac_type(void *cgxd, int lmac_id) { + struct cgx *cgx = cgxd; u64 cfg; cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); @@ -237,10 +245,10 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable) u8 lmac_type; u64 cfg; - if (!cgx || lmac_id >= cgx->lmac_count) + if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; - lmac_type = cgx_get_lmac_type(cgx, lmac_id); + lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac_id); if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) { cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL); if (enable) @@ -262,11 +270,13 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable) void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable) { struct cgx *cgx = cgx_get_pdata(cgx_id); + struct mac_ops *mac_ops; u64 cfg = 0; if (!cgx) return; + mac_ops = cgx->mac_ops; if (enable) { /* Enable promiscuous mode on LMAC */ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); @@ -324,7 +334,7 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat) { struct cgx *cgx = cgxd; - if (!cgx || lmac_id >= cgx->lmac_count) + if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8)); return 0; @@ -334,18 +344,77 @@ int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat) { struct cgx *cgx = cgxd; - if (!cgx || lmac_id >= cgx->lmac_count) + if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; *tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8)); return 0; } +u64 cgx_features_get(void *cgxd) +{ + return ((struct cgx *)cgxd)->hw_features; +} + +static int cgx_set_fec_stats_count(struct 
cgx_link_user_info *linfo) +{ + if (!linfo->fec) + return 0; + + switch (linfo->lmac_type_id) { + case LMAC_MODE_SGMII: + case LMAC_MODE_XAUI: + case LMAC_MODE_RXAUI: + case LMAC_MODE_QSGMII: + return 0; + case LMAC_MODE_10G_R: + case LMAC_MODE_25G_R: + case LMAC_MODE_100G_R: + case LMAC_MODE_USXGMII: + return 1; + case LMAC_MODE_40G_R: + return 4; + case LMAC_MODE_50G_R: + if (linfo->fec == OTX2_FEC_BASER) + return 2; + else + return 1; + default: + return 0; + } +} + +int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp) +{ + int stats, fec_stats_count = 0; + int corr_reg, uncorr_reg; + struct cgx *cgx = cgxd; + + if (!cgx || lmac_id >= cgx->lmac_count) + return -ENODEV; + fec_stats_count = + cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info); + if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) { + corr_reg = CGXX_SPUX_LNX_FEC_CORR_BLOCKS; + uncorr_reg = CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS; + } else { + corr_reg = CGXX_SPUX_RSFEC_CORR; + uncorr_reg = CGXX_SPUX_RSFEC_UNCORR; + } + for (stats = 0; stats < fec_stats_count; stats++) { + rsp->fec_corr_blks += + cgx_read(cgx, lmac_id, corr_reg + (stats * 8)); + rsp->fec_uncorr_blks += + cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8)); + } + return 0; +} + int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable) { struct cgx *cgx = cgxd; u64 cfg; - if (!cgx || lmac_id >= cgx->lmac_count) + if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); @@ -362,7 +431,7 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable) struct cgx *cgx = cgxd; u64 cfg, last; - if (!cgx || lmac_id >= cgx->lmac_count) + if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); @@ -377,13 +446,16 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable) return !!(last & DATA_PKT_TX_EN); } -int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id, - u8 *tx_pause, u8 *rx_pause) +static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id, + u8 *tx_pause, u8 *rx_pause) { struct cgx *cgx = cgxd; u64 cfg; - if (!cgx || lmac_id >= cgx->lmac_count) + if (is_dev_rpm(cgx)) + return 0; + + if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); @@ -394,13 +466,16 @@ int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id, return 0; } -int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id, - u8 tx_pause, u8 rx_pause) +static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id, + u8 tx_pause, u8 rx_pause) { struct cgx *cgx = cgxd; u64 cfg; - if (!cgx || lmac_id >= cgx->lmac_count) + if (is_dev_rpm(cgx)) + return 0; + + if (!is_lmac_valid(cgx, lmac_id)) return -ENODEV; cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL); @@ -424,11 +499,12 @@ int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id, return 0; } -static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable) +static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable) { + struct cgx *cgx = cgxd; u64 cfg; - if (!cgx || lmac_id >= cgx->lmac_count) + if (!is_lmac_valid(cgx, lmac_id)) return; if (enable) { /* Enable receive pause frames */ @@ -486,6 +562,9 @@ void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable) if (!cgx) return; + if (is_dev_rpm(cgx)) + return; + if (enable) { /* Enable inbound PTP timestamping */ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); @@ -508,7 +587,7 @@ void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable) } /* CGX Firmware interface low level support */ 
-static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac) +int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac) { struct cgx *cgx = lmac->cgx; struct device *dev; @@ -556,8 +635,7 @@ unlock: return err; } -static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp, - struct cgx *cgx, int lmac_id) +int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id) { struct lmac *lmac; int err; @@ -592,6 +670,7 @@ static inline void cgx_link_usertable_init(void) cgx_speed_mbps[CGX_LINK_25G] = 25000; cgx_speed_mbps[CGX_LINK_40G] = 40000; cgx_speed_mbps[CGX_LINK_50G] = 50000; + cgx_speed_mbps[CGX_LINK_80G] = 80000; cgx_speed_mbps[CGX_LINK_100G] = 100000; cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII"; @@ -606,6 +685,143 @@ static inline void cgx_link_usertable_init(void) cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII"; } +static int cgx_link_usertable_index_map(int speed) +{ + switch (speed) { + case SPEED_10: + return CGX_LINK_10M; + case SPEED_100: + return CGX_LINK_100M; + case SPEED_1000: + return CGX_LINK_1G; + case SPEED_2500: + return CGX_LINK_2HG; + case SPEED_5000: + return CGX_LINK_5G; + case SPEED_10000: + return CGX_LINK_10G; + case SPEED_20000: + return CGX_LINK_20G; + case SPEED_25000: + return CGX_LINK_25G; + case SPEED_40000: + return CGX_LINK_40G; + case SPEED_50000: + return CGX_LINK_50G; + case 80000: + return CGX_LINK_80G; + case SPEED_100000: + return CGX_LINK_100G; + case SPEED_UNKNOWN: + return CGX_LINK_NONE; + } + return CGX_LINK_NONE; +} + +static void set_mod_args(struct cgx_set_link_mode_args *args, + u32 speed, u8 duplex, u8 autoneg, u64 mode) +{ + /* Fill default values incase of user did not pass + * valid parameters + */ + if (args->duplex == DUPLEX_UNKNOWN) + args->duplex = duplex; + if (args->speed == SPEED_UNKNOWN) + args->speed = speed; + if (args->an == AUTONEG_UNKNOWN) + args->an = autoneg; + args->mode = mode; + args->ports = 0; +} + +static void otx2_map_ethtool_link_modes(u64 bitmask, + struct cgx_set_link_mode_args *args) +{ + switch (bitmask) { + case ETHTOOL_LINK_MODE_10baseT_Half_BIT: + set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII)); + break; + case ETHTOOL_LINK_MODE_10baseT_Full_BIT: + set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII)); + break; + case ETHTOOL_LINK_MODE_100baseT_Half_BIT: + set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII)); + break; + case ETHTOOL_LINK_MODE_100baseT_Full_BIT: + set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII)); + break; + case ETHTOOL_LINK_MODE_1000baseT_Half_BIT: + set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII)); + break; + case ETHTOOL_LINK_MODE_1000baseT_Full_BIT: + set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_SGMII)); + break; + case ETHTOOL_LINK_MODE_1000baseX_Full_BIT: + set_mod_args(args, 1000, 0, 0, BIT_ULL(CGX_MODE_1000_BASEX)); + break; + case ETHTOOL_LINK_MODE_10000baseT_Full_BIT: + set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_QSGMII)); + break; + case ETHTOOL_LINK_MODE_10000baseSR_Full_BIT: + set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2C)); + break; + case ETHTOOL_LINK_MODE_10000baseLR_Full_BIT: + set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2M)); + break; + case ETHTOOL_LINK_MODE_10000baseKR_Full_BIT: + set_mod_args(args, 10000, 0, 1, BIT_ULL(CGX_MODE_10G_KR)); + break; + case ETHTOOL_LINK_MODE_25000baseSR_Full_BIT: + set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2C)); + break; + case ETHTOOL_LINK_MODE_25000baseCR_Full_BIT: + set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_CR)); + break; + case 
ETHTOOL_LINK_MODE_25000baseKR_Full_BIT: + set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_KR)); + break; + case ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT: + set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2C)); + break; + case ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT: + set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2M)); + break; + case ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT: + set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_CR4)); + break; + case ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT: + set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_KR4)); + break; + case ETHTOOL_LINK_MODE_50000baseSR_Full_BIT: + set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2C)); + break; + case ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT: + set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2M)); + break; + case ETHTOOL_LINK_MODE_50000baseCR_Full_BIT: + set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_CR)); + break; + case ETHTOOL_LINK_MODE_50000baseKR_Full_BIT: + set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_KR)); + break; + case ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT: + set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2C)); + break; + case ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT: + set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2M)); + break; + case ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT: + set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_CR4)); + break; + case ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT: + set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_KR4)); + break; + default: + set_mod_args(args, 0, 1, 0, BIT_ULL(CGX_MODE_MAX)); + break; + } +} + static inline void link_status_user_format(u64 lstat, struct cgx_link_user_info *linfo, struct cgx *cgx, u8 lmac_id) @@ -615,6 +831,8 @@ static inline void link_status_user_format(u64 lstat, linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat); linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat); linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)]; + linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat); + linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat); linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id); lmac_string = cgx_lmactype_string[linfo->lmac_type_id]; strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1); @@ -642,6 +860,9 @@ static inline void cgx_link_change_handler(u64 lstat, lmac->link_info = event.link_uinfo; linfo = &lmac->link_info; + if (err_type == CGX_ERR_SPEED_CHANGE_INVALID) + return; + /* Ensure callback doesn't get unregistered until we finish it */ spin_lock(&lmac->event_cb_lock); @@ -670,7 +891,8 @@ static inline bool cgx_cmdresp_is_linkevent(u64 event) id = FIELD_GET(EVTREG_ID, event); if (id == CGX_CMD_LINK_BRING_UP || - id == CGX_CMD_LINK_BRING_DOWN) + id == CGX_CMD_LINK_BRING_DOWN || + id == CGX_CMD_MODE_CHANGE) return true; else return false; @@ -686,12 +908,16 @@ static inline bool cgx_event_is_linkevent(u64 event) static irqreturn_t cgx_fwi_event_handler(int irq, void *data) { + u64 event, offset, clear_bit; struct lmac *lmac = data; struct cgx *cgx; - u64 event; cgx = lmac->cgx; + /* Clear SW_INT for RPM and CMR_INT for CGX */ + offset = cgx->mac_ops->int_register; + clear_bit = cgx->mac_ops->int_ena_bit; + event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG); if (!FIELD_GET(EVTREG_ACK, event)) @@ -727,7 +953,7 @@ static irqreturn_t cgx_fwi_event_handler(int irq, void *data) * Ack the interrupt register as well. 
*/ cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0); - cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT); + cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit); return IRQ_HANDLED; } @@ -771,20 +997,79 @@ int cgx_get_fwdata_base(u64 *base) { u64 req = 0, resp; struct cgx *cgx; + int first_lmac; int err; cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list); if (!cgx) return -ENXIO; + first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX); req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req); - err = cgx_fwi_cmd_generic(req, &resp, cgx, 0); + err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac); if (!err) *base = FIELD_GET(RESP_FWD_BASE, resp); return err; } +int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args, + int cgx_id, int lmac_id) +{ + struct cgx *cgx = cgxd; + u64 req = 0, resp; + + if (!cgx) + return -ENODEV; + + if (args.mode) + otx2_map_ethtool_link_modes(args.mode, &args); + if (!args.speed && args.duplex && !args.an) + return -EINVAL; + + req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req); + req = FIELD_SET(CMDMODECHANGE_SPEED, + cgx_link_usertable_index_map(args.speed), req); + req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req); + req = FIELD_SET(CMDMODECHANGE_AN, args.an, req); + req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req); + req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req); + + return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id); +} +int cgx_set_fec(u64 fec, int cgx_id, int lmac_id) +{ + u64 req = 0, resp; + struct cgx *cgx; + int err = 0; + + cgx = cgx_get_pdata(cgx_id); + if (!cgx) + return -ENXIO; + + req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_FEC, req); + req = FIELD_SET(CMDSETFEC, fec, req); + err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id); + if (err) + return err; + + cgx->lmac_idmap[lmac_id]->link_info.fec = + FIELD_GET(RESP_LINKSTAT_FEC, resp); + return cgx->lmac_idmap[lmac_id]->link_info.fec; +} + +int cgx_get_phy_fec_stats(void *cgxd, int lmac_id) +{ + struct cgx *cgx = cgxd; + u64 req = 0, resp; + + if (!cgx) + return -ENODEV; + + req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_FEC_STATS, req); + return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id); +} + static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable) { u64 req = 0; @@ -800,10 +1085,11 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable) static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx) { + int first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX); u64 req = 0; req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req); - return cgx_fwi_cmd_generic(req, resp, cgx, 0); + return cgx_fwi_cmd_generic(req, resp, cgx, first_lmac); } static int cgx_lmac_verify_fwi_version(struct cgx *cgx) @@ -836,8 +1122,8 @@ static void cgx_lmac_linkup_work(struct work_struct *work) struct device *dev = &cgx->pdev->dev; int i, err; - /* Do Link up for all the lmacs */ - for (i = 0; i < cgx->lmac_count; i++) { + /* Do Link up for all the enabled lmacs */ + for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) { err = cgx_fwi_link_change(cgx, i, true); if (err) dev_info(dev, "cgx port %d:%d Link up command failed\n", @@ -857,17 +1143,82 @@ int cgx_lmac_linkup_start(void *cgxd) return 0; } +static void cgx_lmac_get_fifolen(struct cgx *cgx) +{ + u64 cfg; + + cfg = cgx_read(cgx, 0, CGX_CONST); + cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg); +} + +static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac, + int cnt, bool req_free) +{ + struct mac_ops *mac_ops = 
cgx->mac_ops; + u64 offset, ena_bit; + unsigned int irq; + int err; + + irq = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi + + cnt * mac_ops->irq_offset); + offset = mac_ops->int_set_reg; + ena_bit = mac_ops->int_ena_bit; + + if (req_free) { + free_irq(irq, lmac); + return 0; + } + + err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac); + if (err) + return err; + + /* Enable interrupt */ + cgx_write(cgx, lmac->lmac_id, offset, ena_bit); + return 0; +} + +int cgx_get_nr_lmacs(void *cgxd) +{ + struct cgx *cgx = cgxd; + + return cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7ULL; +} + +u8 cgx_get_lmacid(void *cgxd, u8 lmac_index) +{ + struct cgx *cgx = cgxd; + + return cgx->lmac_idmap[lmac_index]->lmac_id; +} + +unsigned long cgx_get_lmac_bmap(void *cgxd) +{ + struct cgx *cgx = cgxd; + + return cgx->lmac_bmap; +} + static int cgx_lmac_init(struct cgx *cgx) { struct lmac *lmac; + u64 lmac_list; int i, err; - cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7; + cgx_lmac_get_fifolen(cgx); + + cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx); + /* lmac_list specifies which lmacs are enabled + * when bit n is set to 1, LMAC[n] is enabled + */ + if (cgx->mac_ops->non_contiguous_serdes_lane) + lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL; + if (cgx->lmac_count > MAX_LMAC_PER_CGX) cgx->lmac_count = MAX_LMAC_PER_CGX; for (i = 0; i < cgx->lmac_count; i++) { - lmac = kcalloc(1, sizeof(struct lmac), GFP_KERNEL); + lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL); if (!lmac) return -ENOMEM; lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL); @@ -876,24 +1227,25 @@ static int cgx_lmac_init(struct cgx *cgx) goto err_lmac_free; } sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i); - lmac->lmac_id = i; + if (cgx->mac_ops->non_contiguous_serdes_lane) { + lmac->lmac_id = __ffs64(lmac_list); + lmac_list &= ~BIT_ULL(lmac->lmac_id); + } else { + lmac->lmac_id = i; + } + lmac->cgx = cgx; init_waitqueue_head(&lmac->wq_cmd_cmplt); mutex_init(&lmac->cmd_lock); spin_lock_init(&lmac->event_cb_lock); - err = request_irq(pci_irq_vector(cgx->pdev, - CGX_LMAC_FWI + i * 9), - cgx_fwi_event_handler, 0, lmac->name, lmac); + err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false); if (err) goto err_irq; - /* Enable interrupt */ - cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S, - FW_CGX_INT); - /* Add reference */ - cgx->lmac_idmap[i] = lmac; - cgx_lmac_pause_frm_config(cgx, i, true); + cgx->lmac_idmap[lmac->lmac_id] = lmac; + cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true); + set_bit(lmac->lmac_id, &cgx->lmac_bmap); } return cgx_lmac_verify_fwi_version(cgx); @@ -917,12 +1269,12 @@ static int cgx_lmac_exit(struct cgx *cgx) } /* Free all lmac related resources */ - for (i = 0; i < cgx->lmac_count; i++) { - cgx_lmac_pause_frm_config(cgx, i, false); + for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) { lmac = cgx->lmac_idmap[i]; if (!lmac) continue; - free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac); + cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false); + cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true); kfree(lmac->name); kfree(lmac); } @@ -930,6 +1282,37 @@ static int cgx_lmac_exit(struct cgx *cgx) return 0; } +static void cgx_populate_features(struct cgx *cgx) +{ + if (is_dev_rpm(cgx)) + cgx->hw_features = (RVU_MAC_RPM | RVU_LMAC_FEAT_FC); + else + cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP); +} + +static struct mac_ops cgx_mac_ops = { + .name = "cgx", + .csr_offset = 0, + .lmac_offset = 18, + .int_register 
= CGXX_CMRX_INT, + .int_set_reg = CGXX_CMRX_INT_ENA_W1S, + .irq_offset = 9, + .int_ena_bit = FW_CGX_INT, + .lmac_fwi = CGX_LMAC_FWI, + .non_contiguous_serdes_lane = false, + .rx_stats_cnt = 9, + .tx_stats_cnt = 18, + .get_nr_lmacs = cgx_get_nr_lmacs, + .get_lmac_type = cgx_get_lmac_type, + .mac_lmac_intl_lbk = cgx_lmac_internal_loopback, + .mac_get_rx_stats = cgx_get_rx_stats, + .mac_get_tx_stats = cgx_get_tx_stats, + .mac_enadis_rx_pause_fwding = cgx_lmac_enadis_rx_pause_fwding, + .mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status, + .mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm, + .mac_pause_frm_config = cgx_lmac_pause_frm_config, +}; + static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct device *dev = &pdev->dev; @@ -943,6 +1326,12 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_drvdata(pdev, cgx); + /* Use mac_ops to get MAC specific features */ + if (pdev->device == PCI_DEVID_CN10K_RPM) + cgx->mac_ops = rpm_get_mac_ops(); + else + cgx->mac_ops = &cgx_mac_ops; + err = pci_enable_device(pdev); if (err) { dev_err(dev, "Failed to enable PCI device\n"); @@ -964,7 +1353,7 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_release_regions; } - nvec = CGX_NVEC; + nvec = pci_msix_vec_count(cgx->pdev); err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX); if (err < 0 || err != nvec) { dev_err(dev, "Request for %d msix vectors failed, err %d\n", @@ -988,6 +1377,10 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) cgx_link_usertable_init(); + cgx_populate_features(cgx); + + mutex_init(&cgx->lock); + err = cgx_lmac_init(cgx); if (err) goto err_release_lmac; @@ -1011,8 +1404,10 @@ static void cgx_remove(struct pci_dev *pdev) { struct cgx *cgx = pci_get_drvdata(pdev); - cgx_lmac_exit(cgx); - list_del(&cgx->cgx_list); + if (cgx) { + cgx_lmac_exit(cgx); + list_del(&cgx->cgx_list); + } pci_free_irq_vectors(pdev); pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h index bcfc3e5f66bb..12521262164a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -13,6 +13,7 @@ #include "mbox.h" #include "cgx_fw_if.h" +#include "rpm.h" /* PCI device IDs */ #define PCI_DEVID_OCTEONTX2_CGX 0xA059 @@ -42,12 +43,12 @@ #define CGXX_CMRX_RX_ID_MAP 0x060 #define CGXX_CMRX_RX_STAT0 0x070 #define CGXX_CMRX_RX_LMACS 0x128 -#define CGXX_CMRX_RX_DMAC_CTL0 0x1F8 +#define CGXX_CMRX_RX_DMAC_CTL0 (0x1F8 + mac_ops->csr_offset) #define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3) #define CGX_DMAC_CAM_ACCEPT BIT_ULL(3) #define CGX_DMAC_MCAST_MODE BIT_ULL(1) #define CGX_DMAC_BCAST_MODE BIT_ULL(0) -#define CGXX_CMRX_RX_DMAC_CAM0 0x200 +#define CGXX_CMRX_RX_DMAC_CAM0 (0x200 + mac_ops->csr_offset) #define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48) #define CGXX_CMRX_RX_DMAC_CAM1 0x400 #define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0) @@ -55,7 +56,13 @@ #define CGXX_SCRATCH0_REG 0x1050 #define CGXX_SCRATCH1_REG 0x1058 #define CGX_CONST 0x2000 +#define CGX_CONST_RXFIFO_SIZE GENMASK_ULL(23, 0) #define CGXX_SPUX_CONTROL1 0x10000 +#define CGXX_SPUX_LNX_FEC_CORR_BLOCKS 0x10700 +#define CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS 0x10800 +#define CGXX_SPUX_RSFEC_CORR 0x10088 +#define CGXX_SPUX_RSFEC_UNCORR 0x10090 + #define CGXX_SPUX_CONTROL1_LBK BIT_ULL(14) #define CGXX_GMP_PCS_MRX_CTL 0x30000 #define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14) @@ -81,7 +88,6 
@@ #define CGX_CMD_TIMEOUT 2200 /* msecs */ #define DEFAULT_PAUSE_TIME 0x7FF -#define CGX_NVEC 37 #define CGX_LMAC_FWI 0 enum cgx_nix_stat_type { @@ -147,5 +153,16 @@ int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause); void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable); u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id); - +int cgx_set_fec(u64 fec, int cgx_id, int lmac_id); +int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp); +int cgx_get_phy_fec_stats(void *cgxd, int lmac_id); +int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args, + int cgx_id, int lmac_id); +u64 cgx_features_get(void *cgxd); +struct mac_ops *get_mac_ops(void *cgxd); +int cgx_get_nr_lmacs(void *cgxd); +u8 cgx_get_lmacid(void *cgxd, u8 lmac_index); +unsigned long cgx_get_lmac_bmap(void *cgxd); +void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val); +u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset); #endif /* CGX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h index c3702fa58b6b..aa4e42f78f13 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h @@ -43,7 +43,13 @@ enum cgx_error_type { CGX_ERR_TRAINING_FAIL, CGX_ERR_RX_EQU_FAIL, CGX_ERR_SPUX_BER_FAIL, - CGX_ERR_SPUX_RSFEC_ALGN_FAIL, /* = 22 */ + CGX_ERR_SPUX_RSFEC_ALGN_FAIL, + CGX_ERR_SPUX_MARKER_LOCK_FAIL, + CGX_ERR_SET_FEC_INVALID, + CGX_ERR_SET_FEC_FAIL, + CGX_ERR_MODULE_INVALID, + CGX_ERR_MODULE_NOT_PRESENT, + CGX_ERR_SPEED_CHANGE_INVALID, }; /* LINK speed types */ @@ -59,10 +65,41 @@ enum cgx_link_speed { CGX_LINK_25G, CGX_LINK_40G, CGX_LINK_50G, + CGX_LINK_80G, CGX_LINK_100G, CGX_LINK_SPEED_MAX, }; +enum CGX_MODE_ { + CGX_MODE_SGMII, + CGX_MODE_1000_BASEX, + CGX_MODE_QSGMII, + CGX_MODE_10G_C2C, + CGX_MODE_10G_C2M, + CGX_MODE_10G_KR, + CGX_MODE_20G_C2C, + CGX_MODE_25G_C2C, + CGX_MODE_25G_C2M, + CGX_MODE_25G_2_C2C, + CGX_MODE_25G_CR, + CGX_MODE_25G_KR, + CGX_MODE_40G_C2C, + CGX_MODE_40G_C2M, + CGX_MODE_40G_CR4, + CGX_MODE_40G_KR4, + CGX_MODE_40GAUI_C2C, + CGX_MODE_50G_C2C, + CGX_MODE_50G_C2M, + CGX_MODE_50G_4_C2C, + CGX_MODE_50G_CR, + CGX_MODE_50G_KR, + CGX_MODE_80GAUI_C2C, + CGX_MODE_100G_C2C, + CGX_MODE_100G_C2M, + CGX_MODE_100G_CR4, + CGX_MODE_100G_KR4, + CGX_MODE_MAX /* = 29 */ +}; /* REQUEST ID types. 
Input to firmware */ enum cgx_cmd_id { CGX_CMD_NONE, @@ -75,12 +112,25 @@ enum cgx_cmd_id { CGX_CMD_INTERNAL_LBK, CGX_CMD_EXTERNAL_LBK, CGX_CMD_HIGIG, - CGX_CMD_LINK_STATE_CHANGE, + CGX_CMD_LINK_STAT_CHANGE, CGX_CMD_MODE_CHANGE, /* hot plug support */ CGX_CMD_INTF_SHUTDOWN, CGX_CMD_GET_MKEX_PRFL_SIZE, CGX_CMD_GET_MKEX_PRFL_ADDR, CGX_CMD_GET_FWD_BASE, /* get base address of shared FW data */ + CGX_CMD_GET_LINK_MODES, /* Supported Link Modes */ + CGX_CMD_SET_LINK_MODE, + CGX_CMD_GET_SUPPORTED_FEC, + CGX_CMD_SET_FEC, + CGX_CMD_GET_AN, + CGX_CMD_SET_AN, + CGX_CMD_GET_ADV_LINK_MODES, + CGX_CMD_GET_ADV_FEC, + CGX_CMD_GET_PHY_MOD_TYPE, /* line-side modulation type: NRZ or PAM4 */ + CGX_CMD_SET_PHY_MOD_TYPE, + CGX_CMD_PRBS, + CGX_CMD_DISPLAY_EYE, + CGX_CMD_GET_PHY_FEC_STATS, }; /* async event ids */ @@ -154,6 +204,7 @@ enum cgx_cmd_own { * CGX_STAT_SUCCESS */ #define RESP_FWD_BASE GENMASK_ULL(56, 9) +#define RESP_LINKSTAT_LMAC_TYPE GENMASK_ULL(35, 28) /* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE * status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS @@ -171,13 +222,19 @@ struct cgx_lnk_sts { uint64_t full_duplex:1; uint64_t speed:4; /* cgx_link_speed */ uint64_t err_type:10; - uint64_t reserved2:39; + uint64_t an:1; /* AN supported or not */ + uint64_t fec:2; /* FEC type if enabled, if not 0 */ + uint64_t port:8; + uint64_t reserved2:28; }; #define RESP_LINKSTAT_UP GENMASK_ULL(9, 9) #define RESP_LINKSTAT_FDUPLEX GENMASK_ULL(10, 10) #define RESP_LINKSTAT_SPEED GENMASK_ULL(14, 11) #define RESP_LINKSTAT_ERRTYPE GENMASK_ULL(24, 15) +#define RESP_LINKSTAT_AN GENMASK_ULL(25, 25) +#define RESP_LINKSTAT_FEC GENMASK_ULL(27, 26) +#define RESP_LINKSTAT_PORT GENMASK_ULL(35, 28) /* scratchx(1) CSR used for non-secure SW->ATF communication * This CSR acts as a command register @@ -199,4 +256,12 @@ struct cgx_lnk_sts { #define CMDLINKCHANGE_FULLDPLX BIT_ULL(9) #define CMDLINKCHANGE_SPEED GENMASK_ULL(13, 10) +#define CMDSETFEC GENMASK_ULL(9, 8) +/* command argument to be passed for cmd ID - CGX_CMD_MODE_CHANGE */ +#define CMDMODECHANGE_SPEED GENMASK_ULL(11, 8) +#define CMDMODECHANGE_DUPLEX GENMASK_ULL(12, 12) +#define CMDMODECHANGE_AN GENMASK_ULL(13, 13) +#define CMDMODECHANGE_PORT GENMASK_ULL(21, 14) +#define CMDMODECHANGE_FLAGS GENMASK_ULL(63, 22) + #endif /* __CGX_FW_INTF_H__ */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index 17f6f42f4453..e66109367487 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -155,6 +155,8 @@ enum nix_scheduler { #define NIC_HW_MIN_FRS 40 #define NIC_HW_MAX_FRS 9212 #define SDP_HW_MAX_FRS 65535 +#define CN10K_LMAC_LINK_MAX_FRS 16380 /* 16k - FCS */ +#define CN10K_LBK_LINK_MAX_FRS 65535 /* 64k */ /* NIX RX action operation*/ #define NIX_RX_ACTIONOP_DROP (0x0ull) @@ -191,6 +193,9 @@ enum nix_scheduler { #define NIX_LINK_LBK(a) (12 + (a)) #define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c)) #define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b)) +#define NIX_CHAN_SDP_CH_START (0x700ull) + +#define SDP_CHANNELS 256 /* NIX LSO format indices. * As of now TSO is the only one using, so statically assigning indices. 
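For orientation (not part of the patch): the common.h hunk above slots the new SDP channel window directly below the existing CGX LMAC channel space and adds the larger CN10K link MTU limits. A minimal standalone sketch of how those channel macros resolve, using arbitrary example values:

	#include <stdio.h>

	/* copied from the common.h hunk above */
	#define NIX_CHAN_CGX_LMAC_CHX(a, b, c)	(0x800 + 0x100 * (a) + 0x10 * (b) + (c))
	#define NIX_CHAN_SDP_CH_START		(0x700ull)
	#define SDP_CHANNELS			256

	int main(void)
	{
		/* CGX1 / LMAC2 / channel 0 -> 0x800 + 0x100 + 0x20 = 0x920 */
		printf("CGX chan:  0x%x\n", NIX_CHAN_CGX_LMAC_CHX(1, 2, 0));
		/* the 256 SDP channels sit at 0x700..0x7FF, just below the CGX range */
		printf("SDP chans: 0x%llx..0x%llx\n", NIX_CHAN_SDP_CH_START,
		       NIX_CHAN_SDP_CH_START + SDP_CHANNELS - 1);
		return 0;
	}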
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h new file mode 100644 index 000000000000..45706fd87120 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 RPM driver + * + * Copyright (C) 2020 Marvell. + */ + +#ifndef LMAC_COMMON_H +#define LMAC_COMMON_H + +#include "rvu.h" +#include "cgx.h" +/** + * struct lmac + * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion + * @cmd_lock: Lock to serialize the command interface + * @resp: command response + * @link_info: link related information + * @event_cb: callback for linkchange events + * @event_cb_lock: lock for serializing callback with unregister + * @cmd_pend: flag set before new command is started + * flag cleared after command response is received + * @cgx: parent cgx port + * @lmac_id: lmac port id + * @name: lmac port name + */ +struct lmac { + wait_queue_head_t wq_cmd_cmplt; + /* Lock to serialize the command interface */ + struct mutex cmd_lock; + u64 resp; + struct cgx_link_user_info link_info; + struct cgx_event_cb event_cb; + /* lock for serializing callback with unregister */ + spinlock_t event_cb_lock; + bool cmd_pend; + struct cgx *cgx; + u8 lmac_id; + char *name; +}; + +/* CGX & RPM has different feature set + * update the structure fields with different one + */ +struct mac_ops { + char *name; + /* Features like RXSTAT, TXSTAT, DMAC FILTER csrs differs by fixed + * bar offset for example + * CGX DMAC_CTL0 0x1f8 + * RPM DMAC_CTL0 0x4ff8 + */ + u64 csr_offset; + /* For ATF to send events to kernel, there is no dedicated interrupt + * defined hence CGX uses OVERFLOW bit in CMR_INT. RPM block supports + * SW_INT so that ATF triggers this interrupt after processing of + * requested command + */ + u64 int_register; + u64 int_set_reg; + /* lmac offset is different is RPM */ + u8 lmac_offset; + u8 irq_offset; + u8 int_ena_bit; + u8 lmac_fwi; + u32 fifo_len; + bool non_contiguous_serdes_lane; + /* RPM & CGX differs in number of Receive/transmit stats */ + u8 rx_stats_cnt; + u8 tx_stats_cnt; + /* Incase of RPM get number of lmacs from RPMX_CMR_RX_LMACS[LMAC_EXIST] + * number of setbits in lmac_exist tells number of lmacs + */ + int (*get_nr_lmacs)(void *cgx); + u8 (*get_lmac_type)(void *cgx, int lmac_id); + int (*mac_lmac_intl_lbk)(void *cgx, int lmac_id, + bool enable); + /* Register Stats related functions */ + int (*mac_get_rx_stats)(void *cgx, int lmac_id, + int idx, u64 *rx_stat); + int (*mac_get_tx_stats)(void *cgx, int lmac_id, + int idx, u64 *tx_stat); + + /* Enable LMAC Pause Frame Configuration */ + void (*mac_enadis_rx_pause_fwding)(void *cgxd, + int lmac_id, + bool enable); + + int (*mac_get_pause_frm_status)(void *cgxd, + int lmac_id, + u8 *tx_pause, + u8 *rx_pause); + + int (*mac_enadis_pause_frm)(void *cgxd, + int lmac_id, + u8 tx_pause, + u8 rx_pause); + + void (*mac_pause_frm_config)(void *cgxd, + int lmac_id, + bool enable); +}; + +struct cgx { + void __iomem *reg_base; + struct pci_dev *pdev; + u8 cgx_id; + u8 lmac_count; + struct lmac *lmac_idmap[MAX_LMAC_PER_CGX]; + struct work_struct cgx_cmd_work; + struct workqueue_struct *cgx_cmd_workq; + struct list_head cgx_list; + u64 hw_features; + struct mac_ops *mac_ops; + unsigned long lmac_bmap; /* bitmap of enabled lmacs */ + /* Lock to serialize read/write of global csrs like + * RPMX_MTI_STAT_DATA_HI_CDC etc + */ + struct mutex lock; +}; + +typedef struct cgx rpm_t; + 
+/* Function Declarations */ +void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val); +u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset); +struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx); +int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac); +int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id); +bool is_lmac_valid(struct cgx *cgx, int lmac_id); +struct mac_ops *rpm_get_mac_ops(void); + +#endif /* LMAC_COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c index bbabb8e64201..0a37ca96aab8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c @@ -20,9 +20,9 @@ static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN); void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid) { - void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE); struct otx2_mbox_dev *mdev = &mbox->dev[devid]; struct mbox_hdr *tx_hdr, *rx_hdr; + void *hw_mbase = mdev->hwbase; tx_hdr = hw_mbase + mbox->tx_start; rx_hdr = hw_mbase + mbox->rx_start; @@ -56,12 +56,9 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox) } EXPORT_SYMBOL(otx2_mbox_destroy); -int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev, - void *reg_base, int direction, int ndevs) +static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev, + void *reg_base, int direction, int ndevs) { - struct otx2_mbox_dev *mdev; - int devid; - switch (direction) { case MBOX_DIR_AFPF: case MBOX_DIR_PFVF: @@ -121,7 +118,6 @@ int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev, } mbox->reg_base = reg_base; - mbox->hwbase = hwbase; mbox->pdev = pdev; mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL); @@ -129,11 +125,27 @@ int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev, otx2_mbox_destroy(mbox); return -ENOMEM; } - mbox->ndevs = ndevs; + + return 0; +} + +int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev, + void *reg_base, int direction, int ndevs) +{ + struct otx2_mbox_dev *mdev; + int devid, err; + + err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs); + if (err) + return err; + + mbox->hwbase = hwbase; + for (devid = 0; devid < ndevs; devid++) { mdev = &mbox->dev[devid]; mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE); + mdev->hwbase = mdev->mbase; spin_lock_init(&mdev->mbox_lock); /* Init header to reset value */ otx2_mbox_reset(mbox, devid); @@ -143,6 +155,35 @@ int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev, } EXPORT_SYMBOL(otx2_mbox_init); +/* Initialize mailbox with the set of mailbox region addresses + * in the array hwbase. 
+ */ +int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase, + struct pci_dev *pdev, void *reg_base, + int direction, int ndevs) +{ + struct otx2_mbox_dev *mdev; + int devid, err; + + err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs); + if (err) + return err; + + mbox->hwbase = hwbase[0]; + + for (devid = 0; devid < ndevs; devid++) { + mdev = &mbox->dev[devid]; + mdev->mbase = hwbase[devid]; + mdev->hwbase = hwbase[devid]; + spin_lock_init(&mdev->mbox_lock); + /* Init header to reset value */ + otx2_mbox_reset(mbox, devid); + } + + return 0; +} +EXPORT_SYMBOL(otx2_mbox_regions_init); + int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid) { unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT); @@ -175,9 +216,9 @@ EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp); void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid) { - void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE); struct otx2_mbox_dev *mdev = &mbox->dev[devid]; struct mbox_hdr *tx_hdr, *rx_hdr; + void *hw_mbase = mdev->hwbase; tx_hdr = hw_mbase + mbox->tx_start; rx_hdr = hw_mbase + mbox->rx_start; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index f919283ddc34..ea456099b33c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -36,7 +36,7 @@ #define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull)) -#define MBOX_RSP_TIMEOUT 2000 /* Time(ms) to wait for mbox response */ +#define MBOX_RSP_TIMEOUT 3000 /* Time(ms) to wait for mbox response */ #define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */ @@ -52,6 +52,7 @@ struct otx2_mbox_dev { void *mbase; /* This dev's mbox region */ + void *hwbase; spinlock_t mbox_lock; u16 msg_size; /* Total msg size to be sent */ u16 rsp_size; /* Total rsp size to be sure the reply is ok */ @@ -98,6 +99,9 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox); int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase, struct pci_dev *pdev, void __force *reg_base, int direction, int ndevs); +int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase, + struct pci_dev *pdev, void __force *reg_base, + int direction, int ndevs); void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid); int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid); int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid); @@ -149,6 +153,16 @@ M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \ M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \ M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \ cgx_pause_frm_cfg) \ +M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \ +M(CGX_FEC_STATS, 0x211, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \ +M(CGX_GET_PHY_FEC_STATS, 0x212, cgx_get_phy_fec_stats, msg_req, msg_rsp) \ +M(CGX_FW_DATA_GET, 0x213, cgx_get_aux_link_info, msg_req, cgx_fw_data) \ +M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\ + cgx_set_link_mode_rsp) \ +M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \ + cgx_features_info_msg) \ +M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \ + /* NPA mbox IDs (range 0x400 - 0x5FF) */ \ /* NPA mbox IDs (range 0x400 - 0x5FF) */ \ M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \ npa_lf_alloc_req, npa_lf_alloc_rsp) \ @@ -237,6 +251,9 @@ M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \ nix_bp_cfg_rsp) \ M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) 
\ M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \ +M(NIX_CN10K_AQ_ENQ, 0x8019, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \ + nix_cn10k_aq_enq_rsp) \ +M(NIX_GET_HW_INFO, 0x801a, nix_get_hw_info, msg_req, nix_hw_info) /* Messages initiated by AF (range 0xC00 - 0xDFF) */ #define MBOX_UP_CGX_MESSAGES \ @@ -354,12 +371,17 @@ struct get_hw_cap_rsp { struct cgx_stats_rsp { struct mbox_msghdr hdr; -#define CGX_RX_STATS_COUNT 13 +#define CGX_RX_STATS_COUNT 9 #define CGX_TX_STATS_COUNT 18 u64 rx_stats[CGX_RX_STATS_COUNT]; u64 tx_stats[CGX_TX_STATS_COUNT]; }; +struct cgx_fec_stats_rsp { + struct mbox_msghdr hdr; + u64 fec_corr_blks; + u64 fec_uncorr_blks; +}; /* Structure for requesting the operation for * setting/getting mac address in the CGX interface */ @@ -373,6 +395,8 @@ struct cgx_link_user_info { uint64_t full_duplex:1; uint64_t lmac_type_id:4; uint64_t speed:20; /* speed in Mbps */ + uint64_t an:1; /* AN supported or not */ + uint64_t fec:2; /* FEC type if enabled else 0 */ #define LMACTYPE_STR_LEN 16 char lmac_type[LMACTYPE_STR_LEN]; }; @@ -391,6 +415,98 @@ struct cgx_pause_frm_cfg { u8 tx_pause; }; +enum fec_type { + OTX2_FEC_NONE, + OTX2_FEC_BASER, + OTX2_FEC_RS, + OTX2_FEC_STATS_CNT = 2, + OTX2_FEC_OFF, +}; + +struct fec_mode { + struct mbox_msghdr hdr; + int fec; +}; + +struct sfp_eeprom_s { +#define SFP_EEPROM_SIZE 256 + u16 sff_id; + u8 buf[SFP_EEPROM_SIZE]; + u64 reserved; +}; + +struct phy_s { + struct { + u64 can_change_mod_type:1; + u64 mod_type:1; + u64 has_fec_stats:1; + } misc; + struct fec_stats_s { + u32 rsfec_corr_cws; + u32 rsfec_uncorr_cws; + u32 brfec_corr_blks; + u32 brfec_uncorr_blks; + } fec_stats; +}; + +struct cgx_lmac_fwdata_s { + u16 rw_valid; + u64 supported_fec; + u64 supported_an; + u64 supported_link_modes; + /* only applicable if AN is supported */ + u64 advertised_fec; + u64 advertised_link_modes; + /* Only applicable if SFP/QSFP slot is present */ + struct sfp_eeprom_s sfp_eeprom; + struct phy_s phy; +#define LMAC_FWDATA_RESERVED_MEM 1021 + u64 reserved[LMAC_FWDATA_RESERVED_MEM]; +}; + +struct cgx_fw_data { + struct mbox_msghdr hdr; + struct cgx_lmac_fwdata_s fwdata; +}; + +struct cgx_set_link_mode_args { + u32 speed; + u8 duplex; + u8 an; + u8 ports; + u64 mode; +}; + +struct cgx_set_link_mode_req { +#define AUTONEG_UNKNOWN 0xff + struct mbox_msghdr hdr; + struct cgx_set_link_mode_args args; +}; + +struct cgx_set_link_mode_rsp { + struct mbox_msghdr hdr; + int status; +}; + +#define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */ +#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precison time protocol */ +#define RVU_MAC_VERSION BIT_ULL(2) +#define RVU_MAC_CGX BIT_ULL(3) +#define RVU_MAC_RPM BIT_ULL(4) + +struct cgx_features_info_msg { + struct mbox_msghdr hdr; + u64 lmac_features; +}; + +struct rpm_stats_rsp { + struct mbox_msghdr hdr; +#define RPM_RX_STATS_COUNT 43 +#define RPM_TX_STATS_COUNT 34 + u64 rx_stats[RPM_RX_STATS_COUNT]; + u64 tx_stats[RPM_TX_STATS_COUNT]; +}; + /* NPA mbox message formats */ /* NPA mailbox error codes @@ -545,6 +661,39 @@ struct nix_lf_free_req { u64 flags; }; +/* CN10K NIX AQ enqueue msg */ +struct nix_cn10k_aq_enq_req { + struct mbox_msghdr hdr; + u32 qidx; + u8 ctype; + u8 op; + union { + struct nix_cn10k_rq_ctx_s rq; + struct nix_cn10k_sq_ctx_s sq; + struct nix_cq_ctx_s cq; + struct nix_rsse_s rss; + struct nix_rx_mce_s mce; + }; + union { + struct nix_cn10k_rq_ctx_s rq_mask; + struct nix_cn10k_sq_ctx_s sq_mask; + struct nix_cq_ctx_s cq_mask; + struct nix_rsse_s rss_mask; + struct nix_rx_mce_s mce_mask; + 
}; +}; + +struct nix_cn10k_aq_enq_rsp { + struct mbox_msghdr hdr; + union { + struct nix_cn10k_rq_ctx_s rq; + struct nix_cn10k_sq_ctx_s sq; + struct nix_cq_ctx_s cq; + struct nix_rsse_s rss; + struct nix_rx_mce_s mce; + }; +}; + /* NIX AQ enqueue msg */ struct nix_aq_enq_req { struct mbox_msghdr hdr; @@ -717,6 +866,8 @@ struct nix_rss_flowkey_cfg { #define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17) #define NIX_FLOW_KEY_TYPE_VLAN BIT(20) #define NIX_FLOW_KEY_TYPE_IPV4_PROTO BIT(21) +#define NIX_FLOW_KEY_TYPE_AH BIT(22) +#define NIX_FLOW_KEY_TYPE_ESP BIT(23) u32 flowkey_cfg; /* Flowkey types selected */ u8 group; /* RSS context or group */ }; @@ -807,6 +958,12 @@ struct nix_bp_cfg_rsp { u8 chan_cnt; /* Number of channel for which bpids are assigned */ }; +struct nix_hw_info { + struct mbox_msghdr hdr; + u16 max_mtu; + u16 min_mtu; +}; + /* NPC mbox message structs */ #define NPC_MCAM_ENTRY_INVALID 0xFFFF @@ -1071,6 +1228,7 @@ struct cpt_rd_wr_reg_msg { u64 *ret_val; u64 val; u8 is_write; + int blkaddr; }; struct cpt_lf_alloc_req_msg { @@ -1078,6 +1236,7 @@ struct cpt_lf_alloc_req_msg { u16 nix_pf_func; u16 sso_pf_func; u16 eng_grpmsk; + int blkaddr; }; #endif /* MBOX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index a1f79445db71..3c640f6aba92 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -162,6 +162,11 @@ enum key_fields { NPC_DIP_IPV4, NPC_SIP_IPV6, NPC_DIP_IPV6, + NPC_IPPROTO_TCP, + NPC_IPPROTO_UDP, + NPC_IPPROTO_SCTP, + NPC_IPPROTO_AH, + NPC_IPPROTO_ESP, NPC_SPORT_TCP, NPC_DPORT_TCP, NPC_SPORT_UDP, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c index f69f4f35ae48..1ee37853f338 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c @@ -21,6 +21,9 @@ #define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP 0xB300 #define PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP 0xB400 #define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP 0xB500 +#define PCI_SUBSYS_DEVID_CN10K_A_PTP 0xB900 +#define PCI_SUBSYS_DEVID_CNF10K_A_PTP 0xBA00 +#define PCI_SUBSYS_DEVID_CNF10K_B_PTP 0xBC00 #define PCI_DEVID_OCTEONTX2_RST 0xA085 #define PCI_PTP_BAR_NO 0 @@ -234,6 +237,15 @@ static const struct pci_device_id ptp_id_table[] = { { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, + PCI_VENDOR_ID_CAVIUM, + PCI_SUBSYS_DEVID_CN10K_A_PTP) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, + PCI_VENDOR_ID_CAVIUM, + PCI_SUBSYS_DEVID_CNF10K_A_PTP) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, + PCI_VENDOR_ID_CAVIUM, + PCI_SUBSYS_DEVID_CNF10K_B_PTP) }, { 0, } }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c new file mode 100644 index 000000000000..a91ccdc59403 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c @@ -0,0 +1,272 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RPM driver + * + * Copyright (C) 2020 Marvell. 
+ * + */ + +#include "cgx.h" +#include "lmac_common.h" + +static struct mac_ops rpm_mac_ops = { + .name = "rpm", + .csr_offset = 0x4e00, + .lmac_offset = 20, + .int_register = RPMX_CMRX_SW_INT, + .int_set_reg = RPMX_CMRX_SW_INT_ENA_W1S, + .irq_offset = 1, + .int_ena_bit = BIT_ULL(0), + .lmac_fwi = RPM_LMAC_FWI, + .non_contiguous_serdes_lane = true, + .rx_stats_cnt = 43, + .tx_stats_cnt = 34, + .get_nr_lmacs = rpm_get_nr_lmacs, + .get_lmac_type = rpm_get_lmac_type, + .mac_lmac_intl_lbk = rpm_lmac_internal_loopback, + .mac_get_rx_stats = rpm_get_rx_stats, + .mac_get_tx_stats = rpm_get_tx_stats, + .mac_enadis_rx_pause_fwding = rpm_lmac_enadis_rx_pause_fwding, + .mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status, + .mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm, + .mac_pause_frm_config = rpm_lmac_pause_frm_config, +}; + +struct mac_ops *rpm_get_mac_ops(void) +{ + return &rpm_mac_ops; +} + +static void rpm_write(rpm_t *rpm, u64 lmac, u64 offset, u64 val) +{ + cgx_write(rpm, lmac, offset, val); +} + +static u64 rpm_read(rpm_t *rpm, u64 lmac, u64 offset) +{ + return cgx_read(rpm, lmac, offset); +} + +int rpm_get_nr_lmacs(void *rpmd) +{ + rpm_t *rpm = rpmd; + + return hweight8(rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS) & 0xFULL); +} + +void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable) +{ + rpm_t *rpm = rpmd; + u64 cfg; + + if (!rpm) + return; + + if (enable) { + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); + cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE; + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); + } else { + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); + cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE; + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); + } +} + +int rpm_lmac_get_pause_frm_status(void *rpmd, int lmac_id, + u8 *tx_pause, u8 *rx_pause) +{ + rpm_t *rpm = rpmd; + u64 cfg; + + if (!is_lmac_valid(rpm, lmac_id)) + return -ENODEV; + + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); + *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE); + + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); + *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE); + return 0; +} + +int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause, + u8 rx_pause) +{ + rpm_t *rpm = rpmd; + u64 cfg; + + if (!is_lmac_valid(rpm, lmac_id)) + return -ENODEV; + + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); + cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE; + cfg |= rx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE; + cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE; + cfg |= rx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE; + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); + + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); + cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE; + cfg |= tx_pause ? 
0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE; + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); + + cfg = rpm_read(rpm, 0, RPMX_CMR_RX_OVR_BP); + if (tx_pause) { + cfg &= ~RPMX_CMR_RX_OVR_BP_EN(lmac_id); + } else { + cfg |= RPMX_CMR_RX_OVR_BP_EN(lmac_id); + cfg &= ~RPMX_CMR_RX_OVR_BP_BP(lmac_id); + } + rpm_write(rpm, 0, RPMX_CMR_RX_OVR_BP, cfg); + return 0; +} + +void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable) +{ + rpm_t *rpm = rpmd; + u64 cfg; + + if (enable) { + /* Enable 802.3 pause frame mode */ + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); + cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE; + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); + + /* Enable receive pause frames */ + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); + cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE; + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); + + /* Enable forward pause to TX block */ + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); + cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE; + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); + + /* Enable pause frames transmission */ + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); + cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE; + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); + + /* Set pause time and interval */ + cfg = rpm_read(rpm, lmac_id, + RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA); + cfg &= ~0xFFFFULL; + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA, + cfg | RPM_DEFAULT_PAUSE_TIME); + /* Set pause interval as the hardware default is too short */ + cfg = rpm_read(rpm, lmac_id, + RPMX_MTI_MAC100X_CL01_QUANTA_THRESH); + cfg &= ~0xFFFFULL; + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_CL01_QUANTA_THRESH, + cfg | (RPM_DEFAULT_PAUSE_TIME / 2)); + + } else { + /* ALL pause frames received are completely ignored */ + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); + cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE; + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); + + /* Disable forward pause to TX block */ + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); + cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE; + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); + + /* Disable pause frames transmission */ + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); + cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE; + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); + } +} + +int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat) +{ + rpm_t *rpm = rpmd; + u64 val_lo, val_hi; + + if (!rpm || lmac_id >= rpm->lmac_count) + return -ENODEV; + + mutex_lock(&rpm->lock); + + /* Update idx to point per lmac Rx statistics page */ + idx += lmac_id * rpm->mac_ops->rx_stats_cnt; + + /* Read lower 32 bits of counter */ + val_lo = rpm_read(rpm, 0, RPMX_MTI_STAT_RX_STAT_PAGES_COUNTERX + + (idx * 8)); + + /* upon read of lower 32 bits, higher 32 bits are written + * to RPMX_MTI_STAT_DATA_HI_CDC + */ + val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC); + + *rx_stat = (val_hi << 32 | val_lo); + + mutex_unlock(&rpm->lock); + return 0; +} + +int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat) +{ + rpm_t *rpm = rpmd; + u64 val_lo, val_hi; + + if (!rpm || lmac_id >= rpm->lmac_count) + return -ENODEV; + + mutex_lock(&rpm->lock); + + /* Update idx to point per lmac Tx statistics page */ 
+ idx += lmac_id * rpm->mac_ops->tx_stats_cnt; + + val_lo = rpm_read(rpm, 0, RPMX_MTI_STAT_TX_STAT_PAGES_COUNTERX + + (idx * 8)); + val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC); + + *tx_stat = (val_hi << 32 | val_lo); + + mutex_unlock(&rpm->lock); + return 0; +} + +u8 rpm_get_lmac_type(void *rpmd, int lmac_id) +{ + rpm_t *rpm = rpmd; + u64 req = 0, resp; + int err; + + req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_LINK_STS, req); + err = cgx_fwi_cmd_generic(req, &resp, rpm, 0); + if (!err) + return FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, resp); + return err; +} + +int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable) +{ + rpm_t *rpm = rpmd; + u8 lmac_type; + u64 cfg; + + if (!rpm || lmac_id >= rpm->lmac_count) + return -ENODEV; + lmac_type = rpm->mac_ops->get_lmac_type(rpm, lmac_id); + if (lmac_type == LMAC_MODE_100G_R) { + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1); + + if (enable) + cfg |= RPMX_MTI_PCS_LBK; + else + cfg &= ~RPMX_MTI_PCS_LBK; + rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg); + } else { + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_LPCSX_CONTROL1); + if (enable) + cfg |= RPMX_MTI_PCS_LBK; + else + cfg &= ~RPMX_MTI_PCS_LBK; + rpm_write(rpm, lmac_id, RPMX_MTI_LPCSX_CONTROL1, cfg); + } + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h new file mode 100644 index 000000000000..d32e74bd5964 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell OcteonTx2 RPM driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#ifndef RPM_H +#define RPM_H + +#include <linux/bits.h> + +/* PCI device IDs */ +#define PCI_DEVID_CN10K_RPM 0xA060 + +/* Registers */ +#define RPMX_CMRX_SW_INT 0x180 +#define RPMX_CMRX_SW_INT_W1S 0x188 +#define RPMX_CMRX_SW_INT_ENA_W1S 0x198 +#define RPMX_CMRX_LINK_CFG 0x1070 +#define RPMX_MTI_PCS100X_CONTROL1 0x20000 +#define RPMX_MTI_LPCSX_CONTROL1 0x30000 +#define RPMX_MTI_PCS_LBK BIT_ULL(14) +#define RPMX_MTI_LPCSX_CONTROL(id) (0x30000 | ((id) * 0x100)) + +#define RPMX_CMRX_LINK_RANGE_MASK GENMASK_ULL(19, 16) +#define RPMX_CMRX_LINK_BASE_MASK GENMASK_ULL(11, 0) +#define RPMX_MTI_MAC100X_COMMAND_CONFIG 0x8010 +#define RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE BIT_ULL(29) +#define RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE BIT_ULL(28) +#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE BIT_ULL(8) +#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE BIT_ULL(19) +#define RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA 0x80A8 +#define RPMX_MTI_MAC100X_CL01_QUANTA_THRESH 0x80C8 +#define RPM_DEFAULT_PAUSE_TIME 0xFFFF +#define RPMX_CMR_RX_OVR_BP 0x4120 +#define RPMX_CMR_RX_OVR_BP_EN(x) BIT_ULL((x) + 8) +#define RPMX_CMR_RX_OVR_BP_BP(x) BIT_ULL((x) + 4) +#define RPMX_MTI_STAT_RX_STAT_PAGES_COUNTERX 0x12000 +#define RPMX_MTI_STAT_TX_STAT_PAGES_COUNTERX 0x13000 +#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038 + +#define RPM_LMAC_FWI 0xa + +/* Function Declarations */ +int rpm_get_nr_lmacs(void *rpmd); +u8 rpm_get_lmac_type(void *rpmd, int lmac_id); +int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable); +void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable); +int rpm_lmac_get_pause_frm_status(void *cgxd, int lmac_id, u8 *tx_pause, + u8 *rx_pause); +void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable); +int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause, + u8 rx_pause); +int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat); 
+int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat); +#endif /* RPM_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index e8fd712860a1..d9a1a71c7ccc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -22,7 +22,7 @@ #include "rvu_trace.h" -#define DRV_NAME "octeontx2-af" +#define DRV_NAME "rvu_af" #define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver" static int rvu_get_hwvf(struct rvu *rvu, int pcifunc); @@ -78,6 +78,9 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu) if (is_rvu_96xx_A0(rvu)) hw->cap.nix_rx_multicast = false; } + + if (!is_rvu_otx2(rvu)) + hw->cap.per_pf_mbox_regs = true; } /* Poll a RVU block's register 'offset', for a 'zero' @@ -646,7 +649,7 @@ setup_vfmsix: } /* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence - * create a IOMMU mapping for the physcial address configured by + * create an IOMMU mapping for the physical address configured by * firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA. */ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST); @@ -852,6 +855,31 @@ static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr) return rvu_alloc_bitmap(&block->lf); } +static void rvu_get_lbk_bufsize(struct rvu *rvu) +{ + struct pci_dev *pdev = NULL; + void __iomem *base; + u64 lbk_const; + + pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_OCTEONTX2_LBK, pdev); + if (!pdev) + return; + + base = pci_ioremap_bar(pdev, 0); + if (!base) + goto err_put; + + lbk_const = readq(base + LBK_CONST); + + /* cache fifo size */ + rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const); + + iounmap(base); +err_put: + pci_dev_put(pdev); +} + static int rvu_setup_hw_resources(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; @@ -1003,6 +1031,10 @@ cpt: rvu_scan_block(rvu, block); } + err = rvu_set_channels_base(rvu); + if (err) + goto msix_err; + err = rvu_npc_init(rvu); if (err) goto npc_err; @@ -1018,10 +1050,14 @@ cpt: if (err) goto npa_err; + rvu_get_lbk_bufsize(rvu); + err = rvu_nix_init(rvu); if (err) goto nix_err; + rvu_program_channels(rvu); + return 0; nix_err: @@ -1323,7 +1359,7 @@ static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype, break; default: return rvu_get_blkaddr(rvu, blktype, 0); - }; + } if (is_block_implemented(rvu->hw, blkaddr)) return blkaddr; @@ -1936,41 +1972,105 @@ static inline void rvu_afvf_mbox_up_handler(struct work_struct *work) __rvu_mbox_up_handler(mwork, TYPE_AFVF); } +static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr, + int num, int type) +{ + struct rvu_hwinfo *hw = rvu->hw; + int region; + u64 bar4; + + /* For cn10k platform VF mailbox regions of a PF follows after the + * PF <-> AF mailbox region. Whereas for Octeontx2 it is read from + * RVU_PF_VF_BAR4_ADDR register. + */ + if (type == TYPE_AFVF) { + for (region = 0; region < num; region++) { + if (hw->cap.per_pf_mbox_regs) { + bar4 = rvu_read64(rvu, BLKADDR_RVUM, + RVU_AF_PFX_BAR4_ADDR(0)) + + MBOX_SIZE; + bar4 += region * MBOX_SIZE; + } else { + bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR); + bar4 += region * MBOX_SIZE; + } + mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE); + if (!mbox_addr[region]) + goto error; + } + return 0; + } + + /* For cn10k platform AF <-> PF mailbox region of a PF is read from per + * PF registers. Whereas for Octeontx2 it is read from + * RVU_AF_PF_BAR4_ADDR register. 
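A compact model of the BAR4 address selection in the TYPE_AFVF branch of rvu_get_mbox_regions() above may help: on CN10K the VF mailbox regions sit directly after the PF0 <-> AF region read from RVU_AF_PFX_BAR4_ADDR(0), while on OcteonTx2 they start at the address read from RVU_PF_VF_BAR4_ADDR. The 64KB MBOX_SIZE and the sample base addresses are assumptions for the demo.

#include <stdint.h>
#include <stdio.h>

#define MBOX_SIZE 0x10000ULL	/* assumed 64KB per mailbox region */

/* pf0_bar4/vf_bar4 stand in for the two register reads done by the driver. */
static uint64_t afvf_mbox_addr(int region, int per_pf_mbox_regs,
			       uint64_t pf0_bar4, uint64_t vf_bar4)
{
	if (per_pf_mbox_regs)				/* CN10K */
		return pf0_bar4 + MBOX_SIZE + region * MBOX_SIZE;
	return vf_bar4 + region * MBOX_SIZE;		/* OcteonTx2 */
}

int main(void)
{
	/* e.g. VF region 2 on CN10K, assuming PF0 BAR4 at 0x1000000 */
	printf("0x%llx\n",
	       (unsigned long long)afvf_mbox_addr(2, 1, 0x1000000ULL, 0));
	return 0;
}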
+ */ + for (region = 0; region < num; region++) { + if (hw->cap.per_pf_mbox_regs) { + bar4 = rvu_read64(rvu, BLKADDR_RVUM, + RVU_AF_PFX_BAR4_ADDR(region)); + } else { + bar4 = rvu_read64(rvu, BLKADDR_RVUM, + RVU_AF_PF_BAR4_ADDR); + bar4 += region * MBOX_SIZE; + } + mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE); + if (!mbox_addr[region]) + goto error; + } + return 0; + +error: + while (region--) + iounmap((void __iomem *)mbox_addr[region]); + return -ENOMEM; +} + static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, int type, int num, void (mbox_handler)(struct work_struct *), void (mbox_up_handler)(struct work_struct *)) { - void __iomem *hwbase = NULL, *reg_base; - int err, i, dir, dir_up; + int err = -EINVAL, i, dir, dir_up; + void __iomem *reg_base; struct rvu_work *mwork; + void **mbox_regions; const char *name; - u64 bar4_addr; + + mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL); + if (!mbox_regions) + return -ENOMEM; switch (type) { case TYPE_AFPF: name = "rvu_afpf_mailbox"; - bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR); dir = MBOX_DIR_AFPF; dir_up = MBOX_DIR_AFPF_UP; reg_base = rvu->afreg_base; + err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF); + if (err) + goto free_regions; break; case TYPE_AFVF: name = "rvu_afvf_mailbox"; - bar4_addr = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR); dir = MBOX_DIR_PFVF; dir_up = MBOX_DIR_PFVF_UP; reg_base = rvu->pfreg_base; + err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF); + if (err) + goto free_regions; break; default: - return -EINVAL; + return err; } mw->mbox_wq = alloc_workqueue(name, WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, num); - if (!mw->mbox_wq) - return -ENOMEM; + if (!mw->mbox_wq) { + err = -ENOMEM; + goto unmap_regions; + } mw->mbox_wrk = devm_kcalloc(rvu->dev, num, sizeof(struct rvu_work), GFP_KERNEL); @@ -1986,23 +2086,13 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, goto exit; } - /* Mailbox is a reserved memory (in RAM) region shared between - * RVU devices, shouldn't be mapped as device memory to allow - * unaligned accesses. 
- */ - hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * num); - if (!hwbase) { - dev_err(rvu->dev, "Unable to map mailbox region\n"); - err = -ENOMEM; - goto exit; - } - - err = otx2_mbox_init(&mw->mbox, hwbase, rvu->pdev, reg_base, dir, num); + err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev, + reg_base, dir, num); if (err) goto exit; - err = otx2_mbox_init(&mw->mbox_up, hwbase, rvu->pdev, - reg_base, dir_up, num); + err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev, + reg_base, dir_up, num); if (err) goto exit; @@ -2015,25 +2105,36 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, mwork->rvu = rvu; INIT_WORK(&mwork->work, mbox_up_handler); } - + kfree(mbox_regions); return 0; + exit: - if (hwbase) - iounmap((void __iomem *)hwbase); destroy_workqueue(mw->mbox_wq); +unmap_regions: + while (num--) + iounmap((void __iomem *)mbox_regions[num]); +free_regions: + kfree(mbox_regions); return err; } static void rvu_mbox_destroy(struct mbox_wq_info *mw) { + struct otx2_mbox *mbox = &mw->mbox; + struct otx2_mbox_dev *mdev; + int devid; + if (mw->mbox_wq) { flush_workqueue(mw->mbox_wq); destroy_workqueue(mw->mbox_wq); mw->mbox_wq = NULL; } - if (mw->mbox.hwbase) - iounmap((void __iomem *)mw->mbox.hwbase); + for (devid = 0; devid < mbox->ndevs; devid++) { + mdev = &mbox->dev[devid]; + if (mdev->hwbase) + iounmap((void __iomem *)mdev->hwbase); + } otx2_mbox_destroy(&mw->mbox); otx2_mbox_destroy(&mw->mbox_up); @@ -2150,6 +2251,9 @@ static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr) rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf); else if (block->addr == BLKADDR_NPA) rvu_npa_lf_teardown(rvu, pcifunc, lf); + else if ((block->addr == BLKADDR_CPT0) || + (block->addr == BLKADDR_CPT1)) + rvu_cpt_lf_teardown(rvu, pcifunc, lf, slot); err = rvu_lf_reset(rvu, block, lf); if (err) { @@ -2650,8 +2754,6 @@ static void rvu_enable_afvf_intr(struct rvu *rvu) rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); } -#define PCI_DEVID_OCTEONTX2_LBK 0xA061 - int rvu_get_num_lbk_chans(void) { struct pci_dev *pdev; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index b1a6ecfd563e..fa6e46e36ae4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -19,12 +19,15 @@ #include "common.h" #include "mbox.h" #include "npc.h" +#include "rvu_reg.h" /* PCI device IDs */ #define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065 +#define PCI_DEVID_OCTEONTX2_LBK 0xA061 /* Subsystem Device ID */ #define PCI_SUBSYS_DEVID_96XX 0xB200 +#define PCI_SUBSYS_DEVID_CN10K_A 0xB900 /* PCI BAR nos */ #define PCI_AF_REG_BAR_NUM 0 @@ -33,6 +36,7 @@ #define NAME_SIZE 32 #define MAX_NIX_BLKS 2 +#define MAX_CPT_BLKS 2 /* PF_FUNC */ #define RVU_PFVF_PF_SHIFT 10 @@ -47,6 +51,11 @@ struct dump_ctx { bool all; }; +struct cpt_ctx { + int blkaddr; + struct rvu *rvu; +}; + struct rvu_debugfs { struct dentry *root; struct dentry *cgx_root; @@ -61,6 +70,7 @@ struct rvu_debugfs { struct dump_ctx nix_cq_ctx; struct dump_ctx nix_rq_ctx; struct dump_ctx nix_sq_ctx; + struct cpt_ctx cpt_ctx[MAX_CPT_BLKS]; int npa_qsize_id; int nix_qsize_id; }; @@ -296,6 +306,8 @@ struct hw_cap { bool nix_shaping; /* Is shaping and coloring supported */ bool nix_tx_link_bp; /* Can link backpressure TL queues ? */ bool nix_rx_multicast; /* Rx packet replication support */ + bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? 
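The error unwind added to rvu_get_mbox_regions() and the per-device unmap in rvu_mbox_destroy() follow a common map-per-region pattern; here is a small userspace model of it, with map_region()/unmap_region() standing in for ioremap_wc()/iounmap() and a deliberately injected failure.

#include <stdio.h>
#include <stdlib.h>

#define NUM_REGIONS 4

static void *map_region(int i)
{
	return i == 2 ? NULL : malloc(16);	/* simulate failure at region 2 */
}

static void unmap_region(void *p)
{
	free(p);
}

int main(void)
{
	void *regions[NUM_REGIONS] = { 0 };
	int region;

	for (region = 0; region < NUM_REGIONS; region++) {
		regions[region] = map_region(region);
		if (!regions[region])
			goto error;
	}
	printf("all regions mapped\n");
	while (region--)		/* teardown walks the same array */
		unmap_region(regions[region]);
	return 0;

error:
	while (region--)		/* unwind only what was mapped so far */
		unmap_region(regions[region]);
	printf("mapping failed, unwound partial mappings\n");
	return 1;
}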
*/ + bool programmable_chans; /* Channels programmable ? */ }; struct rvu_hwinfo { @@ -304,14 +316,20 @@ struct rvu_hwinfo { u16 max_vfs_per_pf; /* Max VFs that can be attached to a PF */ u8 cgx; u8 lmac_per_cgx; + u16 cgx_chan_base; /* CGX base channel number */ + u16 lbk_chan_base; /* LBK base channel number */ + u16 sdp_chan_base; /* SDP base channel number */ + u16 cpt_chan_base; /* CPT base channel number */ u8 cgx_links; u8 lbk_links; u8 sdp_links; + u8 cpt_links; /* Number of CPT links */ u8 npc_kpus; /* No of parser units */ u8 npc_pkinds; /* No of port kinds */ u8 npc_intfs; /* No of interfaces */ u8 npc_kpu_entries; /* No of KPU entries */ u16 npc_counters; /* No of match stats counters */ + u32 lbk_bufsize; /* FIFO size supported by LBK */ bool npc_ext_set; /* Extended register set */ struct hw_cap cap; @@ -350,6 +368,10 @@ struct rvu_fwdata { u64 msixtr_base; #define FWDATA_RESERVED_MEM 1023 u64 reserved[FWDATA_RESERVED_MEM]; +#define CGX_MAX 5 +#define CGX_LMACS_MAX 4 + struct cgx_lmac_fwdata_s cgx_fw_data[CGX_MAX][CGX_LMACS_MAX]; + /* Do not add new fields below this line */ }; struct ptp; @@ -465,6 +487,59 @@ static inline bool is_rvu_96xx_B0(struct rvu *rvu) (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX); } +/* REVID for PCIe devices. + * Bits 0..1: minor pass, bit 3..2: major pass + * bits 7..4: midr id + */ +#define PCI_REVISION_ID_96XX 0x00 +#define PCI_REVISION_ID_95XX 0x10 +#define PCI_REVISION_ID_LOKI 0x20 +#define PCI_REVISION_ID_98XX 0x30 +#define PCI_REVISION_ID_95XXMM 0x40 + +static inline bool is_rvu_otx2(struct rvu *rvu) +{ + struct pci_dev *pdev = rvu->pdev; + + u8 midr = pdev->revision & 0xF0; + + return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX || + midr == PCI_REVISION_ID_LOKI || midr == PCI_REVISION_ID_98XX || + midr == PCI_REVISION_ID_95XXMM); +} + +static inline u16 rvu_nix_chan_cgx(struct rvu *rvu, u8 cgxid, + u8 lmacid, u8 chan) +{ + u64 nix_const = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST); + u16 cgx_chans = nix_const & 0xFFULL; + struct rvu_hwinfo *hw = rvu->hw; + + if (!hw->cap.programmable_chans) + return NIX_CHAN_CGX_LMAC_CHX(cgxid, lmacid, chan); + + return rvu->hw->cgx_chan_base + + (cgxid * hw->lmac_per_cgx + lmacid) * cgx_chans + chan; +} + +static inline u16 rvu_nix_chan_lbk(struct rvu *rvu, u8 lbkid, + u8 chan) +{ + u64 nix_const = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST); + u16 lbk_chans = (nix_const >> 16) & 0xFFULL; + struct rvu_hwinfo *hw = rvu->hw; + + if (!hw->cap.programmable_chans) + return NIX_CHAN_LBK_CHX(lbkid, chan); + + return rvu->hw->lbk_chan_base + lbkid * lbk_chans + chan; +} + +static inline u16 rvu_nix_chan_cpt(struct rvu *rvu, u8 chan) +{ + return rvu->hw->cpt_chan_base + chan; +} + /* Function Prototypes * RVU */ @@ -601,6 +676,15 @@ void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, u16 src, struct mcam_entry *entry, u8 *intf, u8 *ena); +bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature); +u32 rvu_cgx_get_fifolen(struct rvu *rvu); + +/* CPT APIs */ +int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot); + +/* CN10K RVU */ +int rvu_set_channels_base(struct rvu *rvu); +void rvu_program_channels(struct rvu *rvu); #ifdef CONFIG_DEBUG_FS void rvu_dbg_init(struct rvu *rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 6c6b411e78fd..e668e482383a 100644 --- 
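A worked example of the new rvu_nix_chan_cgx() helper on a programmable-channel chip, using assumed values (16 channels per LMAC, 4 LMACs per CGX, an arbitrary channel base); only the arithmetic mirrors the inline helper above.

#include <stdint.h>
#include <stdio.h>

static uint16_t nix_chan_cgx(uint16_t cgx_chan_base, uint8_t lmac_per_cgx,
			     uint16_t cgx_chans, uint8_t cgxid,
			     uint8_t lmacid, uint8_t chan)
{
	return cgx_chan_base +
	       (cgxid * lmac_per_cgx + lmacid) * cgx_chans + chan;
}

int main(void)
{
	/* CGX1/LMAC2, channel 3: base + (1*4 + 2)*16 + 3 = base + 99 */
	printf("%u\n", (unsigned)nix_chan_cgx(0x320, 4, 16, 1, 2, 3));
	return 0;
}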
a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -14,6 +14,7 @@ #include "rvu.h" #include "cgx.h" +#include "lmac_common.h" #include "rvu_reg.h" #include "rvu_trace.h" @@ -42,6 +43,20 @@ static struct _req_type __maybe_unused \ MBOX_UP_CGX_MESSAGES #undef M +bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature) +{ + u8 cgx_id, lmac_id; + void *cgxd; + + if (!is_pf_cgxmapped(rvu, pf)) + return 0; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + cgxd = rvu_cgx_pdata(cgx_id, rvu); + + return (cgx_features_get(cgxd) & feature); +} + /* Returns bitmap of mapped PFs */ static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id) { @@ -92,9 +107,10 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu) { struct npc_pkind *pkind = &rvu->hw->pkind; int cgx_cnt_max = rvu->cgx_cnt_max; - int cgx, lmac_cnt, lmac; int pf = PF_CGXMAP_BASE; + unsigned long lmac_bmap; int size, free_pkind; + int cgx, lmac, iter; if (!cgx_cnt_max) return 0; @@ -125,14 +141,17 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu) for (cgx = 0; cgx < cgx_cnt_max; cgx++) { if (!rvu_cgx_pdata(cgx, rvu)) continue; - lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); - for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) { + lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu)); + for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) { + lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu), + iter); rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac); rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf; free_pkind = rvu_alloc_rsrc(&pkind->rsrc); pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16; rvu_map_cgx_nix_block(rvu, pf, cgx, lmac); rvu->cgx_mapped_pfs++; + pf++; } } return 0; @@ -154,8 +173,10 @@ static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu) &qentry->link_event.link_uinfo); qentry->link_event.cgx_id = cgx_id; qentry->link_event.lmac_id = lmac_id; - if (err) + if (err) { + kfree(qentry); goto skip_add; + } list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head); skip_add: spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags); @@ -251,6 +272,7 @@ static void cgx_evhandler_task(struct work_struct *work) static int cgx_lmac_event_handler_init(struct rvu *rvu) { + unsigned long lmac_bmap; struct cgx_event_cb cb; int cgx, lmac, err; void *cgxd; @@ -271,7 +293,8 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu) cgxd = rvu_cgx_pdata(cgx, rvu); if (!cgxd) continue; - for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) { + lmac_bmap = cgx_get_lmac_bmap(cgxd); + for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) { err = cgx_lmac_evh_register(&cb, cgxd, lmac); if (err) dev_err(rvu->dev, @@ -349,6 +372,7 @@ int rvu_cgx_init(struct rvu *rvu) int rvu_cgx_exit(struct rvu *rvu) { + unsigned long lmac_bmap; int cgx, lmac; void *cgxd; @@ -356,7 +380,8 @@ int rvu_cgx_exit(struct rvu *rvu) cgxd = rvu_cgx_pdata(cgx, rvu); if (!cgxd) continue; - for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) + lmac_bmap = cgx_get_lmac_bmap(cgxd); + for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) cgx_lmac_evh_unregister(cgxd, lmac); } @@ -381,6 +406,7 @@ static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc) void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable) { + struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; @@ -390,11 +416,12 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable) rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); cgxd = rvu_cgx_pdata(cgx_id, 
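The switch in rvu_map_cgx_lmac_pf() from a 0..lmac_cnt loop to for_each_set_bit() matters when LMAC IDs are sparse; below is a minimal model of that iteration with a hypothetical bitmap value, written without the kernel helper.

#include <stdio.h>

#define MAX_LMAC_PER_CGX 4

int main(void)
{
	unsigned long lmac_bmap = 0x5;	/* hypothetical: only LMAC0 and LMAC2 */
	int pf = 0;

	for (int lmac = 0; lmac < MAX_LMAC_PER_CGX; lmac++) {
		if (!(lmac_bmap & (1UL << lmac)))
			continue;	/* skip holes; an index loop would not */
		printf("PF%d -> LMAC%d\n", pf++, lmac);
	}
	return 0;
}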
rvu); + mac_ops = get_mac_ops(cgxd); /* Set / clear CTL_BCK to control pause frame forwarding to NIX */ if (enable) - cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, true); + mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true); else - cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, false); + mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false); } int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) @@ -426,10 +453,11 @@ int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req, return 0; } -int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req, - struct cgx_stats_rsp *rsp) +static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req, + void *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); + struct mac_ops *mac_ops; int stat = 0, err = 0; u64 tx_stat, rx_stat; u8 cgx_idx, lmac; @@ -440,28 +468,63 @@ int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req, rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); cgxd = rvu_cgx_pdata(cgx_idx, rvu); + mac_ops = get_mac_ops(cgxd); /* Rx stats */ - while (stat < CGX_RX_STATS_COUNT) { - err = cgx_get_rx_stats(cgxd, lmac, stat, &rx_stat); + while (stat < mac_ops->rx_stats_cnt) { + err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat); if (err) return err; - rsp->rx_stats[stat] = rx_stat; + if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT) + ((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat; + else + ((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat; stat++; } /* Tx stats */ stat = 0; - while (stat < CGX_TX_STATS_COUNT) { - err = cgx_get_tx_stats(cgxd, lmac, stat, &tx_stat); + while (stat < mac_ops->tx_stats_cnt) { + err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat); if (err) return err; - rsp->tx_stats[stat] = tx_stat; + if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT) + ((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat; + else + ((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat; stat++; } return 0; } +int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req, + struct cgx_stats_rsp *rsp) +{ + return rvu_lmac_get_stats(rvu, req, (void *)rsp); +} + +int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req, + struct rpm_stats_rsp *rsp) +{ + return rvu_lmac_get_stats(rvu, req, (void *)rsp); +} + +int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu, + struct msg_req *req, + struct cgx_fec_stats_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_idx, lmac; + void *cgxd; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); + + cgxd = rvu_cgx_pdata(cgx_idx, rvu); + return cgx_get_fec_stats(cgxd, lmac, rsp); +} + int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp) @@ -538,6 +601,9 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) u8 cgx_id, lmac_id; void *cgxd; + if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) + return 0; + /* This msg is expected only from PFs that are mapped to CGX LMACs, * if received from other PF/VF simply ACK, nothing to do. 
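A simplified sketch of the mac_ops indirection that rvu_lmac_get_stats() relies on: the per-MAC differences (name, stat counts, register accessors) sit behind a function-pointer table so the same AF handler serves both CGX and RPM. The struct and the dummy getter below are illustrative stand-ins, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

struct mac_ops_sketch {
	const char *name;		/* "cgx" or "rpm" */
	int rx_stats_cnt;
	int tx_stats_cnt;
	int (*mac_get_rx_stats)(void *macd, int lmac, int idx, uint64_t *val);
};

/* Callers never test the device ID directly; they ask the ops table. */
static int read_all_rx_stats(struct mac_ops_sketch *ops, void *macd,
			     int lmac, uint64_t *out)
{
	for (int i = 0; i < ops->rx_stats_cnt; i++) {
		int err = ops->mac_get_rx_stats(macd, lmac, i, &out[i]);

		if (err)
			return err;
	}
	return 0;
}

static int dummy_get_rx_stat(void *macd, int lmac, int idx, uint64_t *val)
{
	(void)macd; (void)lmac;
	*val = idx;			/* canned value for the demo */
	return 0;
}

int main(void)
{
	struct mac_ops_sketch rpm = {
		.name = "rpm", .rx_stats_cnt = 4, .tx_stats_cnt = 4,
		.mac_get_rx_stats = dummy_get_rx_stat,
	};
	uint64_t stats[4];

	if (!read_all_rx_stats(&rpm, NULL, 0, stats))
		printf("%s rx[3]=%llu\n", rpm.name,
		       (unsigned long long)stats[3]);
	return 0;
}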
*/ @@ -624,17 +690,49 @@ int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req, return err; } +int rvu_mbox_handler_cgx_features_get(struct rvu *rvu, + struct msg_req *req, + struct cgx_features_info_msg *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_idx, lmac; + void *cgxd; + + if (!is_pf_cgxmapped(rvu, pf)) + return 0; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); + cgxd = rvu_cgx_pdata(cgx_idx, rvu); + rsp->lmac_features = cgx_features_get(cgxd); + + return 0; +} + +u32 rvu_cgx_get_fifolen(struct rvu *rvu) +{ + struct mac_ops *mac_ops; + int rvu_def_cgx_id = 0; + u32 fifo_len; + + mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu)); + fifo_len = mac_ops ? mac_ops->fifo_len : 0; + + return fifo_len; +} + static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en) { int pf = rvu_get_pf(pcifunc); + struct mac_ops *mac_ops; u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, pcifunc)) return -EPERM; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu)); - return cgx_lmac_internal_loopback(rvu_cgx_pdata(cgx_id, rvu), + return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu), lmac_id, en); } @@ -657,7 +755,12 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu, struct cgx_pause_frm_cfg *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); + struct mac_ops *mac_ops; u8 cgx_id, lmac_id; + void *cgxd; + + if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC)) + return 0; /* This msg is expected only from PF/VFs that are mapped to CGX LMACs, * if received from other PF/VF simply ACK, nothing to do. @@ -666,16 +769,32 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu, return -ENODEV; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + cgxd = rvu_cgx_pdata(cgx_id, rvu); + mac_ops = get_mac_ops(cgxd); if (req->set) - cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id, - req->tx_pause, req->rx_pause); + mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, + req->tx_pause, req->rx_pause); else - cgx_lmac_get_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id, - &rsp->tx_pause, &rsp->rx_pause); + mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, + &rsp->tx_pause, + &rsp->rx_pause); return 0; } +int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + if (!is_pf_cgxmapped(rvu, pf)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id); +} + /* Finds cumulative status of NIX rx/tx counters from LF of a PF and those * from its VFs as well. ie. 
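The pause-frame and PTP handlers above gate on per-LMAC feature bits and ACK unsupported requests as no-ops rather than failing them; a small model of that gating follows, with made-up feature bit values (the driver's actual RVU_LMAC_FEAT_* encoding is not shown in this hunk).

#include <stdio.h>

#define FEAT_FC  (1 << 0)	/* flow control (802.3 pause), illustrative */
#define FEAT_PTP (1 << 1)	/* PTP timestamping, illustrative */

static int configure_pause(unsigned long lmac_features, int tx, int rx)
{
	if (!(lmac_features & FEAT_FC))
		return 0;	/* silently ACK: MAC cannot do pause frames */

	printf("programming pause: tx=%d rx=%d\n", tx, rx);
	return 0;
}

int main(void)
{
	configure_pause(FEAT_PTP, 1, 1);	/* no FC -> nothing programmed */
	configure_pause(FEAT_FC | FEAT_PTP, 1, 1);
	return 0;
}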
NIX rx/tx counters at the CGX port level */ @@ -767,3 +886,56 @@ exit: mutex_unlock(&rvu->cgx_cfg_lock); return err; } + +int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu, + struct fec_mode *req, + struct fec_mode *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + if (!is_pf_cgxmapped(rvu, pf)) + return -EPERM; + + if (req->fec == OTX2_FEC_OFF) + req->fec = OTX2_FEC_NONE; + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id); + return 0; +} + +int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req, + struct cgx_fw_data *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + if (!rvu->fwdata) + return -ENXIO; + + if (!is_pf_cgxmapped(rvu, pf)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + + memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id], + sizeof(struct cgx_lmac_fwdata_s)); + return 0; +} + +int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu, + struct cgx_set_link_mode_req *req, + struct cgx_set_link_mode_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_idx, lmac; + void *cgxd; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); + cgxd = rvu_cgx_pdata(cgx_idx, rvu); + rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac); + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c new file mode 100644 index 000000000000..7d9e71c6965f --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RPM CN10K driver + * + * Copyright (C) 2020 Marvell. + */ + +#include <linux/bitfield.h> +#include <linux/pci.h> +#include "rvu.h" +#include "cgx.h" +#include "rvu_reg.h" + +int rvu_set_channels_base(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 cpt_chan_base; + u64 nix_const; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); + if (blkaddr < 0) + return blkaddr; + + nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST); + + hw->cgx = (nix_const >> 12) & 0xFULL; + hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL; + hw->cgx_links = hw->cgx * hw->lmac_per_cgx; + hw->lbk_links = (nix_const >> 24) & 0xFULL; + hw->cpt_links = (nix_const >> 44) & 0xFULL; + hw->sdp_links = 1; + + hw->cgx_chan_base = NIX_CHAN_CGX_LMAC_CHX(0, 0, 0); + hw->lbk_chan_base = NIX_CHAN_LBK_CHX(0, 0); + hw->sdp_chan_base = NIX_CHAN_SDP_CH_START; + + /* No Programmable channels */ + if (!(nix_const & BIT_ULL(60))) + return 0; + + hw->cap.programmable_chans = true; + + /* If programmable channels are present then configure + * channels such that all channel numbers are contiguous + * leaving no holes. This way the new CPT channels can be + * accomodated. The order of channel numbers assigned is + * LBK, SDP, CGX and CPT. 
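A standalone decode of the NIX_AF_CONST fields that rvu_set_channels_base() reads above; the shifts and masks mirror the code, while the sample register value is constructed purely for the demo.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical: 16 chans/LMAC, 4 LMACs/CGX, 5 CGX, 64 LBK chans,
	 * 2 LBK links, 2 CPT links, programmable-channel bit set
	 */
	uint64_t nix_const = 16ULL | (4ULL << 8) | (5ULL << 12) |
			     (64ULL << 16) | (2ULL << 24) |
			     (2ULL << 44) | (1ULL << 60);

	unsigned cgx_chans    =  nix_const        & 0xFF;
	unsigned lmac_per_cgx = (nix_const >> 8)  & 0xF;
	unsigned cgx          = (nix_const >> 12) & 0xF;
	unsigned lbk_chans    = (nix_const >> 16) & 0xFF;
	unsigned lbk_links    = (nix_const >> 24) & 0xF;
	unsigned cpt_links    = (nix_const >> 44) & 0xF;
	int programmable      = !!(nix_const & (1ULL << 60));

	printf("cgx=%u lmac/cgx=%u cgx_chans=%u lbk=%u links x %u chans cpt=%u prog=%d\n",
	       cgx, lmac_per_cgx, cgx_chans, lbk_links, lbk_chans,
	       cpt_links, programmable);
	return 0;
}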
+ */ + hw->sdp_chan_base = hw->lbk_chan_base + hw->lbk_links * + ((nix_const >> 16) & 0xFFULL); + hw->cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * SDP_CHANNELS; + + cpt_chan_base = hw->cgx_chan_base + hw->cgx_links * + (nix_const & 0xFFULL); + + /* Out of 4096 channels start CPT from 2048 so + * that MSB for CPT channels is always set + */ + if (cpt_chan_base <= 0x800) { + hw->cpt_chan_base = 0x800; + } else { + dev_err(rvu->dev, + "CPT channels could not fit in the range 2048-4095\n"); + return -EINVAL; + } + + return 0; +} + +#define LBK_CONNECT_NIXX(a) (0x0 + (a)) + +static void __rvu_lbk_set_chans(struct rvu *rvu, void __iomem *base, + u64 offset, int lbkid, u16 chans) +{ + struct rvu_hwinfo *hw = rvu->hw; + u64 cfg; + + cfg = readq(base + offset); + cfg &= ~(LBK_LINK_CFG_RANGE_MASK | + LBK_LINK_CFG_ID_MASK | LBK_LINK_CFG_BASE_MASK); + cfg |= FIELD_PREP(LBK_LINK_CFG_RANGE_MASK, ilog2(chans)); + cfg |= FIELD_PREP(LBK_LINK_CFG_ID_MASK, lbkid); + cfg |= FIELD_PREP(LBK_LINK_CFG_BASE_MASK, hw->lbk_chan_base); + + writeq(cfg, base + offset); +} + +static void rvu_lbk_set_channels(struct rvu *rvu) +{ + struct pci_dev *pdev = NULL; + void __iomem *base; + u64 lbk_const; + u8 src, dst; + u16 chans; + + /* To loopback packets between multiple NIX blocks + * mutliple LBK blocks are needed. With two NIX blocks, + * four LBK blocks are needed and each LBK block + * source and destination are as follows: + * LBK0 - source NIX0 and destination NIX1 + * LBK1 - source NIX0 and destination NIX1 + * LBK2 - source NIX1 and destination NIX0 + * LBK3 - source NIX1 and destination NIX1 + * As per the HRM channel numbers should be programmed as: + * P2X and X2P of LBK0 as same + * P2X and X2P of LBK3 as same + * P2X of LBK1 and X2P of LBK2 as same + * P2X of LBK2 and X2P of LBK1 as same + */ + while (true) { + pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_OCTEONTX2_LBK, pdev); + if (!pdev) + return; + + base = pci_ioremap_bar(pdev, 0); + if (!base) + goto err_put; + + lbk_const = readq(base + LBK_CONST); + chans = FIELD_GET(LBK_CONST_CHANS, lbk_const); + dst = FIELD_GET(LBK_CONST_DST, lbk_const); + src = FIELD_GET(LBK_CONST_SRC, lbk_const); + + if (src == dst) { + if (src == LBK_CONNECT_NIXX(0)) { /* LBK0 */ + __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P, + 0, chans); + __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X, + 0, chans); + } else if (src == LBK_CONNECT_NIXX(1)) { /* LBK3 */ + __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P, + 1, chans); + __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X, + 1, chans); + } + } else { + if (src == LBK_CONNECT_NIXX(0)) { /* LBK1 */ + __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P, + 0, chans); + __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X, + 1, chans); + } else if (src == LBK_CONNECT_NIXX(1)) { /* LBK2 */ + __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P, + 1, chans); + __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X, + 0, chans); + } + } + iounmap(base); + } +err_put: + pci_dev_put(pdev); +} + +static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr) +{ + u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST); + u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans; + struct rvu_hwinfo *hw = rvu->hw; + int link, nix_link = 0; + u16 start; + u64 cfg; + + cgx_chans = nix_const & 0xFFULL; + lbk_chans = (nix_const >> 16) & 0xFFULL; + sdp_chans = SDP_CHANNELS; + cpt_chans = (nix_const >> 32) & 0xFFFULL; + + start = hw->cgx_chan_base; + for (link = 0; link < hw->cgx_links; link++, nix_link++) { + cfg = rvu_read64(rvu, blkaddr, 
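Worked arithmetic for the contiguous channel layout described above (LBK first, then SDP, then CGX, with CPT pinned at 0x800), using assumed link and channel counts; SDP_CHANNELS = 256 and the zero LBK base are assumptions for this sketch.

#include <stdio.h>

#define SDP_CHANNELS 256	/* assumed value for this sketch */

int main(void)
{
	/* assumed: 1 LBK link x 64 chans, 1 SDP link, 8 CGX links x 16 chans */
	unsigned lbk_base = 0;
	unsigned sdp_base = lbk_base + 1 * 64;		  /* 64  */
	unsigned cgx_base = sdp_base + 1 * SDP_CHANNELS;  /* 320 */
	unsigned cpt_base = cgx_base + 8 * 16;		  /* 448 */

	/* CPT channels must keep their MSB set, so they start at 0x800 (2048)
	 * as long as the computed base still fits below that boundary.
	 */
	if (cpt_base <= 0x800)
		cpt_base = 0x800;

	printf("lbk=%u sdp=%u cgx=%u cpt=%u\n",
	       lbk_base, sdp_base, cgx_base, cpt_base);
	return 0;
}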
NIX_AF_LINKX_CFG(nix_link)); + cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK); + cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cgx_chans)); + cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start); + rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg); + start += cgx_chans; + } + + start = hw->lbk_chan_base; + for (link = 0; link < hw->lbk_links; link++, nix_link++) { + cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link)); + cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK); + cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(lbk_chans)); + cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start); + rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg); + start += lbk_chans; + } + + start = hw->sdp_chan_base; + for (link = 0; link < hw->sdp_links; link++, nix_link++) { + cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link)); + cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK); + cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(sdp_chans)); + cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start); + rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg); + start += sdp_chans; + } + + start = hw->cpt_chan_base; + for (link = 0; link < hw->cpt_links; link++, nix_link++) { + cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link)); + cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK); + cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cpt_chans)); + cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start); + rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg); + start += cpt_chans; + } +} + +static void rvu_nix_set_channels(struct rvu *rvu) +{ + int blkaddr = 0; + + blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); + while (blkaddr) { + __rvu_nix_set_channels(rvu, blkaddr); + blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr); + } +} + +static void __rvu_rpm_set_channels(int cgxid, int lmacid, u16 base) +{ + u64 cfg; + + cfg = cgx_lmac_read(cgxid, lmacid, RPMX_CMRX_LINK_CFG); + cfg &= ~(RPMX_CMRX_LINK_BASE_MASK | RPMX_CMRX_LINK_RANGE_MASK); + + /* There is no read-only constant register to read + * the number of channels for LMAC and it is always 16. + */ + cfg |= FIELD_PREP(RPMX_CMRX_LINK_RANGE_MASK, ilog2(16)); + cfg |= FIELD_PREP(RPMX_CMRX_LINK_BASE_MASK, base); + cgx_lmac_write(cgxid, lmacid, RPMX_CMRX_LINK_CFG, cfg); +} + +static void rvu_rpm_set_channels(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 base = hw->cgx_chan_base; + int cgx, lmac; + + for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) { + for (lmac = 0; lmac < hw->lmac_per_cgx; lmac++) { + __rvu_rpm_set_channels(cgx, lmac, base); + base += 16; + } + } +} + +void rvu_program_channels(struct rvu *rvu) +{ + struct rvu_hwinfo *hw = rvu->hw; + + if (!hw->cap.programmable_chans) + return; + + rvu_nix_set_channels(rvu); + rvu_lbk_set_channels(rvu); + rvu_rpm_set_channels(rvu); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c index 35261d52c997..0945c3a3b180 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c @@ -65,13 +65,13 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu, int num_lfs, slot; u64 val; + blkaddr = req->blkaddr ? 
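The link "range" field programmed above (for NIX links and for RPM LMACs) encodes the channel count as ilog2(chans), so a 16-channel RPM LMAC gets range 4 and covers channels [base, base + 16); a tiny standalone model of that encoding, with an assumed base.

#include <stdio.h>

static unsigned ilog2_u32(unsigned v)
{
	unsigned r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned base = 0x140, chans = 16;	/* assumed RPM LMAC link */
	unsigned range = ilog2_u32(chans);

	printf("base=0x%x range=%u -> channels 0x%x..0x%x\n",
	       base, range, base, base + (1U << range) - 1);
	return 0;
}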
req->blkaddr : BLKADDR_CPT0; + if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1) + return -ENODEV; + if (req->eng_grpmsk == 0x0) return CPT_AF_ERR_GRP_INVALID; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0); - if (blkaddr < 0) - return blkaddr; - block = &rvu->hw->block[blkaddr]; num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc), block->addr); @@ -114,23 +114,17 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu, return 0; } -int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req, - struct msg_rsp *rsp) +static int cpt_lf_free(struct rvu *rvu, struct msg_req *req, int blkaddr) { u16 pcifunc = req->hdr.pcifunc; + int num_lfs, cptlf, slot; struct rvu_block *block; - int cptlf, blkaddr; - int num_lfs, slot; - - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0); - if (blkaddr < 0) - return blkaddr; block = &rvu->hw->block[blkaddr]; num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc), block->addr); if (!num_lfs) - return CPT_AF_ERR_LF_INVALID; + return 0; for (slot = 0; slot < num_lfs; slot++) { cptlf = rvu_get_lf(rvu, block, pcifunc, slot); @@ -146,6 +140,21 @@ int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req, return 0; } +int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + int ret; + + ret = cpt_lf_free(rvu, req, BLKADDR_CPT0); + if (ret) + return ret; + + if (is_block_implemented(rvu->hw, BLKADDR_CPT1)) + ret = cpt_lf_free(rvu, req, BLKADDR_CPT1); + + return ret; +} + static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req) { u64 offset = req->reg_offset; @@ -208,9 +217,9 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu, { int blkaddr; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0); - if (blkaddr < 0) - return blkaddr; + blkaddr = req->blkaddr ? 
req->blkaddr : BLKADDR_CPT0; + if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1) + return -ENODEV; /* This message is accepted only if sent from CPT PF/VF */ if (!is_cpt_pf(rvu, req->hdr.pcifunc) && @@ -231,3 +240,92 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu, return 0; } + +#define INPROG_INFLIGHT(reg) ((reg) & 0x1FF) +#define INPROG_GRB_PARTIAL(reg) ((reg) & BIT_ULL(31)) +#define INPROG_GRB(reg) (((reg) >> 32) & 0xFF) +#define INPROG_GWB(reg) (((reg) >> 40) & 0xFF) + +static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot) +{ + int i = 0, hard_lp_ctr = 100000; + u64 inprog, grp_ptr; + u16 nq_ptr, dq_ptr; + + /* Disable instructions enqueuing */ + rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTL), 0x0); + + /* Disable executions in the LF's queue */ + inprog = rvu_read64(rvu, blkaddr, + CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG)); + inprog &= ~BIT_ULL(16); + rvu_write64(rvu, blkaddr, + CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), inprog); + + /* Wait for CPT queue to become execution-quiescent */ + do { + inprog = rvu_read64(rvu, blkaddr, + CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG)); + if (INPROG_GRB_PARTIAL(inprog)) { + i = 0; + hard_lp_ctr--; + } else { + i++; + } + + grp_ptr = rvu_read64(rvu, blkaddr, + CPT_AF_BAR2_ALIASX(slot, + CPT_LF_Q_GRP_PTR)); + nq_ptr = (grp_ptr >> 32) & 0x7FFF; + dq_ptr = grp_ptr & 0x7FFF; + + } while (hard_lp_ctr && (i < 10) && (nq_ptr != dq_ptr)); + + if (hard_lp_ctr == 0) + dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n"); + + i = 0; + hard_lp_ctr = 100000; + do { + inprog = rvu_read64(rvu, blkaddr, + CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG)); + + if ((INPROG_INFLIGHT(inprog) == 0) && + (INPROG_GWB(inprog) < 40) && + ((INPROG_GRB(inprog) == 0) || + (INPROG_GRB((inprog)) == 40))) { + i++; + } else { + i = 0; + hard_lp_ctr--; + } + } while (hard_lp_ctr && (i < 10)); + + if (hard_lp_ctr == 0) + dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n"); +} + +int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot) +{ + int blkaddr; + u64 reg; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, pcifunc); + if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1) + return -EINVAL; + + /* Enable BAR2 ALIAS for this pcifunc. 
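cpt_lf_disable_iqueue() above waits for quiescence by requiring several consecutive idle samples while a hard loop counter bounds the total wait, so an FLR can never hang on stuck hardware; here is a generic userspace model of that polling discipline, with read_busy() standing in for the CPT_LF_INPROG read.

#include <stdio.h>

/* Fake hardware: busy for the first few reads, idle afterwards. */
static int read_busy(void)
{
	static int busy_reads = 3;

	return busy_reads-- > 0;
}

int main(void)
{
	int consecutive_idle = 0, hard_lp_ctr = 100000;

	while (hard_lp_ctr && consecutive_idle < 10) {
		if (read_busy()) {
			consecutive_idle = 0;	/* restart the idle streak */
			hard_lp_ctr--;		/* burn the hard budget */
		} else {
			consecutive_idle++;
		}
	}

	puts(hard_lp_ctr ? "queue quiescent"
			 : "gave up waiting for quiescence");
	return 0;
}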
*/ + reg = BIT_ULL(16) | pcifunc; + rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg); + + cpt_lf_disable_iqueue(rvu, blkaddr, slot); + + /* Set group drop to help clear out hardware */ + reg = rvu_read64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG)); + reg |= BIT_ULL(17); + rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), reg); + + rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index d27543c1a166..094124b695dc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -19,6 +19,7 @@ #include "rvu_reg.h" #include "rvu.h" #include "cgx.h" +#include "lmac_common.h" #include "npc.h" #define DEBUGFS_DIR_NAME "octeontx2" @@ -109,6 +110,89 @@ static char *cgx_tx_stats_fields[] = { [CGX_STAT17] = "Control/PAUSE packets sent", }; +static char *rpm_rx_stats_fields[] = { + "Octets of received packets", + "Octets of received packets with out error", + "Received packets with alignment errors", + "Control/PAUSE packets received", + "Packets received with Frame too long Errors", + "Packets received with a1nrange length Errors", + "Received packets", + "Packets received with FrameCheckSequenceErrors", + "Packets received with VLAN header", + "Error packets", + "Packets received with unicast DMAC", + "Packets received with multicast DMAC", + "Packets received with broadcast DMAC", + "Dropped packets", + "Total frames received on interface", + "Packets received with an octet count < 64", + "Packets received with an octet count == 64", + "Packets received with an octet count of 65–127", + "Packets received with an octet count of 128-255", + "Packets received with an octet count of 256-511", + "Packets received with an octet count of 512-1023", + "Packets received with an octet count of 1024-1518", + "Packets received with an octet count of > 1518", + "Oversized Packets", + "Jabber Packets", + "Fragmented Packets", + "CBFC(class based flow control) pause frames received for class 0", + "CBFC pause frames received for class 1", + "CBFC pause frames received for class 2", + "CBFC pause frames received for class 3", + "CBFC pause frames received for class 4", + "CBFC pause frames received for class 5", + "CBFC pause frames received for class 6", + "CBFC pause frames received for class 7", + "CBFC pause frames received for class 8", + "CBFC pause frames received for class 9", + "CBFC pause frames received for class 10", + "CBFC pause frames received for class 11", + "CBFC pause frames received for class 12", + "CBFC pause frames received for class 13", + "CBFC pause frames received for class 14", + "CBFC pause frames received for class 15", + "MAC control packets received", +}; + +static char *rpm_tx_stats_fields[] = { + "Total octets sent on the interface", + "Total octets transmitted OK", + "Control/Pause frames sent", + "Total frames transmitted OK", + "Total frames sent with VLAN header", + "Error Packets", + "Packets sent to unicast DMAC", + "Packets sent to the multicast DMAC", + "Packets sent to a broadcast DMAC", + "Packets sent with an octet count == 64", + "Packets sent with an octet count of 65–127", + "Packets sent with an octet count of 128-255", + "Packets sent with an octet count of 256-511", + "Packets sent with an octet count of 512-1023", + "Packets sent with an octet count of 1024-1518", + "Packets sent with an octet count of > 1518", + "CBFC(class based 
flow control) pause frames transmitted for class 0", + "CBFC pause frames transmitted for class 1", + "CBFC pause frames transmitted for class 2", + "CBFC pause frames transmitted for class 3", + "CBFC pause frames transmitted for class 4", + "CBFC pause frames transmitted for class 5", + "CBFC pause frames transmitted for class 6", + "CBFC pause frames transmitted for class 7", + "CBFC pause frames transmitted for class 8", + "CBFC pause frames transmitted for class 9", + "CBFC pause frames transmitted for class 10", + "CBFC pause frames transmitted for class 11", + "CBFC pause frames transmitted for class 12", + "CBFC pause frames transmitted for class 13", + "CBFC pause frames transmitted for class 14", + "CBFC pause frames transmitted for class 15", + "MAC control packets sent", + "Total frames sent on the interface" +}; + enum cpt_eng_type { CPT_AE_TYPE = 1, CPT_SE_TYPE = 2, @@ -234,6 +318,8 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) { struct rvu *rvu = filp->private; struct pci_dev *pdev = NULL; + struct mac_ops *mac_ops; + int rvu_def_cgx_id = 0; char cgx[10], lmac[10]; struct rvu_pfvf *pfvf; int pf, domain, blkid; @@ -241,7 +327,9 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) u16 pcifunc; domain = 2; - seq_puts(filp, "PCI dev\t\tRVU PF Func\tNIX block\tCGX\tLMAC\n"); + mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu)); + seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n", + mac_ops->name); for (pf = 0; pf < rvu->hw->total_pfs; pf++) { if (!is_pf_cgxmapped(rvu, pf)) continue; @@ -262,7 +350,7 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused) rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); - sprintf(cgx, "CGX%d", cgx_id); + sprintf(cgx, "%s%d", mac_ops->name, cgx_id); sprintf(lmac, "LMAC%d", lmac_id); seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n", dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac); @@ -449,6 +537,7 @@ RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write); static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp) { struct npa_aura_s *aura = &rsp->aura; + struct rvu *rvu = m->private; seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr); @@ -468,6 +557,9 @@ static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp) seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n", (u64)aura->limit, aura->bp, aura->fc_ena); + + if (!is_rvu_otx2(rvu)) + seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be); seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n", aura->fc_up_crossing, aura->fc_stype); seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits); @@ -485,12 +577,15 @@ static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp) seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx); seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh); + if (!is_rvu_otx2(rvu)) + seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst); } /* Dumps given NPA Pool's context */ static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp) { struct npa_pool_s *pool = &rsp->pool; + struct rvu *rvu = m->private; seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base); @@ -512,6 +607,8 @@ static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp) pool->avg_con, pool->fc_ena, pool->fc_stype); seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n", pool->fc_hyst_bits, pool->fc_up_crossing); + if 
(!is_rvu_otx2(rvu)) + seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be); seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time); seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr); @@ -525,8 +622,10 @@ static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp) seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int); seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n", pool->thresh_int_ena, pool->thresh_up); - seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t\t%d\n", + seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n", pool->thresh_qint_idx, pool->err_qint_idx); + if (!is_rvu_otx2(rvu)) + seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst); } /* Reads aura/pool's ctx from admin queue */ @@ -910,11 +1009,78 @@ static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp, RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL); +static void print_nix_cn10k_sq_ctx(struct seq_file *m, + struct nix_cn10k_sq_ctx_s *sq_ctx) +{ + seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n", + sq_ctx->ena, sq_ctx->qint_idx); + seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n", + sq_ctx->substream, sq_ctx->sdp_mcast); + seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n", + sq_ctx->cq, sq_ctx->sqe_way_mask); + + seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n", + sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff); + seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n", + sq_ctx->sso_ena, sq_ctx->smq_rr_weight); + seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n", + sq_ctx->default_chan, sq_ctx->sqb_count); + + seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb); + seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub); + seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n", + sq_ctx->sqb_aura, sq_ctx->sq_int); + seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n", + sq_ctx->sq_int_ena, sq_ctx->sqe_stype); + + seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n", + sq_ctx->max_sqe_size, sq_ctx->cq_limit); + seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n", + sq_ctx->mnq_dis, sq_ctx->lmt_dis); + seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n", + sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum); + seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n", + sq_ctx->tail_offset, sq_ctx->smenq_offset); + seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n", + sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld); + + seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb); + seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb); + seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb); + seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n", + sq_ctx->smenq_next_sqb); + + seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb); + + seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total); + seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n", + sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb); + seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n", + sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena); + seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n", + sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena); + + seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n", + (u64)sq_ctx->scm_lso_rem); + seq_printf(m, "W11: octs \t\t\t%llu\n\n", 
(u64)sq_ctx->octs); + seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts); + seq_printf(m, "W14: dropped_octs \t\t%llu\n\n", + (u64)sq_ctx->dropped_octs); + seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n", + (u64)sq_ctx->dropped_pkts); +} + /* Dumps given nix_sq's context */ static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) { struct nix_sq_ctx_s *sq_ctx = &rsp->sq; + struct nix_hw *nix_hw = m->private; + struct rvu *rvu = nix_hw->rvu; + if (!is_rvu_otx2(rvu)) { + print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx); + return; + } seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n", sq_ctx->sqe_way_mask, sq_ctx->cq); seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n", @@ -974,10 +1140,94 @@ static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) (u64)sq_ctx->dropped_pkts); } +static void print_nix_cn10k_rq_ctx(struct seq_file *m, + struct nix_cn10k_rq_ctx_s *rq_ctx) +{ + seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n", + rq_ctx->ena, rq_ctx->sso_ena); + seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n", + rq_ctx->ipsech_ena, rq_ctx->ena_wqwd); + seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n", + rq_ctx->cq, rq_ctx->lenerr_dis); + seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n", + rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis); + seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n", + rq_ctx->len_il4_dis, rq_ctx->len_il3_dis); + seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n", + rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis); + seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura); + + seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n", + rq_ctx->spb_aura, rq_ctx->lpb_aura); + seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura); + seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n", + rq_ctx->sso_grp, rq_ctx->sso_tt); + seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n", + rq_ctx->pb_caching, rq_ctx->wqe_caching); + seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n", + rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena); + seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n", + rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing); + seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n", + rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena); + + seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id); + seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena); + seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1); + seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n", + rq_ctx->wqe_skip, rq_ctx->spb_ena); + seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n", + rq_ctx->lpb_sizem1, rq_ctx->first_skip); + seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n", + rq_ctx->later_skip, rq_ctx->xqe_imm_size); + seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n", + rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split); + + seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n", + rq_ctx->xqe_drop, rq_ctx->xqe_pass); + seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n", + rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass); + seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n", + rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass); + seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n", + rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop); + + seq_printf(m, 
"W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n", + rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop); + seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n", + rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass); + seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n", + rq_ctx->rq_int, rq_ctx->rq_int_ena); + seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx); + + seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n", + rq_ctx->ltag, rq_ctx->good_utag); + seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n", + rq_ctx->bad_utag, rq_ctx->flow_tagw); + seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n", + rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena); + seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n", + rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp); + seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip); + + seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs); + seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts); + seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs); + seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts); + seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts); +} + /* Dumps given nix_rq's context */ static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) { struct nix_rq_ctx_s *rq_ctx = &rsp->rq; + struct nix_hw *nix_hw = m->private; + struct rvu *rvu = nix_hw->rvu; + + if (!is_rvu_otx2(rvu)) { + print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx); + return; + } seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n", rq_ctx->wqe_aura, rq_ctx->substream); @@ -1439,6 +1689,7 @@ static void rvu_dbg_npa_init(struct rvu *rvu) static int cgx_print_stats(struct seq_file *s, int lmac_id) { struct cgx_link_user_info linfo; + struct mac_ops *mac_ops; void *cgxd = s->private; u64 ucast, mcast, bcast; int stat = 0, err = 0; @@ -1450,6 +1701,11 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id) if (!rvu) return -ENODEV; + mac_ops = get_mac_ops(cgxd); + + if (!mac_ops) + return 0; + /* Link status */ seq_puts(s, "\n=======Link Status======\n\n"); err = cgx_get_link_info(cgxd, lmac_id, &linfo); @@ -1459,7 +1715,8 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id) linfo.link_up ? 
"UP" : "DOWN", linfo.speed); /* Rx stats */ - seq_puts(s, "\n=======NIX RX_STATS(CGX port level)======\n\n"); + seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n", + mac_ops->name); ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames"); if (err) return err; @@ -1481,7 +1738,8 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id) return err; /* Tx stats */ - seq_puts(s, "\n=======NIX TX_STATS(CGX port level)======\n\n"); + seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n", + mac_ops->name); ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames"); if (err) return err; @@ -1500,24 +1758,35 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id) return err; /* Rx stats */ - seq_puts(s, "\n=======CGX RX_STATS======\n\n"); - while (stat < CGX_RX_STATS_COUNT) { - err = cgx_get_rx_stats(cgxd, lmac_id, stat, &rx_stat); + seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name); + while (stat < mac_ops->rx_stats_cnt) { + err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat); if (err) return err; - seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat], rx_stat); + if (is_rvu_otx2(rvu)) + seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat], + rx_stat); + else + seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat], + rx_stat); stat++; } /* Tx stats */ stat = 0; - seq_puts(s, "\n=======CGX TX_STATS======\n\n"); - while (stat < CGX_TX_STATS_COUNT) { - err = cgx_get_tx_stats(cgxd, lmac_id, stat, &tx_stat); + seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name); + while (stat < mac_ops->tx_stats_cnt) { + err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat); if (err) return err; - seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], tx_stat); - stat++; + + if (is_rvu_otx2(rvu)) + seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], + tx_stat); + else + seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat], + tx_stat); + stat++; } return err; @@ -1547,21 +1816,34 @@ RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL); static void rvu_dbg_cgx_init(struct rvu *rvu) { + struct mac_ops *mac_ops; + unsigned long lmac_bmap; + int rvu_def_cgx_id = 0; int i, lmac_id; char dname[20]; void *cgx; - rvu->rvu_dbg.cgx_root = debugfs_create_dir("cgx", rvu->rvu_dbg.root); + if (!cgx_get_cgxcnt_max()) + return; + + mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu)); + if (!mac_ops) + return; + + rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name, + rvu->rvu_dbg.root); for (i = 0; i < cgx_get_cgxcnt_max(); i++) { cgx = rvu_cgx_pdata(i, rvu); if (!cgx) continue; + lmac_bmap = cgx_get_lmac_bmap(cgx); /* cgx debugfs dir */ - sprintf(dname, "cgx%d", i); + sprintf(dname, "%s%d", mac_ops->name, i); rvu->rvu_dbg.cgx = debugfs_create_dir(dname, rvu->rvu_dbg.cgx_root); - for (lmac_id = 0; lmac_id < cgx_get_lmac_cnt(cgx); lmac_id++) { + + for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) { /* lmac debugfs dir */ sprintf(dname, "lmac%d", lmac_id); rvu->rvu_dbg.lmac = @@ -1757,6 +2039,7 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s, seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport)); break; default: + seq_puts(s, "\n"); break; } } @@ -1785,7 +2068,7 @@ static void rvu_dbg_npc_mcam_show_action(struct seq_file *s, break; default: break; - }; + } } else { switch (rule->rx_action.op) { case NIX_RX_ACTIONOP_DROP: @@ -1806,7 +2089,7 @@ static void rvu_dbg_npc_mcam_show_action(struct seq_file *s, break; default: break; - }; + } } } @@ -1903,20 +2186,16 @@ static void rvu_dbg_npc_init(struct rvu *rvu) 
&rvu_dbg_npc_rx_miss_act_fops); } -/* CPT debugfs APIs */ static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type) { - struct rvu *rvu = filp->private; + struct cpt_ctx *ctx = filp->private; u64 busy_sts = 0, free_sts = 0; u32 e_min = 0, e_max = 0, e, i; u16 max_ses, max_ies, max_aes; - int blkaddr; + struct rvu *rvu = ctx->rvu; + int blkaddr = ctx->blkaddr; u64 reg; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0); - if (blkaddr < 0) - return -ENODEV; - reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1); max_ses = reg & 0xffff; max_ies = (reg >> 16) & 0xffff; @@ -1976,16 +2255,13 @@ RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL); static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused) { - struct rvu *rvu = filp->private; + struct cpt_ctx *ctx = filp->private; u16 max_ses, max_ies, max_aes; + struct rvu *rvu = ctx->rvu; + int blkaddr = ctx->blkaddr; u32 e_max, e; - int blkaddr; u64 reg; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0); - if (blkaddr < 0) - return -ENODEV; - reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1); max_ses = reg & 0xffff; max_ies = (reg >> 16) & 0xffff; @@ -2013,17 +2289,15 @@ RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL); static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused) { - struct rvu *rvu = filp->private; - struct rvu_hwinfo *hw = rvu->hw; + struct cpt_ctx *ctx = filp->private; + int blkaddr = ctx->blkaddr; + struct rvu *rvu = ctx->rvu; struct rvu_block *block; - int blkaddr; + struct rvu_hwinfo *hw; u64 reg; u32 lf; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0); - if (blkaddr < 0) - return -ENODEV; - + hw = rvu->hw; block = &hw->block[blkaddr]; if (!block->lf.bmap) return -ENODEV; @@ -2048,13 +2322,10 @@ RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL); static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused) { - struct rvu *rvu = filp->private; + struct cpt_ctx *ctx = filp->private; + struct rvu *rvu = ctx->rvu; + int blkaddr = ctx->blkaddr; u64 reg0, reg1; - int blkaddr; - - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0); - if (blkaddr < 0) - return -ENODEV; reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0)); reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1)); @@ -2078,15 +2349,11 @@ RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL); static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused) { - struct rvu *rvu; - int blkaddr; + struct cpt_ctx *ctx = filp->private; + struct rvu *rvu = ctx->rvu; + int blkaddr = ctx->blkaddr; u64 reg; - rvu = filp->private; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0); - if (blkaddr < 0) - return -ENODEV; - reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC); seq_printf(filp, "CPT instruction requests %llu\n", reg); reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC); @@ -2107,45 +2374,76 @@ static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused) RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL); -static void rvu_dbg_cpt_init(struct rvu *rvu) +static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr) { - if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) + struct cpt_ctx *ctx; + + if (!is_block_implemented(rvu->hw, blkaddr)) return; - rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root); + if (blkaddr == BLKADDR_CPT0) { + rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root); + ctx = &rvu->rvu_dbg.cpt_ctx[0]; + ctx->blkaddr = BLKADDR_CPT0; + ctx->rvu = rvu; + } else { + rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1", + 
rvu->rvu_dbg.root); + ctx = &rvu->rvu_dbg.cpt_ctx[1]; + ctx->blkaddr = BLKADDR_CPT1; + ctx->rvu = rvu; + } - debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, rvu, + debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx, &rvu_dbg_cpt_pc_fops); - debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, rvu, + debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx, &rvu_dbg_cpt_ae_sts_fops); - debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, rvu, + debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx, &rvu_dbg_cpt_se_sts_fops); - debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, rvu, + debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx, &rvu_dbg_cpt_ie_sts_fops); - debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, rvu, + debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx, &rvu_dbg_cpt_engines_info_fops); - debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, rvu, + debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx, &rvu_dbg_cpt_lfs_info_fops); - debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, rvu, + debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx, &rvu_dbg_cpt_err_info_fops); } +static const char *rvu_get_dbg_dir_name(struct rvu *rvu) +{ + if (!is_rvu_otx2(rvu)) + return "cn10k"; + else + return "octeontx2"; +} + void rvu_dbg_init(struct rvu *rvu) { - rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL); + rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL); debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu, &rvu_dbg_rsrc_status_fops); - debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root, rvu, - &rvu_dbg_rvu_pf_cgx_map_fops); + if (!cgx_get_cgxcnt_max()) + goto create; + + if (is_rvu_otx2(rvu)) + debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root, + rvu, &rvu_dbg_rvu_pf_cgx_map_fops); + else + debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root, + rvu, &rvu_dbg_rvu_pf_cgx_map_fops); + +create: rvu_dbg_npa_init(rvu); rvu_dbg_nix_init(rvu, BLKADDR_NIX0); rvu_dbg_nix_init(rvu, BLKADDR_NIX1); rvu_dbg_cgx_init(rvu); rvu_dbg_npc_init(rvu); - rvu_dbg_cpt_init(rvu); + rvu_dbg_cpt_init(rvu, BLKADDR_CPT0); + rvu_dbg_cpt_init(rvu, BLKADDR_CPT1); } void rvu_dbg_exit(struct rvu *rvu) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index bc0e4113370e..10a98bcb7c54 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -52,6 +52,650 @@ static bool rvu_common_request_irq(struct rvu *rvu, int offset, return rvu->irq_allocated[offset]; } +static void rvu_nix_intr_work(struct work_struct *work) +{ + struct rvu_nix_health_reporters *rvu_nix_health_reporter; + + rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, intr_work); + devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_intr_reporter, + "NIX_AF_RVU Error", + rvu_nix_health_reporter->nix_event_ctx); +} + +static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq) +{ + struct rvu_nix_event_ctx *nix_event_context; + struct rvu_devlink *rvu_dl = rvu_irq; + struct rvu *rvu; + int blkaddr; + u64 intr; + + rvu = rvu_dl->rvu; + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); + if (blkaddr < 0) + return IRQ_NONE; + + nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx; + intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT); + 
nix_event_context->nix_af_rvu_int = intr; + + /* Clear interrupts */ + rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr); + rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL); + queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->intr_work); + + return IRQ_HANDLED; +} + +static void rvu_nix_gen_work(struct work_struct *work) +{ + struct rvu_nix_health_reporters *rvu_nix_health_reporter; + + rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, gen_work); + devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_gen_reporter, + "NIX_AF_GEN Error", + rvu_nix_health_reporter->nix_event_ctx); +} + +static irqreturn_t rvu_nix_af_rvu_gen_handler(int irq, void *rvu_irq) +{ + struct rvu_nix_event_ctx *nix_event_context; + struct rvu_devlink *rvu_dl = rvu_irq; + struct rvu *rvu; + int blkaddr; + u64 intr; + + rvu = rvu_dl->rvu; + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); + if (blkaddr < 0) + return IRQ_NONE; + + nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx; + intr = rvu_read64(rvu, blkaddr, NIX_AF_GEN_INT); + nix_event_context->nix_af_rvu_gen = intr; + + /* Clear interrupts */ + rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT, intr); + rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL); + queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->gen_work); + + return IRQ_HANDLED; +} + +static void rvu_nix_err_work(struct work_struct *work) +{ + struct rvu_nix_health_reporters *rvu_nix_health_reporter; + + rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, err_work); + devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_err_reporter, + "NIX_AF_ERR Error", + rvu_nix_health_reporter->nix_event_ctx); +} + +static irqreturn_t rvu_nix_af_rvu_err_handler(int irq, void *rvu_irq) +{ + struct rvu_nix_event_ctx *nix_event_context; + struct rvu_devlink *rvu_dl = rvu_irq; + struct rvu *rvu; + int blkaddr; + u64 intr; + + rvu = rvu_dl->rvu; + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); + if (blkaddr < 0) + return IRQ_NONE; + + nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx; + intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT); + nix_event_context->nix_af_rvu_err = intr; + + /* Clear interrupts */ + rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr); + rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL); + queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->err_work); + + return IRQ_HANDLED; +} + +static void rvu_nix_ras_work(struct work_struct *work) +{ + struct rvu_nix_health_reporters *rvu_nix_health_reporter; + + rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, ras_work); + devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_ras_reporter, + "NIX_AF_RAS Error", + rvu_nix_health_reporter->nix_event_ctx); +} + +static irqreturn_t rvu_nix_af_rvu_ras_handler(int irq, void *rvu_irq) +{ + struct rvu_nix_event_ctx *nix_event_context; + struct rvu_devlink *rvu_dl = rvu_irq; + struct rvu *rvu; + int blkaddr; + u64 intr; + + rvu = rvu_dl->rvu; + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); + if (blkaddr < 0) + return IRQ_NONE; + + nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx; + intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT); + nix_event_context->nix_af_rvu_ras = intr; + + /* Clear interrupts */ + rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr); + rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL); + queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->ras_work); + + return IRQ_HANDLED; +} + +static void 
rvu_nix_unregister_interrupts(struct rvu *rvu) +{ + struct rvu_devlink *rvu_dl = rvu->rvu_dl; + int offs, i, blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); + if (blkaddr < 0) + return; + + offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff; + if (!offs) + return; + + rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL); + rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL); + rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL); + rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL); + + if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) { + free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU), + rvu_dl); + rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false; + } + + for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++) + if (rvu->irq_allocated[offs + i]) { + free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl); + rvu->irq_allocated[offs + i] = false; + } +} + +static int rvu_nix_register_interrupts(struct rvu *rvu) +{ + int blkaddr, base; + bool rc; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); + if (blkaddr < 0) + return blkaddr; + + /* Get NIX AF MSIX vectors offset. */ + base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff; + if (!base) { + dev_warn(rvu->dev, + "Failed to get NIX%d NIX_AF_INT vector offsets\n", + blkaddr - BLKADDR_NIX0); + return 0; + } + /* Register and enable NIX_AF_RVU_INT interrupt */ + rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_RVU, + "NIX_AF_RVU_INT", + rvu_nix_af_rvu_intr_handler); + if (!rc) + goto err; + rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL); + + /* Register and enable NIX_AF_GEN_INT interrupt */ + rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_GEN, + "NIX_AF_GEN_INT", + rvu_nix_af_rvu_gen_handler); + if (!rc) + goto err; + rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL); + + /* Register and enable NIX_AF_ERR_INT interrupt */ + rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR, + "NIX_AF_ERR_INT", + rvu_nix_af_rvu_err_handler); + if (!rc) + goto err; + rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL); + + /* Register and enable NIX_AF_RAS interrupt */ + rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_POISON, + "NIX_AF_RAS", + rvu_nix_af_rvu_ras_handler); + if (!rc) + goto err; + rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL); + + return 0; +err: + rvu_nix_unregister_interrupts(rvu); + return rc; +} + +static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx, + enum nix_af_rvu_health health_reporter) +{ + struct rvu_nix_event_ctx *nix_event_context; + u64 intr_val; + int err; + + nix_event_context = ctx; + switch (health_reporter) { + case NIX_AF_RVU_INTR: + intr_val = nix_event_context->nix_af_rvu_int; + err = rvu_report_pair_start(fmsg, "NIX_AF_RVU"); + if (err) + return err; + err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ", + nix_event_context->nix_af_rvu_int); + if (err) + return err; + if (intr_val & BIT_ULL(0)) { + err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error"); + if (err) + return err; + } + err = rvu_report_pair_end(fmsg); + if (err) + return err; + break; + case NIX_AF_RVU_GEN: + intr_val = nix_event_context->nix_af_rvu_gen; + err = rvu_report_pair_start(fmsg, "NIX_AF_GENERAL"); + if (err) + return err; + err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ", + nix_event_context->nix_af_rvu_gen); + if (err) + return err; + if (intr_val & BIT_ULL(0)) { + err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop"); + if (err) + return err; + } + 
if (intr_val & BIT_ULL(1)) { + err = devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop"); + if (err) + return err; + } + if (intr_val & BIT_ULL(4)) { + err = devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done"); + if (err) + return err; + } + err = rvu_report_pair_end(fmsg); + if (err) + return err; + break; + case NIX_AF_RVU_ERR: + intr_val = nix_event_context->nix_af_rvu_err; + err = rvu_report_pair_start(fmsg, "NIX_AF_ERR"); + if (err) + return err; + err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ", + nix_event_context->nix_af_rvu_err); + if (err) + return err; + if (intr_val & BIT_ULL(14)) { + err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read"); + if (err) + return err; + } + if (intr_val & BIT_ULL(13)) { + err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write"); + if (err) + return err; + } + if (intr_val & BIT_ULL(12)) { + err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error"); + if (err) + return err; + } + if (intr_val & BIT_ULL(6)) { + err = devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC"); + if (err) + return err; + } + if (intr_val & BIT_ULL(5)) { + err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error"); + if (err) + return err; + } + if (intr_val & BIT_ULL(4)) { + err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read"); + if (err) + return err; + } + if (intr_val & BIT_ULL(3)) { + err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read"); + if (err) + return err; + } + if (intr_val & BIT_ULL(2)) { + err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read"); + if (err) + return err; + } + if (intr_val & BIT_ULL(1)) { + err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write"); + if (err) + return err; + } + if (intr_val & BIT_ULL(0)) { + err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write"); + if (err) + return err; + } + err = rvu_report_pair_end(fmsg); + if (err) + return err; + break; + case NIX_AF_RVU_RAS: + intr_val = nix_event_context->nix_af_rvu_err; + err = rvu_report_pair_start(fmsg, "NIX_AF_RAS"); + if (err) + return err; + err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ", + nix_event_context->nix_af_rvu_err); + if (err) + return err; + err = devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:"); + if (err) + return err; + if (intr_val & BIT_ULL(34)) { + err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S"); + if (err) + return err; + } + if (intr_val & BIT_ULL(33)) { + err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S"); + if (err) + return err; + } + if (intr_val & BIT_ULL(32)) { + err = devlink_fmsg_string_put(fmsg, "\n\tHW ctx"); + if (err) + return err; + } + if (intr_val & BIT_ULL(4)) { + err = devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer"); + if (err) + return err; + } + if (intr_val & BIT_ULL(3)) { + err = devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer"); + + if (err) + return err; + } + if (intr_val & BIT_ULL(2)) { + err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer"); + if (err) + return err; + } + if (intr_val & BIT_ULL(1)) { + err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer"); + if (err) + return err; + } + if (intr_val & BIT_ULL(0)) { + err = devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read"); + if (err) + return err; + } + err = rvu_report_pair_end(fmsg); + if (err) + return err; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int rvu_hw_nix_intr_dump(struct 
devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *ctx, + struct netlink_ext_ack *netlink_extack) +{ + struct rvu *rvu = devlink_health_reporter_priv(reporter); + struct rvu_devlink *rvu_dl = rvu->rvu_dl; + struct rvu_nix_event_ctx *nix_ctx; + + nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx; + + return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_INTR) : + rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_INTR); +} + +static int rvu_hw_nix_intr_recover(struct devlink_health_reporter *reporter, + void *ctx, struct netlink_ext_ack *netlink_extack) +{ + struct rvu *rvu = devlink_health_reporter_priv(reporter); + struct rvu_nix_event_ctx *nix_event_ctx = ctx; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); + if (blkaddr < 0) + return blkaddr; + + if (nix_event_ctx->nix_af_rvu_int) + rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL); + + return 0; +} + +static int rvu_hw_nix_gen_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *ctx, + struct netlink_ext_ack *netlink_extack) +{ + struct rvu *rvu = devlink_health_reporter_priv(reporter); + struct rvu_devlink *rvu_dl = rvu->rvu_dl; + struct rvu_nix_event_ctx *nix_ctx; + + nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx; + + return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_GEN) : + rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_GEN); +} + +static int rvu_hw_nix_gen_recover(struct devlink_health_reporter *reporter, + void *ctx, struct netlink_ext_ack *netlink_extack) +{ + struct rvu *rvu = devlink_health_reporter_priv(reporter); + struct rvu_nix_event_ctx *nix_event_ctx = ctx; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); + if (blkaddr < 0) + return blkaddr; + + if (nix_event_ctx->nix_af_rvu_gen) + rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL); + + return 0; +} + +static int rvu_hw_nix_err_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *ctx, + struct netlink_ext_ack *netlink_extack) +{ + struct rvu *rvu = devlink_health_reporter_priv(reporter); + struct rvu_devlink *rvu_dl = rvu->rvu_dl; + struct rvu_nix_event_ctx *nix_ctx; + + nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx; + + return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_ERR) : + rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_ERR); +} + +static int rvu_hw_nix_err_recover(struct devlink_health_reporter *reporter, + void *ctx, struct netlink_ext_ack *netlink_extack) +{ + struct rvu *rvu = devlink_health_reporter_priv(reporter); + struct rvu_nix_event_ctx *nix_event_ctx = ctx; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); + if (blkaddr < 0) + return blkaddr; + + if (nix_event_ctx->nix_af_rvu_err) + rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL); + + return 0; +} + +static int rvu_hw_nix_ras_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *ctx, + struct netlink_ext_ack *netlink_extack) +{ + struct rvu *rvu = devlink_health_reporter_priv(reporter); + struct rvu_devlink *rvu_dl = rvu->rvu_dl; + struct rvu_nix_event_ctx *nix_ctx; + + nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx; + + return ctx ? 
rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_RAS) : + rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_RAS); +} + +static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter, + void *ctx, struct netlink_ext_ack *netlink_extack) +{ + struct rvu *rvu = devlink_health_reporter_priv(reporter); + struct rvu_nix_event_ctx *nix_event_ctx = ctx; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); + if (blkaddr < 0) + return blkaddr; + + if (nix_event_ctx->nix_af_rvu_int) + rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL); + + return 0; +} + +RVU_REPORTERS(hw_nix_intr); +RVU_REPORTERS(hw_nix_gen); +RVU_REPORTERS(hw_nix_err); +RVU_REPORTERS(hw_nix_ras); + +static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl); + +static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl) +{ + struct rvu_nix_health_reporters *rvu_reporters; + struct rvu_nix_event_ctx *nix_event_context; + struct rvu *rvu = rvu_dl->rvu; + + rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL); + if (!rvu_reporters) + return -ENOMEM; + + rvu_dl->rvu_nix_health_reporter = rvu_reporters; + nix_event_context = kzalloc(sizeof(*nix_event_context), GFP_KERNEL); + if (!nix_event_context) + return -ENOMEM; + + rvu_reporters->nix_event_ctx = nix_event_context; + rvu_reporters->rvu_hw_nix_intr_reporter = + devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu); + if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) { + dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n", + PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)); + return PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter); + } + + rvu_reporters->rvu_hw_nix_gen_reporter = + devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu); + if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) { + dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n", + PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)); + return PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter); + } + + rvu_reporters->rvu_hw_nix_err_reporter = + devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu); + if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) { + dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n", + PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter)); + return PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter); + } + + rvu_reporters->rvu_hw_nix_ras_reporter = + devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu); + if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) { + dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n", + PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)); + return PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter); + } + + rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq"); + if (!rvu_dl->devlink_wq) + goto err; + + INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work); + INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work); + INIT_WORK(&rvu_reporters->err_work, rvu_nix_err_work); + INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work); + + return 0; +err: + rvu_nix_health_reporters_destroy(rvu_dl); + return -ENOMEM; +} + +static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl) +{ + struct rvu *rvu = rvu_dl->rvu; + int err; + + err = rvu_nix_register_reporters(rvu_dl); + if (err) { + dev_warn(rvu->dev, "Failed to create nix reporter, err =%d\n", + err); + return err; + } + rvu_nix_register_interrupts(rvu); + + return 0; +} + +static void 
rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl) +{ + struct rvu_nix_health_reporters *nix_reporters; + struct rvu *rvu = rvu_dl->rvu; + + nix_reporters = rvu_dl->rvu_nix_health_reporter; + + if (!nix_reporters->rvu_hw_nix_ras_reporter) + return; + if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_intr_reporter)) + devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_intr_reporter); + + if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_gen_reporter)) + devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_gen_reporter); + + if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_err_reporter)) + devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_err_reporter); + + if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_ras_reporter)) + devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_ras_reporter); + + rvu_nix_unregister_interrupts(rvu); + kfree(rvu_dl->rvu_nix_health_reporter->nix_event_ctx); + kfree(rvu_dl->rvu_nix_health_reporter); +} + static void rvu_npa_intr_work(struct work_struct *work) { struct rvu_npa_health_reporters *rvu_npa_health_reporter; @@ -698,9 +1342,14 @@ static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl) static int rvu_health_reporters_create(struct rvu *rvu) { struct rvu_devlink *rvu_dl; + int err; rvu_dl = rvu->rvu_dl; - return rvu_npa_health_reporters_create(rvu_dl); + err = rvu_npa_health_reporters_create(rvu_dl); + if (err) + return err; + + return rvu_nix_health_reporters_create(rvu_dl); } static void rvu_health_reporters_destroy(struct rvu *rvu) @@ -712,6 +1361,7 @@ static void rvu_health_reporters_destroy(struct rvu *rvu) rvu_dl = rvu->rvu_dl; rvu_npa_health_reporters_destroy(rvu_dl); + rvu_nix_health_reporters_destroy(rvu_dl); } static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h index d7578fa92ac1..471e57dedb20 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h @@ -41,11 +41,38 @@ struct rvu_npa_health_reporters { struct work_struct ras_work; }; +enum nix_af_rvu_health { + NIX_AF_RVU_INTR, + NIX_AF_RVU_GEN, + NIX_AF_RVU_ERR, + NIX_AF_RVU_RAS, +}; + +struct rvu_nix_event_ctx { + u64 nix_af_rvu_int; + u64 nix_af_rvu_gen; + u64 nix_af_rvu_err; + u64 nix_af_rvu_ras; +}; + +struct rvu_nix_health_reporters { + struct rvu_nix_event_ctx *nix_event_ctx; + struct devlink_health_reporter *rvu_hw_nix_intr_reporter; + struct work_struct intr_work; + struct devlink_health_reporter *rvu_hw_nix_gen_reporter; + struct work_struct gen_work; + struct devlink_health_reporter *rvu_hw_nix_err_reporter; + struct work_struct err_work; + struct devlink_health_reporter *rvu_hw_nix_ras_reporter; + struct work_struct ras_work; +}; + struct rvu_devlink { struct devlink *dl; struct rvu *rvu; struct workqueue_struct *devlink_wq; struct rvu_npa_health_reporters *rvu_npa_health_reporter; + struct rvu_nix_health_reporters *rvu_nix_health_reporter; }; /* Devlink APIs */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index a8dfbb6d1774..d3000194e2d3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -16,6 +16,7 @@ #include "rvu.h" #include "npc.h" #include "cgx.h" +#include "lmac_common.h" static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc); static int 
rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, @@ -214,6 +215,7 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr, static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct mac_ops *mac_ops; int pkind, pf, vf, lbkid; u8 cgx_id, lmac_id; int err; @@ -233,17 +235,19 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) "PF_Func 0x%x: Invalid pkind\n", pcifunc); return -EINVAL; } - pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0); + pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0); pfvf->tx_chan_base = pfvf->rx_chan_base; pfvf->rx_chan_cnt = 1; pfvf->tx_chan_cnt = 1; cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind); rvu_npc_set_pkind(rvu, pkind, pfvf); + mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu)); /* By default we enable pause frames */ if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0) - cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu), - lmac_id, true, true); + mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id, + rvu), + lmac_id, true, true); break; case NIX_INTF_TYPE_LBK: vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; @@ -262,10 +266,10 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) * loopback channels.Therefore if odd number of AF VFs are * enabled then the last VF remains with no pair. */ - pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(lbkid, vf); + pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf); pfvf->tx_chan_base = vf & 0x1 ? - NIX_CHAN_LBK_CHX(lbkid, vf - 1) : - NIX_CHAN_LBK_CHX(lbkid, vf + 1); + rvu_nix_chan_lbk(rvu, lbkid, vf - 1) : + rvu_nix_chan_lbk(rvu, lbkid, vf + 1); pfvf->rx_chan_cnt = 1; pfvf->tx_chan_cnt = 1; rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, @@ -1000,6 +1004,14 @@ int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, return rvu_nix_aq_enq_inst(rvu, req, rsp); } #endif +/* CN10K mbox handler */ +int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu, + struct nix_cn10k_aq_enq_req *req, + struct nix_cn10k_aq_enq_rsp *rsp) +{ + return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req, + (struct nix_aq_enq_rsp *)rsp); +} int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req, @@ -2535,6 +2547,43 @@ static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw, return 0; } +static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu) +{ + /* CN10K supports LBK FIFO size 72 KB */ + if (rvu->hw->lbk_bufsize == 0x12000) + *max_mtu = CN10K_LBK_LINK_MAX_FRS; + else + *max_mtu = NIC_HW_MAX_FRS; +} + +static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu) +{ + /* RPM supports FIFO len 128 KB */ + if (rvu_cgx_get_fifolen(rvu) == 0x20000) + *max_mtu = CN10K_LMAC_LINK_MAX_FRS; + else + *max_mtu = NIC_HW_MAX_FRS; +} + +int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, + struct nix_hw_info *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + if (is_afvf(pcifunc)) + rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu); + else + rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu); + + rsp->min_mtu = NIC_HW_MIN_FRS; + return 0; +} + int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { @@ -2580,6 +2629,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) struct nix_rx_flowkey_alg *field; struct nix_rx_flowkey_alg 
tmp; u32 key_type, valid_key; + int l4_key_offset; if (!alg) return -EINVAL; @@ -2712,6 +2762,12 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) field_marker = false; keyoff_marker = false; } + + /* TCP/UDP/SCTP and ESP/AH falls at same offset so + * remember the TCP key offset of 40 byte hash key. + */ + if (key_type == NIX_FLOW_KEY_TYPE_TCP) + l4_key_offset = key_off; break; case NIX_FLOW_KEY_TYPE_NVGRE: field->lid = NPC_LID_LD; @@ -2783,11 +2839,31 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) field->ltype_mask = 0xF; field->fn_mask = 1; /* Mask out the first nibble */ break; + case NIX_FLOW_KEY_TYPE_AH: + case NIX_FLOW_KEY_TYPE_ESP: + field->hdr_offset = 0; + field->bytesm1 = 7; /* SPI + sequence number */ + field->ltype_mask = 0xF; + field->lid = NPC_LID_LE; + field->ltype_match = NPC_LT_LE_ESP; + if (key_type == NIX_FLOW_KEY_TYPE_AH) { + field->lid = NPC_LID_LD; + field->ltype_match = NPC_LT_LD_AH; + field->hdr_offset = 4; + keyoff_marker = false; + } + break; } field->ena = 1; /* Found a valid flow key type */ if (valid_key) { + /* Use the key offset of TCP/UDP/SCTP fields + * for ESP/AH fields. + */ + if (key_type == NIX_FLOW_KEY_TYPE_ESP || + key_type == NIX_FLOW_KEY_TYPE_AH) + key_off = l4_key_offset; field->key_offset = key_off; memcpy(&alg[nr_field], field, sizeof(*field)); max_key_off = max(max_key_off, field->bytesm1 + 1); @@ -3072,6 +3148,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, u64 cfg, lmac_fifo_len; struct nix_hw *nix_hw; u8 cgx = 0, lmac = 0; + u16 max_mtu; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) @@ -3081,7 +3158,12 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, if (!nix_hw) return -EINVAL; - if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS) + if (is_afvf(pcifunc)) + rvu_get_lbk_link_max_frs(rvu, &max_mtu); + else + rvu_get_lmac_link_max_frs(rvu, &max_mtu); + + if (!req->sdp_link && req->maxlen > max_mtu) return NIX_AF_ERR_FRS_INVALID; if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) @@ -3141,7 +3223,8 @@ linkcfg: /* Update transmit credits for CGX links */ lmac_fifo_len = - CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); + rvu_cgx_get_fifolen(rvu) / + cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link)); cfg &= ~(0xFFFFFULL << 12); cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12; @@ -3181,23 +3264,40 @@ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, return 0; } +static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs) +{ + /* CN10k supports 72KB FIFO size and max packet size of 64k */ + if (rvu->hw->lbk_bufsize == 0x12000) + return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16; + + return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */ +} + static void nix_link_config(struct rvu *rvu, int blkaddr) { struct rvu_hwinfo *hw = rvu->hw; int cgx, lmac_cnt, slink, link; + u16 lbk_max_frs, lmac_max_frs; u64 tx_credits; + rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs); + rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs); + /* Set default min/max packet lengths allowed on NIX Rx links. * * With HW reset minlen value of 60byte, HW will treat ARP pkts * as undersize and report them to SW as error pkts, hence * setting it to 40 bytes. 
*/ - for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) { + for (link = 0; link < hw->cgx_links; link++) { rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), - NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); + ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS); } + for (link = hw->cgx_links; link < hw->lbk_links; link++) { + rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), + ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS); + } if (hw->sdp_links) { link = hw->cgx_links + hw->lbk_links; rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), @@ -3209,7 +3309,8 @@ static void nix_link_config(struct rvu *rvu, int blkaddr) */ for (cgx = 0; cgx < hw->cgx; cgx++) { lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); - tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16; + tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) - + lmac_max_frs) / 16; /* Enable credits and set credit pkt count to max allowed */ tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); slink = cgx * hw->lmac_per_cgx; @@ -3223,7 +3324,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr) /* Set Tx credits for LBK link */ slink = hw->cgx_links; for (link = slink; link < (slink + hw->lbk_links); link++) { - tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */ + tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs); /* Enable credits and set credit pkt count to max allowed */ tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); rvu_write64(rvu, blkaddr, @@ -3354,14 +3455,6 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) if (err) return err; - /* Set num of links of each type */ - cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); - hw->cgx = (cfg >> 12) & 0xF; - hw->lmac_per_cgx = (cfg >> 8) & 0xF; - hw->cgx_links = hw->cgx * hw->lmac_per_cgx; - hw->lbk_links = (cfg >> 24) & 0xF; - hw->sdp_links = 1; - /* Initialize admin queue */ err = nix_aq_init(rvu, block); if (err) @@ -3596,10 +3689,14 @@ static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) { struct rvu_hwinfo *hw = rvu->hw; struct rvu_block *block; - int blkaddr; + int blkaddr, pf; int nixlf; u64 cfg; + pf = rvu_get_pf(pcifunc); + if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP)) + return 0; + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) return NIX_AF_ERR_AF_LF_INVALID; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 5cf9b7a907ae..04bb0803a5c5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -102,9 +102,9 @@ int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel) return -EINVAL; } else { rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); - base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0x0); + base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0x0); /* CGX mapped functions has maximum of 16 channels */ - end = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0xF); + end = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0xF); } if (channel < base || channel > end) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index 14832b66d1fe..4ba9d54ce4e3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -26,6 +26,11 @@ static const char * const npc_flow_names[] = { [NPC_DIP_IPV4] = "ipv4 destination ip", [NPC_SIP_IPV6] = "ipv6 source ip", 
[NPC_DIP_IPV6] = "ipv6 destination ip", + [NPC_IPPROTO_TCP] = "ip proto tcp", + [NPC_IPPROTO_UDP] = "ip proto udp", + [NPC_IPPROTO_SCTP] = "ip proto sctp", + [NPC_IPPROTO_AH] = "ip proto AH", + [NPC_IPPROTO_ESP] = "ip proto ESP", [NPC_SPORT_TCP] = "tcp source port", [NPC_DPORT_TCP] = "tcp destination port", [NPC_SPORT_UDP] = "udp source port", @@ -212,13 +217,13 @@ static bool npc_check_overlap(struct rvu *rvu, int blkaddr, return false; } -static int npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type, - u8 intf) +static bool npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type, + u8 intf) { if (!npc_is_field_present(rvu, type, intf) || npc_check_overlap(rvu, blkaddr, type, 0, intf)) - return -EOPNOTSUPP; - return 0; + return false; + return true; } static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number, @@ -269,7 +274,7 @@ static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number, break; default: return; - }; + } npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf); } @@ -448,14 +453,13 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf) struct npc_mcam *mcam = &rvu->hw->mcam; u64 *features = &mcam->rx_features; u64 tcp_udp_sctp; - int err, hdr; + int hdr; if (is_npc_intf_tx(intf)) features = &mcam->tx_features; for (hdr = NPC_DMAC; hdr < NPC_HEADER_FIELDS_MAX; hdr++) { - err = npc_check_field(rvu, blkaddr, hdr, intf); - if (!err) + if (npc_check_field(rvu, blkaddr, hdr, intf)) *features |= BIT_ULL(hdr); } @@ -464,13 +468,26 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf) BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP); /* for tcp/udp/sctp corresponding layer type should be in the key */ - if (*features & tcp_udp_sctp) - if (npc_check_field(rvu, blkaddr, NPC_LD, intf)) + if (*features & tcp_udp_sctp) { + if (!npc_check_field(rvu, blkaddr, NPC_LD, intf)) *features &= ~tcp_udp_sctp; + else + *features |= BIT_ULL(NPC_IPPROTO_TCP) | + BIT_ULL(NPC_IPPROTO_UDP) | + BIT_ULL(NPC_IPPROTO_SCTP); + } + + /* for AH, check if corresponding layer type is present in the key */ + if (npc_check_field(rvu, blkaddr, NPC_LD, intf)) + *features |= BIT_ULL(NPC_IPPROTO_AH); + + /* for ESP, check if corresponding layer type is present in the key */ + if (npc_check_field(rvu, blkaddr, NPC_LE, intf)) + *features |= BIT_ULL(NPC_IPPROTO_ESP); /* for vlan corresponding layer type should be in the key */ if (*features & BIT_ULL(NPC_OUTER_VID)) - if (npc_check_field(rvu, blkaddr, NPC_LB, intf)) + if (!npc_check_field(rvu, blkaddr, NPC_LB, intf)) *features &= ~BIT_ULL(NPC_OUTER_VID); } @@ -743,13 +760,13 @@ static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry, return; /* For tcp/udp/sctp LTYPE should be present in entry */ - if (features & (BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_DPORT_TCP))) + if (features & BIT_ULL(NPC_IPPROTO_TCP)) npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_TCP, 0, ~0ULL, 0, intf); - if (features & (BIT_ULL(NPC_SPORT_UDP) | BIT_ULL(NPC_DPORT_UDP))) + if (features & BIT_ULL(NPC_IPPROTO_UDP)) npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_UDP, 0, ~0ULL, 0, intf); - if (features & (BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP))) + if (features & BIT_ULL(NPC_IPPROTO_SCTP)) npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP, 0, ~0ULL, 0, intf); @@ -758,6 +775,15 @@ static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry, NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0, NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf); + /* For AH, LTYPE should be present in entry */ + if 
(features & BIT_ULL(NPC_IPPROTO_AH)) + npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_AH, + 0, ~0ULL, 0, intf); + /* For ESP, LTYPE should be present in entry */ + if (features & BIT_ULL(NPC_IPPROTO_ESP)) + npc_update_entry(rvu, NPC_LE, entry, NPC_LT_LE_ESP, + 0, ~0ULL, 0, intf); + #define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi) \ do { \ if (features & BIT_ULL((field))) { \ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 0fb2aa909a23..3e401fd8ac63 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -44,6 +44,11 @@ #define RVU_AF_PFME_INT_W1S (0x28c8) #define RVU_AF_PFME_INT_ENA_W1S (0x28d0) #define RVU_AF_PFME_INT_ENA_W1C (0x28d8) +#define RVU_AF_PFX_BAR4_ADDR(a) (0x5000 | (a) << 4) +#define RVU_AF_PFX_BAR4_CFG (0x5200 | (a) << 4) +#define RVU_AF_PFX_VF_BAR4_ADDR (0x5400 | (a) << 4) +#define RVU_AF_PFX_VF_BAR4_CFG (0x5600 | (a) << 4) +#define RVU_AF_PFX_LMTLINE_ADDR (0x5800 | (a) << 4) /* Admin function's privileged PF/VF registers */ #define RVU_PRIV_CONST (0x8000000) @@ -100,6 +105,8 @@ #define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4) #define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4) #define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3) +#define RVU_PF_VF_MBOX_ADDR (0xC40) +#define RVU_PF_LMTLINE_ADDR (0xC48) /* RVU VF registers */ #define RVU_VF_VFPF_MBOX0 (0x00000) @@ -399,12 +406,16 @@ #define NIX_AF_RX_NPC_MIRROR_RCV (0x4720) #define NIX_AF_RX_NPC_MIRROR_DROP (0x4730) #define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16) +#define NIX_AF_LINKX_CFG(a) (0x4010 | (a) << 17) #define NIX_PRIV_AF_INT_CFG (0x8000000) #define NIX_PRIV_LFX_CFG (0x8000010) #define NIX_PRIV_LFX_INT_CFG (0x8000020) #define NIX_AF_RVU_LF_CFG_DEBUG (0x8000030) +#define NIX_AF_LINKX_BASE_MASK GENMASK_ULL(11, 0) +#define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16) + /* SSO */ #define SSO_AF_CONST (0x1000) #define SSO_AF_CONST1 (0x1008) @@ -484,9 +495,17 @@ #define CPT_AF_RAS_INT_ENA_W1S (0x47030) #define CPT_AF_RAS_INT_ENA_W1C (0x47038) +#define AF_BAR2_ALIASX(a, b) (0x9100000ull | (a) << 12 | (b)) +#define CPT_AF_BAR2_SEL 0x9000000 +#define CPT_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b) + #define CPT_AF_LF_CTL2_SHIFT 3 #define CPT_AF_LF_SSO_PF_FUNC_SHIFT 32 +#define CPT_LF_CTL 0x10 +#define CPT_LF_INPROG 0x40 +#define CPT_LF_Q_GRP_PTR 0x120 + #define NPC_AF_BLK_RST (0x00040) /* NPC */ @@ -629,4 +648,17 @@ (0x00F00 | (a) << 5 | (b) << 4) #define NDC_AF_BANKX_HIT_PC(a) (0x01000 | (a) << 3) #define NDC_AF_BANKX_MISS_PC(a) (0x01100 | (a) << 3) + +/* LBK */ +#define LBK_CONST (0x10ull) +#define LBK_LINK_CFG_P2X (0x400ull) +#define LBK_LINK_CFG_X2P (0x408ull) +#define LBK_CONST_CHANS GENMASK_ULL(47, 32) +#define LBK_CONST_DST GENMASK_ULL(31, 28) +#define LBK_CONST_SRC GENMASK_ULL(27, 24) +#define LBK_CONST_BUF_SIZE GENMASK_ULL(23, 0) +#define LBK_LINK_CFG_RANGE_MASK GENMASK_ULL(19, 16) +#define LBK_LINK_CFG_ID_MASK GENMASK_ULL(11, 6) +#define LBK_LINK_CFG_BASE_MASK GENMASK_ULL(5, 0) + #endif /* RVU_REG_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h index e2153d47c373..5e5f45c7eab0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -74,6 +74,16 @@ enum npa_af_int_vec_e { NPA_AF_INT_VEC_CNT = 0x5, }; +/* NIX Admin function Interrupt Vector Enumeration */ +enum nix_af_int_vec_e { + 
NIX_AF_INT_VEC_RVU = 0x0, + NIX_AF_INT_VEC_GEN = 0x1, + NIX_AF_INT_VEC_AQ_DONE = 0x2, + NIX_AF_INT_VEC_AF_ERR = 0x3, + NIX_AF_INT_VEC_POISON = 0x4, + NIX_AF_INT_VEC_CNT = 0x5, +}; + /** * RVU PF Interrupt Vector Enumeration */ @@ -129,63 +139,29 @@ enum npa_inpq { /* NPA admin queue instruction structure */ struct npa_aq_inst_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 doneint : 1; /* W0 */ - u64 reserved_44_62 : 19; - u64 cindex : 20; - u64 reserved_17_23 : 7; - u64 lf : 9; - u64 ctype : 4; - u64 op : 4; -#else - u64 op : 4; + u64 op : 4; /* W0 */ u64 ctype : 4; u64 lf : 9; u64 reserved_17_23 : 7; u64 cindex : 20; u64 reserved_44_62 : 19; u64 doneint : 1; -#endif u64 res_addr; /* W1 */ }; /* NPA admin queue result structure */ struct npa_aq_res_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_17_63 : 47; /* W0 */ - u64 doneint : 1; - u64 compcode : 8; - u64 ctype : 4; - u64 op : 4; -#else - u64 op : 4; + u64 op : 4; /* W0 */ u64 ctype : 4; u64 compcode : 8; u64 doneint : 1; u64 reserved_17_63 : 47; -#endif u64 reserved_64_127; /* W1 */ }; struct npa_aura_s { u64 pool_addr; /* W0 */ -#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ - u64 avg_level : 8; - u64 reserved_118_119 : 2; - u64 shift : 6; - u64 aura_drop : 8; - u64 reserved_98_103 : 6; - u64 bp_ena : 2; - u64 aura_drop_ena : 1; - u64 pool_drop_ena : 1; - u64 reserved_93 : 1; - u64 avg_con : 9; - u64 pool_way_mask : 16; - u64 pool_caching : 1; - u64 reserved_65 : 2; - u64 ena : 1; -#else - u64 ena : 1; + u64 ena : 1; /* W1 */ u64 reserved_65 : 2; u64 pool_caching : 1; u64 pool_way_mask : 16; @@ -199,59 +175,24 @@ struct npa_aura_s { u64 shift : 6; u64 reserved_118_119 : 2; u64 avg_level : 8; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */ - u64 reserved_189_191 : 3; - u64 nix1_bpid : 9; - u64 reserved_177_179 : 3; - u64 nix0_bpid : 9; - u64 reserved_164_167 : 4; - u64 count : 36; -#else - u64 count : 36; + u64 count : 36; /* W2 */ u64 reserved_164_167 : 4; u64 nix0_bpid : 9; u64 reserved_177_179 : 3; u64 nix1_bpid : 9; u64 reserved_189_191 : 3; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */ - u64 reserved_252_255 : 4; - u64 fc_hyst_bits : 4; - u64 fc_stype : 2; - u64 fc_up_crossing : 1; - u64 fc_ena : 1; - u64 reserved_240_243 : 4; - u64 bp : 8; - u64 reserved_228_231 : 4; - u64 limit : 36; -#else - u64 limit : 36; + u64 limit : 36; /* W3 */ u64 reserved_228_231 : 4; u64 bp : 8; - u64 reserved_240_243 : 4; + u64 reserved_241_243 : 3; + u64 fc_be : 1; u64 fc_ena : 1; u64 fc_up_crossing : 1; u64 fc_stype : 2; u64 fc_hyst_bits : 4; u64 reserved_252_255 : 4; -#endif u64 fc_addr; /* W4 */ -#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */ - u64 reserved_379_383 : 5; - u64 err_qint_idx : 7; - u64 reserved_371 : 1; - u64 thresh_qint_idx : 7; - u64 reserved_363 : 1; - u64 thresh_up : 1; - u64 thresh_int_ena : 1; - u64 thresh_int : 1; - u64 err_int_ena : 8; - u64 err_int : 8; - u64 update_time : 16; - u64 pool_drop : 8; -#else - u64 pool_drop : 8; + u64 pool_drop : 8; /* W5 */ u64 update_time : 16; u64 err_int : 8; u64 err_int_ena : 8; @@ -263,31 +204,15 @@ struct npa_aura_s { u64 reserved_371 : 1; u64 err_qint_idx : 7; u64 reserved_379_383 : 5; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */ - u64 reserved_420_447 : 28; - u64 thresh : 36; -#else - u64 thresh : 36; - u64 reserved_420_447 : 28; -#endif + u64 thresh : 36; /* W6*/ + u64 rsvd_423_420 : 4; + u64 fc_msh_dst : 11; + u64 reserved_435_447 : 13; u64 reserved_448_511; /* W7 */ }; struct npa_pool_s { u64 stack_base; /* W0 */ -#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ - 
u64 reserved_115_127 : 13; - u64 buf_size : 11; - u64 reserved_100_103 : 4; - u64 buf_offset : 12; - u64 stack_way_mask : 16; - u64 reserved_70_71 : 3; - u64 stack_caching : 1; - u64 reserved_66_67 : 2; - u64 nat_align : 1; - u64 ena : 1; -#else u64 ena : 1; u64 nat_align : 1; u64 reserved_66_67 : 2; @@ -298,36 +223,10 @@ struct npa_pool_s { u64 reserved_100_103 : 4; u64 buf_size : 11; u64 reserved_115_127 : 13; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */ - u64 stack_pages : 32; - u64 stack_max_pages : 32; -#else u64 stack_max_pages : 32; u64 stack_pages : 32; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */ - u64 reserved_240_255 : 16; - u64 op_pc : 48; -#else u64 op_pc : 48; u64 reserved_240_255 : 16; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */ - u64 reserved_316_319 : 4; - u64 update_time : 16; - u64 reserved_297_299 : 3; - u64 fc_up_crossing : 1; - u64 fc_hyst_bits : 4; - u64 fc_stype : 2; - u64 fc_ena : 1; - u64 avg_con : 9; - u64 avg_level : 8; - u64 reserved_270_271 : 2; - u64 shift : 6; - u64 reserved_260_263 : 4; - u64 stack_offset : 4; -#else u64 stack_offset : 4; u64 reserved_260_263 : 4; u64 shift : 6; @@ -338,26 +237,13 @@ struct npa_pool_s { u64 fc_stype : 2; u64 fc_hyst_bits : 4; u64 fc_up_crossing : 1; - u64 reserved_297_299 : 3; + u64 fc_be : 1; + u64 reserved_298_299 : 2; u64 update_time : 16; u64 reserved_316_319 : 4; -#endif u64 fc_addr; /* W5 */ u64 ptr_start; /* W6 */ u64 ptr_end; /* W7 */ -#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */ - u64 reserved_571_575 : 5; - u64 err_qint_idx : 7; - u64 reserved_563 : 1; - u64 thresh_qint_idx : 7; - u64 reserved_555 : 1; - u64 thresh_up : 1; - u64 thresh_int_ena : 1; - u64 thresh_int : 1; - u64 err_int_ena : 8; - u64 err_int : 8; - u64 reserved_512_535 : 24; -#else u64 reserved_512_535 : 24; u64 err_int : 8; u64 err_int_ena : 8; @@ -369,14 +255,10 @@ struct npa_pool_s { u64 reserved_563 : 1; u64 err_qint_idx : 7; u64 reserved_571_575 : 5; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */ - u64 reserved_612_639 : 28; u64 thresh : 36; -#else - u64 thresh : 36; - u64 reserved_612_639 : 28; -#endif + u64 rsvd_615_612 : 4; + u64 fc_msh_dst : 11; + u64 reserved_627_639 : 13; u64 reserved_640_703; /* W10 */ u64 reserved_704_767; /* W11 */ u64 reserved_768_831; /* W12 */ @@ -404,6 +286,7 @@ enum nix_aq_ctype { NIX_AQ_CTYPE_MCE = 0x3, NIX_AQ_CTYPE_RSS = 0x4, NIX_AQ_CTYPE_DYNO = 0x5, + NIX_AQ_CTYPE_BAND_PROF = 0x6, }; /* NIX admin queue instruction opcodes */ @@ -418,59 +301,29 @@ enum nix_aq_instop { /* NIX admin queue instruction structure */ struct nix_aq_inst_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 doneint : 1; /* W0 */ - u64 reserved_44_62 : 19; - u64 cindex : 20; - u64 reserved_15_23 : 9; - u64 lf : 7; - u64 ctype : 4; - u64 op : 4; -#else u64 op : 4; u64 ctype : 4; - u64 lf : 7; - u64 reserved_15_23 : 9; + u64 lf : 9; + u64 reserved_17_23 : 7; u64 cindex : 20; u64 reserved_44_62 : 19; u64 doneint : 1; -#endif u64 res_addr; /* W1 */ }; /* NIX admin queue result structure */ struct nix_aq_res_s { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_17_63 : 47; /* W0 */ - u64 doneint : 1; - u64 compcode : 8; - u64 ctype : 4; - u64 op : 4; -#else u64 op : 4; u64 ctype : 4; u64 compcode : 8; u64 doneint : 1; u64 reserved_17_63 : 47; -#endif u64 reserved_64_127; /* W1 */ }; /* NIX Completion queue context structure */ struct nix_cq_ctx_s { u64 base; -#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ - u64 wrptr : 20; - u64 avg_con : 9; - u64 cint_idx : 7; - u64 cq_err : 1; - u64 qint_idx : 7; - u64 rsvd_81_83 : 3; - 
u64 bpid : 9; - u64 rsvd_69_71 : 3; - u64 bp_ena : 1; - u64 rsvd_64_67 : 4; -#else u64 rsvd_64_67 : 4; u64 bp_ena : 1; u64 rsvd_69_71 : 3; @@ -481,31 +334,10 @@ struct nix_cq_ctx_s { u64 cint_idx : 7; u64 avg_con : 9; u64 wrptr : 20; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */ - u64 update_time : 16; - u64 avg_level : 8; - u64 head : 20; - u64 tail : 20; -#else u64 tail : 20; u64 head : 20; u64 avg_level : 8; u64 update_time : 16; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */ - u64 cq_err_int_ena : 8; - u64 cq_err_int : 8; - u64 qsize : 4; - u64 rsvd_233_235 : 3; - u64 caching : 1; - u64 substream : 20; - u64 rsvd_210_211 : 2; - u64 ena : 1; - u64 drop_ena : 1; - u64 drop : 8; - u64 bp : 8; -#else u64 bp : 8; u64 drop : 8; u64 drop_ena : 1; @@ -517,20 +349,161 @@ struct nix_cq_ctx_s { u64 qsize : 4; u64 cq_err_int : 8; u64 cq_err_int_ena : 8; -#endif +}; + +/* CN10K NIX Receive queue context structure */ +struct nix_cn10k_rq_ctx_s { + u64 ena : 1; + u64 sso_ena : 1; + u64 ipsech_ena : 1; + u64 ena_wqwd : 1; + u64 cq : 20; + u64 rsvd_36_24 : 13; + u64 lenerr_dis : 1; + u64 csum_il4_dis : 1; + u64 csum_ol4_dis : 1; + u64 len_il4_dis : 1; + u64 len_il3_dis : 1; + u64 len_ol4_dis : 1; + u64 len_ol3_dis : 1; + u64 wqe_aura : 20; + u64 spb_aura : 20; + u64 lpb_aura : 20; + u64 sso_grp : 10; + u64 sso_tt : 2; + u64 pb_caching : 2; + u64 wqe_caching : 1; + u64 xqe_drop_ena : 1; + u64 spb_drop_ena : 1; + u64 lpb_drop_ena : 1; + u64 pb_stashing : 1; + u64 ipsecd_drop_ena : 1; + u64 chi_ena : 1; + u64 rsvd_127_125 : 3; + u64 band_prof_id : 10; /* W2 */ + u64 rsvd_138 : 1; + u64 policer_ena : 1; + u64 spb_sizem1 : 6; + u64 wqe_skip : 2; + u64 rsvd_150_148 : 3; + u64 spb_ena : 1; + u64 lpb_sizem1 : 12; + u64 first_skip : 7; + u64 rsvd_171 : 1; + u64 later_skip : 6; + u64 xqe_imm_size : 6; + u64 rsvd_189_184 : 6; + u64 xqe_imm_copy : 1; + u64 xqe_hdr_split : 1; + u64 xqe_drop : 8; /* W3 */ + u64 xqe_pass : 8; + u64 wqe_pool_drop : 8; + u64 wqe_pool_pass : 8; + u64 spb_aura_drop : 8; + u64 spb_aura_pass : 8; + u64 spb_pool_drop : 8; + u64 spb_pool_pass : 8; + u64 lpb_aura_drop : 8; /* W4 */ + u64 lpb_aura_pass : 8; + u64 lpb_pool_drop : 8; + u64 lpb_pool_pass : 8; + u64 rsvd_291_288 : 4; + u64 rq_int : 8; + u64 rq_int_ena : 8; + u64 qint_idx : 7; + u64 rsvd_319_315 : 5; + u64 ltag : 24; /* W5 */ + u64 good_utag : 8; + u64 bad_utag : 8; + u64 flow_tagw : 6; + u64 ipsec_vwqe : 1; + u64 vwqe_ena : 1; + u64 vwqe_wait : 8; + u64 max_vsize_exp : 4; + u64 vwqe_skip : 2; + u64 rsvd_383_382 : 2; + u64 octs : 48; /* W6 */ + u64 rsvd_447_432 : 16; + u64 pkts : 48; /* W7 */ + u64 rsvd_511_496 : 16; + u64 drop_octs : 48; /* W8 */ + u64 rsvd_575_560 : 16; + u64 drop_pkts : 48; /* W9 */ + u64 rsvd_639_624 : 16; + u64 re_pkts : 48; /* W10 */ + u64 rsvd_703_688 : 16; + u64 rsvd_767_704; /* W11 */ + u64 rsvd_831_768; /* W12 */ + u64 rsvd_895_832; /* W13 */ + u64 rsvd_959_896; /* W14 */ + u64 rsvd_1023_960; /* W15 */ +}; + +/* CN10K NIX Send queue context structure */ +struct nix_cn10k_sq_ctx_s { + u64 ena : 1; + u64 qint_idx : 6; + u64 substream : 20; + u64 sdp_mcast : 1; + u64 cq : 20; + u64 sqe_way_mask : 16; + u64 smq : 10; /* W1 */ + u64 cq_ena : 1; + u64 xoff : 1; + u64 sso_ena : 1; + u64 smq_rr_weight : 14; + u64 default_chan : 12; + u64 sqb_count : 16; + u64 rsvd_120_119 : 2; + u64 smq_rr_count_lb : 7; + u64 smq_rr_count_ub : 25; /* W2 */ + u64 sqb_aura : 20; + u64 sq_int : 8; + u64 sq_int_ena : 8; + u64 sqe_stype : 2; + u64 rsvd_191 : 1; + u64 max_sqe_size : 2; /* W3 */ + u64 cq_limit : 8; + u64 
lmt_dis : 1; + u64 mnq_dis : 1; + u64 smq_next_sq : 20; + u64 smq_lso_segnum : 8; + u64 tail_offset : 6; + u64 smenq_offset : 6; + u64 head_offset : 6; + u64 smenq_next_sqb_vld : 1; + u64 smq_pend : 1; + u64 smq_next_sq_vld : 1; + u64 rsvd_255_253 : 3; + u64 next_sqb : 64; /* W4 */ + u64 tail_sqb : 64; /* W5 */ + u64 smenq_sqb : 64; /* W6 */ + u64 smenq_next_sqb : 64; /* W7 */ + u64 head_sqb : 64; /* W8 */ + u64 rsvd_583_576 : 8; /* W9 */ + u64 vfi_lso_total : 18; + u64 vfi_lso_sizem1 : 3; + u64 vfi_lso_sb : 8; + u64 vfi_lso_mps : 14; + u64 vfi_lso_vlan0_ins_ena : 1; + u64 vfi_lso_vlan1_ins_ena : 1; + u64 vfi_lso_vld : 1; + u64 rsvd_639_630 : 10; + u64 scm_lso_rem : 18; /* W10 */ + u64 rsvd_703_658 : 46; + u64 octs : 48; /* W11 */ + u64 rsvd_767_752 : 16; + u64 pkts : 48; /* W12 */ + u64 rsvd_831_816 : 16; + u64 rsvd_895_832 : 64; /* W13 */ + u64 dropped_octs : 48; + u64 rsvd_959_944 : 16; + u64 dropped_pkts : 48; + u64 rsvd_1023_1008 : 16; }; /* NIX Receive queue context structure */ struct nix_rq_ctx_s { -#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */ - u64 wqe_aura : 20; - u64 substream : 20; - u64 cq : 20; - u64 ena_wqwd : 1; - u64 ipsech_ena : 1; - u64 sso_ena : 1; - u64 ena : 1; -#else u64 ena : 1; u64 sso_ena : 1; u64 ipsech_ena : 1; @@ -538,19 +511,6 @@ struct nix_rq_ctx_s { u64 cq : 20; u64 substream : 20; u64 wqe_aura : 20; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ - u64 rsvd_127_122 : 6; - u64 lpb_drop_ena : 1; - u64 spb_drop_ena : 1; - u64 xqe_drop_ena : 1; - u64 wqe_caching : 1; - u64 pb_caching : 2; - u64 sso_tt : 2; - u64 sso_grp : 10; - u64 lpb_aura : 20; - u64 spb_aura : 20; -#else u64 spb_aura : 20; u64 lpb_aura : 20; u64 sso_grp : 10; @@ -561,23 +521,7 @@ struct nix_rq_ctx_s { u64 spb_drop_ena : 1; u64 lpb_drop_ena : 1; u64 rsvd_127_122 : 6; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */ - u64 xqe_hdr_split : 1; - u64 xqe_imm_copy : 1; - u64 rsvd_189_184 : 6; - u64 xqe_imm_size : 6; - u64 later_skip : 6; - u64 rsvd_171 : 1; - u64 first_skip : 7; - u64 lpb_sizem1 : 12; - u64 spb_ena : 1; - u64 rsvd_150_148 : 3; - u64 wqe_skip : 2; - u64 spb_sizem1 : 6; - u64 rsvd_139_128 : 12; -#else - u64 rsvd_139_128 : 12; + u64 rsvd_139_128 : 12; /* W2 */ u64 spb_sizem1 : 6; u64 wqe_skip : 2; u64 rsvd_150_148 : 3; @@ -590,18 +534,7 @@ struct nix_rq_ctx_s { u64 rsvd_189_184 : 6; u64 xqe_imm_copy : 1; u64 xqe_hdr_split : 1; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */ - u64 spb_pool_pass : 8; - u64 spb_pool_drop : 8; - u64 spb_aura_pass : 8; - u64 spb_aura_drop : 8; - u64 wqe_pool_pass : 8; - u64 wqe_pool_drop : 8; - u64 xqe_pass : 8; - u64 xqe_drop : 8; -#else - u64 xqe_drop : 8; + u64 xqe_drop : 8; /* W3*/ u64 xqe_pass : 8; u64 wqe_pool_drop : 8; u64 wqe_pool_pass : 8; @@ -609,19 +542,7 @@ struct nix_rq_ctx_s { u64 spb_aura_pass : 8; u64 spb_pool_drop : 8; u64 spb_pool_pass : 8; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */ - u64 rsvd_319_315 : 5; - u64 qint_idx : 7; - u64 rq_int_ena : 8; - u64 rq_int : 8; - u64 rsvd_291_288 : 4; - u64 lpb_pool_pass : 8; - u64 lpb_pool_drop : 8; - u64 lpb_aura_pass : 8; - u64 lpb_aura_drop : 8; -#else - u64 lpb_aura_drop : 8; + u64 lpb_aura_drop : 8; /* W4 */ u64 lpb_aura_pass : 8; u64 lpb_pool_drop : 8; u64 lpb_pool_pass : 8; @@ -630,55 +551,21 @@ struct nix_rq_ctx_s { u64 rq_int_ena : 8; u64 qint_idx : 7; u64 rsvd_319_315 : 5; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */ - u64 rsvd_383_366 : 18; - u64 flow_tagw : 6; - u64 bad_utag : 8; - u64 good_utag : 8; - u64 ltag : 24; -#else - u64 ltag : 24; + u64 ltag : 24; 
/* W5 */ u64 good_utag : 8; u64 bad_utag : 8; u64 flow_tagw : 6; u64 rsvd_383_366 : 18; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */ - u64 rsvd_447_432 : 16; - u64 octs : 48; -#else - u64 octs : 48; + u64 octs : 48; /* W6 */ u64 rsvd_447_432 : 16; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W7 */ - u64 rsvd_511_496 : 16; - u64 pkts : 48; -#else - u64 pkts : 48; + u64 pkts : 48; /* W7 */ u64 rsvd_511_496 : 16; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */ + u64 drop_octs : 48; /* W8 */ u64 rsvd_575_560 : 16; - u64 drop_octs : 48; -#else - u64 drop_octs : 48; - u64 rsvd_575_560 : 16; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */ - u64 rsvd_639_624 : 16; - u64 drop_pkts : 48; -#else - u64 drop_pkts : 48; + u64 drop_pkts : 48; /* W9 */ u64 rsvd_639_624 : 16; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */ + u64 re_pkts : 48; /* W10 */ u64 rsvd_703_688 : 16; - u64 re_pkts : 48; -#else - u64 re_pkts : 48; - u64 rsvd_703_688 : 16; -#endif u64 rsvd_767_704; /* W11 */ u64 rsvd_831_768; /* W12 */ u64 rsvd_895_832; /* W13 */ @@ -701,30 +588,12 @@ enum nix_stype { /* NIX Send queue context structure */ struct nix_sq_ctx_s { -#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */ - u64 sqe_way_mask : 16; - u64 cq : 20; - u64 sdp_mcast : 1; - u64 substream : 20; - u64 qint_idx : 6; - u64 ena : 1; -#else u64 ena : 1; u64 qint_idx : 6; u64 substream : 20; u64 sdp_mcast : 1; u64 cq : 20; u64 sqe_way_mask : 16; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */ - u64 sqb_count : 16; - u64 default_chan : 12; - u64 smq_rr_quantum : 24; - u64 sso_ena : 1; - u64 xoff : 1; - u64 cq_ena : 1; - u64 smq : 9; -#else u64 smq : 9; u64 cq_ena : 1; u64 xoff : 1; @@ -732,37 +601,12 @@ struct nix_sq_ctx_s { u64 smq_rr_quantum : 24; u64 default_chan : 12; u64 sqb_count : 16; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */ - u64 rsvd_191 : 1; - u64 sqe_stype : 2; - u64 sq_int_ena : 8; - u64 sq_int : 8; - u64 sqb_aura : 20; - u64 smq_rr_count : 25; -#else u64 smq_rr_count : 25; u64 sqb_aura : 20; u64 sq_int : 8; u64 sq_int_ena : 8; u64 sqe_stype : 2; u64 rsvd_191 : 1; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */ - u64 rsvd_255_253 : 3; - u64 smq_next_sq_vld : 1; - u64 smq_pend : 1; - u64 smenq_next_sqb_vld : 1; - u64 head_offset : 6; - u64 smenq_offset : 6; - u64 tail_offset : 6; - u64 smq_lso_segnum : 8; - u64 smq_next_sq : 20; - u64 mnq_dis : 1; - u64 lmt_dis : 1; - u64 cq_limit : 8; - u64 max_sqe_size : 2; -#else u64 max_sqe_size : 2; u64 cq_limit : 8; u64 lmt_dis : 1; @@ -776,23 +620,11 @@ struct nix_sq_ctx_s { u64 smq_pend : 1; u64 smq_next_sq_vld : 1; u64 rsvd_255_253 : 3; -#endif u64 next_sqb : 64;/* W4 */ u64 tail_sqb : 64;/* W5 */ u64 smenq_sqb : 64;/* W6 */ u64 smenq_next_sqb : 64;/* W7 */ u64 head_sqb : 64;/* W8 */ -#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */ - u64 rsvd_639_630 : 10; - u64 vfi_lso_vld : 1; - u64 vfi_lso_vlan1_ins_ena : 1; - u64 vfi_lso_vlan0_ins_ena : 1; - u64 vfi_lso_mps : 14; - u64 vfi_lso_sb : 8; - u64 vfi_lso_sizem1 : 3; - u64 vfi_lso_total : 18; - u64 rsvd_583_576 : 8; -#else u64 rsvd_583_576 : 8; u64 vfi_lso_total : 18; u64 vfi_lso_sizem1 : 3; @@ -802,68 +634,28 @@ struct nix_sq_ctx_s { u64 vfi_lso_vlan1_ins_ena : 1; u64 vfi_lso_vld : 1; u64 rsvd_639_630 : 10; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */ - u64 rsvd_703_658 : 46; - u64 scm_lso_rem : 18; -#else u64 scm_lso_rem : 18; u64 rsvd_703_658 : 46; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W11 */ - u64 rsvd_767_752 : 16; - u64 octs : 48; -#else u64 octs : 48; u64 rsvd_767_752 : 16; 
-#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W12 */ - u64 rsvd_831_816 : 16; - u64 pkts : 48; -#else u64 pkts : 48; u64 rsvd_831_816 : 16; -#endif u64 rsvd_895_832 : 64;/* W13 */ -#if defined(__BIG_ENDIAN_BITFIELD) /* W14 */ - u64 rsvd_959_944 : 16; - u64 dropped_octs : 48; -#else u64 dropped_octs : 48; u64 rsvd_959_944 : 16; -#endif -#if defined(__BIG_ENDIAN_BITFIELD) /* W15 */ - u64 rsvd_1023_1008 : 16; - u64 dropped_pkts : 48; -#else u64 dropped_pkts : 48; u64 rsvd_1023_1008 : 16; -#endif }; /* NIX Receive side scaling entry structure*/ struct nix_rsse_s { -#if defined(__BIG_ENDIAN_BITFIELD) - uint32_t reserved_20_31 : 12; - uint32_t rq : 20; -#else uint32_t rq : 20; uint32_t reserved_20_31 : 12; -#endif }; /* NIX receive multicast/mirror entry structure */ struct nix_rx_mce_s { -#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */ - uint64_t next : 16; - uint64_t pf_func : 16; - uint64_t rsvd_31_24 : 8; - uint64_t index : 20; - uint64_t eol : 1; - uint64_t rsvd_2 : 1; - uint64_t op : 2; -#else uint64_t op : 2; uint64_t rsvd_2 : 1; uint64_t eol : 1; @@ -871,7 +663,6 @@ struct nix_rx_mce_s { uint64_t rsvd_31_24 : 8; uint64_t pf_func : 16; uint64_t next : 16; -#endif }; enum nix_lsoalg { @@ -890,15 +681,6 @@ enum nix_txlayer { }; struct nix_lso_format { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 rsvd_19_63 : 45; - u64 alg : 3; - u64 rsvd_14_15 : 2; - u64 sizem1 : 2; - u64 rsvd_10_11 : 2; - u64 layer : 2; - u64 offset : 8; -#else u64 offset : 8; u64 layer : 2; u64 rsvd_10_11 : 2; @@ -906,24 +688,9 @@ struct nix_lso_format { u64 rsvd_14_15 : 2; u64 alg : 3; u64 rsvd_19_63 : 45; -#endif }; struct nix_rx_flowkey_alg { -#if defined(__BIG_ENDIAN_BITFIELD) - u64 reserved_35_63 :29; - u64 ltype_match :4; - u64 ltype_mask :4; - u64 sel_chan :1; - u64 ena :1; - u64 reserved_24_24 :1; - u64 lid :3; - u64 bytesm1 :5; - u64 hdr_offset :8; - u64 fn_mask :1; - u64 ln_mask :1; - u64 key_offset :6; -#else u64 key_offset :6; u64 ln_mask :1; u64 fn_mask :1; @@ -936,7 +703,6 @@ struct nix_rx_flowkey_alg { u64 ltype_mask :4; u64 ltype_match :4; u64 reserved_35_63 :29; -#endif }; /* NIX VTAG size */ |
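The layouts kept by this patch enumerate each context word's fields from bit 0 upward, and the bit ranges encoded in the reserved field names (rsvd_255_253, rsvd_383_366, and so on) describe that same LSB-first ordering. The short user-space sketch below is not part of the patch; it only demonstrates, for a structure mirroring W0 of struct nix_rq_ctx_s, how such a bitfield maps onto the raw 64-bit context word on a typical little-endian ABI. The names rq_w0_demo, rq_w0_u and main are illustrative only, not driver code.

/* Mirrors the first 64-bit word of struct nix_rq_ctx_s, fields from bit 0 up. */
#include <stdint.h>
#include <stdio.h>

struct rq_w0_demo {
	uint64_t ena        : 1;   /* bit 0 */
	uint64_t sso_ena    : 1;   /* bit 1 */
	uint64_t ipsech_ena : 1;   /* bit 2 */
	uint64_t ena_wqwd   : 1;   /* bit 3 */
	uint64_t cq         : 20;  /* bits 23:4 */
	uint64_t substream  : 20;  /* bits 43:24 */
	uint64_t wqe_aura   : 20;  /* bits 63:44 */
};

union rq_w0_u {
	struct rq_w0_demo s;
	uint64_t raw;
};

int main(void)
{
	union rq_w0_u w0 = { .s = { .ena = 1, .cq = 5 } };

	/* On a little-endian build this prints raw = 0x51:
	 * ena lands in bit 0 and cq in bits 23:4, matching the comments above.
	 */
	printf("raw = 0x%llx\n", (unsigned long long)w0.raw);
	return 0;
}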
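struct nix_lso_format above describes one field-edit step of a hardware LSO format: which byte range of which header layer to patch, and how. A minimal sketch of composing an entry that would rewrite a 2-byte length field at byte offset 2 of an outer L3 header follows; the helper name and the numeric layer/alg encodings are assumptions for illustration and are not taken from this patch.

/* Illustrative only; assumes rvu_struct.h is included for struct nix_lso_format. */
#include <linux/types.h>
#include <linux/string.h>

static u64 demo_build_lso_field(void)
{
	struct nix_lso_format field = { 0 };
	u64 regval;

	field.offset = 2;   /* byte offset of the target field within its layer */
	field.layer  = 0;   /* assumed encoding for "outer L3" */
	field.sizem1 = 1;   /* field size minus one, i.e. a 2-byte field */
	field.alg    = 2;   /* assumed encoding for "add payload length" */

	/* The 64-bit image of the structure is what ultimately gets programmed
	 * into an LSO format CSR; the register accessors are not shown here.
	 */
	memcpy(&regval, &field, sizeof(regval));
	return regval;
}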
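Every reserved field in these structures exists only to keep a context word exactly 64 bits wide, so a compile-time size check is a cheap guard when the layouts are edited by hand. A sketch of such a check, assuming the full rvu_struct.h definitions are in scope; the helper name and the 16-word (128-byte) sizes for the SQ/RQ contexts are assumptions inferred from the W0..W15 comments above, not something added by this patch.

/* Hypothetical compile-time guards; not part of the patch.
 * Assumes rvu_struct.h has already been included.
 */
#include <linux/build_bug.h>

static inline void nix_ctx_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct nix_sq_ctx_s) != 128);  /* W0..W15, 16 x u64 */
	BUILD_BUG_ON(sizeof(struct nix_rq_ctx_s) != 128);  /* W0..W15, 16 x u64 */
	BUILD_BUG_ON(sizeof(struct nix_rsse_s)   != 4);    /* one 32-bit entry  */
	BUILD_BUG_ON(sizeof(struct nix_rx_mce_s) != 8);    /* one 64-bit word   */
}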