Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/af/cgx.c')
-rw-r--r--	drivers/net/ethernet/marvell/octeontx2/af/cgx.c	964
1 file changed, 802 insertions, 162 deletions
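
A note on the largest piece of churn below: the DMAC filter logic is reworked so that each enabled LMAC owns a window of `mac_to_index_bmap.max` consecutive CAM slots, addressed by the LMAC's sequential position among the enabled LMACs (`get_sequence_id_of_lmac()`) rather than by its raw hardware id. The standalone C sketch below shows only that index arithmetic; the harness, bitmap value and per-LMAC filter count are illustrative assumptions, not kernel code.

#include <stdio.h>

/* Sketch of the CAM indexing introduced below: LMACs are numbered by their
 * position among the *enabled* LMACs, so a sparse enable bitmap (say only
 * LMAC0 and LMAC2) still yields densely packed CAM windows.
 */
static int sequence_id_of_lmac(unsigned long lmac_bmap, int lmac_id)
{
	int bit, id = 0;

	for (bit = 0; bit < lmac_id; bit++)
		if (lmac_bmap & (1UL << bit))
			id++;
	return id;
}

int main(void)
{
	unsigned long lmac_bmap = 0x5;	/* LMAC0 and LMAC2 enabled (assumed) */
	int max_per_lmac = 16;		/* dmac_filter_count / lmac_count (assumed) */
	int lmac_id = 2, slot = 3;

	/* Same arithmetic as cgx_lmac_addr_add() in the diff below */
	int index = sequence_id_of_lmac(lmac_bmap, lmac_id) * max_per_lmac + slot;

	printf("LMAC%d slot %d -> CAM index %d (CAM0 offset 0x%x)\n",
	       lmac_id, slot, index, index * 0x8);
	return 0;
}
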
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index fac6474ad694..42044cd810b1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1,11 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Marvell OcteonTx2 CGX driver
  *
- * Copyright (C) 2018 Marvell International Ltd.
+ * Copyright (C) 2018 Marvell.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include <linux/acpi.h>
@@ -24,8 +21,9 @@
 #include "rvu.h"
 #include "lmac_common.h"
 
-#define DRV_NAME	"Marvell-CGX/RPM"
-#define DRV_STRING	"Marvell CGX/RPM Driver"
+#define DRV_NAME	"Marvell-CGX-RPM"
+
+#define CGX_RX_STAT_GLOBAL_INDEX	9
 
 static LIST_HEAD(cgx_list);
@@ -58,6 +56,7 @@ static const char *cgx_lmactype_string[LMAC_MODE_MAX] = {
 	[LMAC_MODE_50G_R]	= "50G_R",
 	[LMAC_MODE_100G_R]	= "100G_R",
 	[LMAC_MODE_USXGMII]	= "USXGMII",
+	[LMAC_MODE_USGMII]	= "USGMII",
 };
 
 /* CGX PHY management internal APIs */
@@ -66,7 +65,18 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
 /* Supported devices */
 static const struct pci_device_id cgx_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+			 PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_A) },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+			 PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_A) },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM,
+			 PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_B) },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+			 PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_B) },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+			 PCI_ANY_ID, PCI_SUBSYS_DEVID_CN20KA) },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM,
+			 PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF20KA) },
 	{ 0, } /* end of table */
 };
 
@@ -76,16 +86,33 @@ static bool is_dev_rpm(void *cgxd)
 {
 	struct cgx *cgx = cgxd;
 
-	return (cgx->pdev->device == PCI_DEVID_CN10K_RPM);
+	return (cgx->pdev->device == PCI_DEVID_CN10K_RPM) ||
+	       (cgx->pdev->device == PCI_DEVID_CN10KB_RPM);
 }
 
 bool is_lmac_valid(struct cgx *cgx, int lmac_id)
 {
-	if (!cgx || lmac_id < 0 || lmac_id >= MAX_LMAC_PER_CGX)
+	if (!cgx || lmac_id < 0 || lmac_id >= cgx->max_lmac_per_mac)
 		return false;
 	return test_bit(lmac_id, &cgx->lmac_bmap);
 }
 
+/* Helper function to get sequential index
+ * given the enabled LMAC of a CGX
+ */
+static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
+{
+	int tmp, id = 0;
+
+	for_each_set_bit(tmp, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
+		if (tmp == lmac_id)
+			break;
+		id++;
+	}
+
+	return id;
+}
+
 struct mac_ops *get_mac_ops(void *cgxd)
 {
 	if (!cgxd)
@@ -94,6 +121,11 @@ struct mac_ops *get_mac_ops(void *cgxd)
 	return ((struct cgx *)cgxd)->mac_ops;
 }
 
+u32 cgx_get_fifo_len(void *cgxd)
+{
+	return ((struct cgx *)cgxd)->fifo_len;
+}
+
 void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
 {
 	writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
@@ -108,7 +140,7 @@ u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
 
 struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
 {
-	if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
+	if (!cgx || lmac_id >= cgx->max_lmac_per_mac)
 		return NULL;
 
 	return cgx->lmac_idmap[lmac_id];
@@ -154,6 +186,9 @@ void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val)
 {
 	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
 
+	/* Software must not access disabled LMAC registers */
+	if (!is_lmac_valid(cgx_dev, lmac_id))
+		return;
 	cgx_write(cgx_dev, lmac_id, offset, val);
 }
 
@@ -161,6 +196,10 @@ u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset)
 {
 	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
 
+	/* Software must not access disabled LMAC registers */
+	if (!is_lmac_valid(cgx_dev, lmac_id))
+		return 0;
+
 	return cgx_read(cgx_dev, lmac_id, offset);
 }
 
@@ -184,6 +223,24 @@ u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
 	return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
 }
 
+static u8 cgx_get_nix_resetbit(struct cgx *cgx)
+{
+	int first_lmac;
+	u8 p2x;
+
+	/* non 98XX silicons supports only NIX0 block */
+	if (cgx->pdev->subsystem_device != PCI_SUBSYS_DEVID_98XX)
+		return CGX_NIX0_RESET;
+
+	first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
+	p2x = cgx_lmac_get_p2x(cgx->cgx_id, first_lmac);
+
+	if (p2x == CMR_P2X_SEL_NIX1)
+		return CGX_NIX1_RESET;
+	else
+		return CGX_NIX0_RESET;
+}
+
 /* Ensure the required lock for event queue(where asynchronous events are
  * posted) is acquired before calling this API. Else an asynchronous event(with
  * latest link status) can reach the destination before this function returns
@@ -201,47 +258,252 @@ int cgx_get_link_info(void *cgxd, int lmac_id,
 	return 0;
 }
 
-static u64 mac2u64 (u8 *mac_addr)
-{
-	u64 mac = 0;
-	int index;
-
-	for (index = ETH_ALEN - 1; index >= 0; index--)
-		mac |= ((u64)*mac_addr++) << (8 * index);
-	return mac;
-}
-
 int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
 {
 	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
 	struct mac_ops *mac_ops;
+	int index, id;
 	u64 cfg;
 
+	if (!lmac)
+		return -ENODEV;
+
+	/* access mac_ops to know csr_offset */
 	mac_ops = cgx_dev->mac_ops;
+
 	/* copy 6bytes from macaddr */
 	/* memcpy(&cfg, mac_addr, 6); */
 
-	cfg = mac2u64 (mac_addr);
+	cfg = ether_addr_to_u64(mac_addr);
 
-	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
+	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+	index = id * lmac->mac_to_index_bmap.max;
+
+	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)),
 		  cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));
 
 	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
-	cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
+	cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE |
+		CGX_DMAC_MCAST_MODE);
 	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
 
 	return 0;
 }
 
+u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id)
+{
+	struct mac_ops *mac_ops;
+	struct cgx *cgx = cgxd;
+
+	if (!cgxd || !is_lmac_valid(cgxd, lmac_id))
+		return 0;
+
+	cgx = cgxd;
+	/* Get mac_ops to know csr offset */
+	mac_ops = cgx->mac_ops;
+
+	return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+}
+
+u64 cgx_read_dmac_entry(void *cgxd, int index)
+{
+	struct mac_ops *mac_ops;
+	struct cgx *cgx;
+
+	if (!cgxd)
+		return 0;
+
+	cgx = cgxd;
+	mac_ops = cgx->mac_ops;
+	return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8)));
+}
+
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
+{
+	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+	struct mac_ops *mac_ops;
+	int index, idx;
+	u64 cfg = 0;
+	int id;
+
+	if (!lmac)
+		return -ENODEV;
+
+	mac_ops = cgx_dev->mac_ops;
+	/* Get available index where entry is to be installed */
+	idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap);
+	if (idx < 0)
+		return idx;
+
+	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+	index = id * lmac->mac_to_index_bmap.max + idx;
+
+	cfg = ether_addr_to_u64(mac_addr);
+	cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+	cfg |= ((u64)lmac_id << 49);
+	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+
+	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+	cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT);
+
+	if (is_multicast_ether_addr(mac_addr)) {
+		cfg &= ~GENMASK_ULL(2, 1);
+		cfg |= CGX_DMAC_MCAST_MODE_CAM;
+		lmac->mcast_filters_count++;
+	} else if (!lmac->mcast_filters_count) {
+		cfg |= CGX_DMAC_MCAST_MODE;
+	}
+
+	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+	return idx;
+}
+
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id)
+{
+	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+	struct mac_ops *mac_ops;
+	u8 index = 0, id;
+	u64 cfg;
+
+	if (!lmac)
+		return -ENODEV;
+
+	mac_ops = cgx_dev->mac_ops;
+	/* Restore index 0 to its default init value as done during
+	 * cgx_lmac_init
+	 */
+	set_bit(0, lmac->mac_to_index_bmap.bmap);
+
+	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+	index = id * lmac->mac_to_index_bmap.max + index;
+	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+	/* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */
+	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+	cfg &= ~CGX_DMAC_CAM_ACCEPT;
+	cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
+	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+	return 0;
+}
+
+/* Allows caller to change macaddress associated with index
+ * in dmac filter table including index 0 reserved for
+ * interface mac address
+ */
+int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
+{
+	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+	struct mac_ops *mac_ops;
+	struct lmac *lmac;
+	u64 cfg;
+	int id;
+
+	lmac = lmac_pdata(lmac_id, cgx_dev);
+	if (!lmac)
+		return -ENODEV;
+
+	mac_ops = cgx_dev->mac_ops;
+	/* Validate the index */
+	if (index >= lmac->mac_to_index_bmap.max)
+		return -EINVAL;
+
+	/* ensure index is already set */
+	if (!test_bit(index, lmac->mac_to_index_bmap.bmap))
+		return -EINVAL;
+
+	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+	index = id * lmac->mac_to_index_bmap.max + index;
+
+	cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
+	cfg &= ~CGX_RX_DMAC_ADR_MASK;
+	cfg |= ether_addr_to_u64(mac_addr);
+
+	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+	return 0;
+}
+
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
+{
+	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+	struct mac_ops *mac_ops;
+	u8 mac[ETH_ALEN];
+	u64 cfg;
+	int id;
+
+	if (!lmac)
+		return -ENODEV;
+
+	mac_ops = cgx_dev->mac_ops;
+	/* Validate the index */
+	if (index >= lmac->mac_to_index_bmap.max)
+		return -EINVAL;
+
+	/* Skip deletion for reserved index i.e. index 0 */
+	if (index == 0)
+		return 0;
+
+	rvu_free_rsrc(&lmac->mac_to_index_bmap, index);
+
+	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+	index = id * lmac->mac_to_index_bmap.max + index;
+
+	/* Read MAC address to check whether it is ucast or mcast */
+	cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
+
+	u64_to_ether_addr(cfg, mac);
+	if (is_multicast_ether_addr(mac))
+		lmac->mcast_filters_count--;
+
+	if (!lmac->mcast_filters_count) {
+		cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+		cfg &= ~GENMASK_ULL(2, 1);
+		cfg |= CGX_DMAC_MCAST_MODE;
+		cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+	}
+
+	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+	return 0;
+}
+
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
+{
+	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+
+	if (lmac)
+		return lmac->mac_to_index_bmap.max;
+
+	return 0;
+}
+
 u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
 {
 	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
 	struct mac_ops *mac_ops;
+	int index;
 	u64 cfg;
+	int id;
 
 	mac_ops = cgx_dev->mac_ops;
 
-	cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
+	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+	index = id * lmac->mac_to_index_bmap.max;
+
+	cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8);
 	return cfg & CGX_RX_DMAC_ADR_MASK;
 }
 
@@ -252,7 +514,7 @@ int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
 	if (!is_lmac_valid(cgx, lmac_id))
 		return -ENODEV;
 
-	cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
+	cgx_write(cgx, lmac_id, cgx->mac_ops->rxid_map_offset, (pkind & 0x3F));
 	return 0;
 }
 
@@ -265,18 +527,45 @@ static u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
 	return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
 }
 
+static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id)
+{
+	struct cgx *cgx = cgxd;
+	u8 num_lmacs;
+	u32 fifo_len;
+
+	fifo_len = cgx->fifo_len;
+	num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx);
+
+	switch (num_lmacs) {
+	case 1:
+		return fifo_len;
+	case 2:
+		return fifo_len / 2;
+	case 3:
+		/* LMAC0 gets half of the FIFO, reset 1/4th */
+		if (lmac_id == 0)
+			return fifo_len / 2;
+		return fifo_len / 4;
+	case 4:
+	default:
+		return fifo_len / 4;
+	}
+	return 0;
+}
+
 /* Configure CGX LMAC in internal loopback mode */
 int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
 {
 	struct cgx *cgx = cgxd;
-	u8 lmac_type;
+	struct lmac *lmac;
 	u64 cfg;
 
 	if (!is_lmac_valid(cgx, lmac_id))
 		return -ENODEV;
 
-	lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac_id);
-	if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
+	lmac = lmac_pdata(lmac_id, cgx);
+	if (lmac->lmac_type == LMAC_MODE_SGMII ||
+	    lmac->lmac_type == LMAC_MODE_QSGMII) {
 		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
 		if (enable)
 			cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
@@ -297,63 +586,127 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
 void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
 {
 	struct cgx *cgx = cgx_get_pdata(cgx_id);
+	struct lmac *lmac = lmac_pdata(lmac_id, cgx);
 	struct mac_ops *mac_ops;
+	u16 max_dmac;
+	int index, i;
 	u64 cfg = 0;
+	int id;
 
-	if (!cgx)
+	if (!cgx || !lmac)
 		return;
 
+	max_dmac = lmac->mac_to_index_bmap.max;
+	id = get_sequence_id_of_lmac(cgx, lmac_id);
+
 	mac_ops = cgx->mac_ops;
 	if (enable) {
 		/* Enable promiscuous mode on LMAC */
 		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
-		cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
-		cfg |= CGX_DMAC_BCAST_MODE;
+		cfg &= ~CGX_DMAC_CAM_ACCEPT;
+		cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
 		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
 
-		cfg = cgx_read(cgx, 0,
-			       (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
-		cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
-		cgx_write(cgx, 0,
-			  (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+		for (i = 0; i < max_dmac; i++) {
+			index = id * max_dmac + i;
+			cfg = cgx_read(cgx, 0,
+				       (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
+			cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
+			cgx_write(cgx, 0,
+				  (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg);
+		}
 	} else {
 		/* Disable promiscuous mode */
 		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
 		cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
 		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
-		cfg = cgx_read(cgx, 0,
-			       (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
-		cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
-		cgx_write(cgx, 0,
-			  (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+		for (i = 0; i < max_dmac; i++) {
+			index = id * max_dmac + i;
+			cfg = cgx_read(cgx, 0,
+				       (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
+			if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) {
+				cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+				cgx_write(cgx, 0,
+					  (CGXX_CMRX_RX_DMAC_CAM0 +
+					   index * 0x8),
+					  cfg);
+			}
+		}
 	}
 }
 
+static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
+					 u8 *tx_pause, u8 *rx_pause)
+{
+	struct cgx *cgx = cgxd;
+	u64 cfg;
+
+	if (is_dev_rpm(cgx))
+		return 0;
+
+	if (!is_lmac_valid(cgx, lmac_id))
+		return -ENODEV;
+
+	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+	*rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
+
+	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+	*tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
+	return 0;
+}
+
 /* Enable or disable forwarding received pause frames to Tx block */
 void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
 {
 	struct cgx *cgx = cgxd;
+	u8 rx_pause, tx_pause;
+	bool is_pfc_enabled;
+	struct lmac *lmac;
 	u64 cfg;
 
 	if (!cgx)
 		return;
 
-	if (enable) {
-		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
-		cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
-		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+	lmac = lmac_pdata(lmac_id, cgx);
+	if (!lmac)
+		return;
 
-		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
-		cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
-		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+	/* Pause frames are not enabled just return */
+	if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
+		return;
+
+	cgx_lmac_get_pause_frm_status(cgx, lmac_id, &rx_pause, &tx_pause);
+	is_pfc_enabled = rx_pause ? false : true;
+
+	if (enable) {
+		if (!is_pfc_enabled) {
+			cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+			cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+			cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+			cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+			cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+			cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+		} else {
+			cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+			cfg |= CGXX_SMUX_CBFC_CTL_BCK_EN;
+			cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+		}
 	} else {
-		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
-		cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
-		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
 
-		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
-		cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
-		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+		if (!is_pfc_enabled) {
+			cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+			cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+			cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+			cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+			cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+			cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+		} else {
+			cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+			cfg &= ~CGXX_SMUX_CBFC_CTL_BCK_EN;
+			cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+		}
 	}
 }
 
@@ -363,6 +716,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
 
 	if (!is_lmac_valid(cgx, lmac_id))
 		return -ENODEV;
+
+	/* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */
+	if (idx >= CGX_RX_STAT_GLOBAL_INDEX)
+		lmac_id = 0;
+
 	*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
 	return 0;
 }
@@ -382,6 +740,30 @@
 	return ((struct cgx *)cgxd)->hw_features;
 }
 
+int cgx_stats_reset(void *cgxd, int lmac_id)
+{
+	struct cgx *cgx = cgxd;
+	int stat_id;
+
+	if (!is_lmac_valid(cgx, lmac_id))
+		return -ENODEV;
+
+	for (stat_id = 0 ; stat_id < CGX_RX_STATS_COUNT; stat_id++) {
+		if (stat_id >= CGX_RX_STAT_GLOBAL_INDEX)
+		/* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */
+			cgx_write(cgx, 0,
+				  (CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0);
+		else
+			cgx_write(cgx, lmac_id,
+				  (CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0);
+	}
+
+	for (stat_id = 0 ; stat_id < CGX_TX_STATS_COUNT; stat_id++)
+		cgx_write(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (stat_id * 8), 0);
+
+	return 0;
+}
+
 static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
 {
 	if (!linfo->fec)
@@ -416,8 +798,12 @@ int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
 	int corr_reg, uncorr_reg;
 	struct cgx *cgx = cgxd;
 
-	if (!cgx || lmac_id >= cgx->lmac_count)
+	if (!is_lmac_valid(cgx, lmac_id))
 		return -ENODEV;
+
+	if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
+		return 0;
+
 	fec_stats_count =
 		cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
 	if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
@@ -446,9 +832,9 @@ int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
 
 	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
 	if (enable)
-		cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
+		cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN;
 	else
-		cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
+		cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN);
 	cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
 	return 0;
 }
@@ -473,26 +859,6 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
 	return !!(last & DATA_PKT_TX_EN);
 }
 
-static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
-					 u8 *tx_pause, u8 *rx_pause)
-{
-	struct cgx *cgx = cgxd;
-	u64 cfg;
-
-	if (is_dev_rpm(cgx))
-		return 0;
-
-	if (!is_lmac_valid(cgx, lmac_id))
-		return -ENODEV;
-
-	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
-	*rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
-
-	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
-	*tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
-	return 0;
-}
-
 static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
 				     u8 tx_pause, u8 rx_pause)
 {
@@ -505,6 +871,11 @@ static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
 	if (!is_lmac_valid(cgx, lmac_id))
 		return -ENODEV;
 
+	cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+	cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+	cfg |= rx_pause ? CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK : 0x0;
+	cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
 	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
 	cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
 	cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
@@ -533,21 +904,8 @@ static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
 	if (!is_lmac_valid(cgx, lmac_id))
 		return;
 
-	if (enable) {
-		/* Enable receive pause frames */
-		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
-		cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
-		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
-
-		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
-		cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
-		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
-
-		/* Enable pause frames transmission */
-		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
-		cfg |= CGX_SMUX_TX_CTL_L2P_BP_CONV;
-		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+	if (enable) {
 		/* Set pause time and interval */
 		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
 			  DEFAULT_PAUSE_TIME);
@@ -564,21 +922,127 @@
 		cfg &= ~0xFFFFULL;
 		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
 			  cfg | (DEFAULT_PAUSE_TIME / 2));
-	} else {
-		/* ALL pause frames received are completely ignored */
-		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
-		cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
-		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+	}
 
-		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
-		cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
-		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+	/* ALL pause frames received are completely ignored */
+	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+	cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+	cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+	cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+	cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+	cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+	/* Disable pause frames transmission */
+	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+	cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
+	cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+
+	cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
+	cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
+	cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
+	cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
+
+	/* Disable all PFC classes by default */
+	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+	cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
+	cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+}
+
+int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+		       int pfvf_idx)
+{
+	struct cgx *cgx = cgxd;
+	struct lmac *lmac;
+
+	lmac = lmac_pdata(lmac_id, cgx);
+	if (!lmac)
+		return -ENODEV;
+
+	if (!rx_pause)
+		clear_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
+	else
+		set_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
+
+	if (!tx_pause)
+		clear_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
+	else
+		set_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
+
+	/* check if other pfvfs are using flow control */
+	if (!rx_pause && bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) {
+		dev_warn(&cgx->pdev->dev,
+			 "Receive Flow control disable not permitted as its used by other PFVFs\n");
+		return -EPERM;
+	}
+
+	if (!tx_pause && bitmap_weight(lmac->tx_fc_pfvf_bmap.bmap, lmac->tx_fc_pfvf_bmap.max)) {
+		dev_warn(&cgx->pdev->dev,
+			 "Transmit Flow control disable not permitted as its used by other PFVFs\n");
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
+			u8 rx_pause, u16 pfc_en)
+{
+	struct cgx *cgx = cgxd;
+	u64 cfg;
+
+	if (!is_lmac_valid(cgx, lmac_id))
+		return -ENODEV;
+
+	/* Return as no traffic classes are requested */
+	if (tx_pause && !pfc_en)
+		return 0;
+
+	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+	pfc_en |= FIELD_GET(CGX_PFC_CLASS_MASK, cfg);
+
+	if (rx_pause) {
+		cfg |= (CGXX_SMUX_CBFC_CTL_RX_EN |
+			CGXX_SMUX_CBFC_CTL_BCK_EN |
+			CGXX_SMUX_CBFC_CTL_DRP_EN);
+	} else {
+		cfg &= ~(CGXX_SMUX_CBFC_CTL_RX_EN |
+			CGXX_SMUX_CBFC_CTL_BCK_EN |
+			CGXX_SMUX_CBFC_CTL_DRP_EN);
+	}
 
-		/* Disable pause frames transmission */
-		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
-		cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
-		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+	if (tx_pause) {
+		cfg |= CGXX_SMUX_CBFC_CTL_TX_EN;
+		cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
+	} else {
+		cfg &= ~CGXX_SMUX_CBFC_CTL_TX_EN;
+		cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
 	}
+
+	cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+
+	/* Write source MAC address which will be filled into PFC packet */
+	cfg = cgx_lmac_addr_get(cgx->cgx_id, lmac_id);
+	cgx_write(cgx, lmac_id, CGXX_SMUX_SMAC, cfg);
+
+	return 0;
+}
+
+int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
+			     u8 *rx_pause)
+{
+	struct cgx *cgx = cgxd;
+	u64 cfg;
+
+	if (!is_lmac_valid(cgx, lmac_id))
+		return -ENODEV;
+
+	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+
+	*rx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_RX_EN);
+	*tx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_TX_EN);
+
+	return 0;
 }
 
 void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
@@ -589,9 +1053,6 @@ void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
 	if (!cgx)
 		return;
 
-	if (is_dev_rpm(cgx))
-		return;
-
 	if (enable) {
 		/* Enable inbound PTP timestamping */
 		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
@@ -646,9 +1107,9 @@ int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
 	if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
 				msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
 		dev = &cgx->pdev->dev;
-		dev_err(dev, "cgx port %d:%d cmd timeout\n",
-			cgx->cgx_id, lmac->lmac_id);
-		err = -EIO;
+		dev_err(dev, "cgx port %d:%d cmd %lld timeout\n",
+			cgx->cgx_id, lmac->lmac_id, FIELD_GET(CMDREG_ID, req));
+		err = LMAC_AF_ERR_CMD_TIMEOUT;
 		goto unlock;
 	}
 
@@ -720,17 +1181,25 @@ static int cgx_link_usertable_index_map(int speed)
 static void set_mod_args(struct cgx_set_link_mode_args *args,
 			 u32 speed, u8 duplex, u8 autoneg, u64 mode)
 {
-	/* Fill default values incase of user did not pass
-	 * valid parameters
+	int mode_baseidx;
+	u8 cgx_mode;
+
+	if (args->multimode) {
+		args->mode |= mode;
+		return;
+	}
+
+	/* Derive mode_base_idx and mode fields based
+	 * on cgx_mode value
 	 */
-	if (args->duplex == DUPLEX_UNKNOWN)
-		args->duplex = duplex;
-	if (args->speed == SPEED_UNKNOWN)
-		args->speed = speed;
-	if (args->an == AUTONEG_UNKNOWN)
-		args->an = autoneg;
+	cgx_mode = find_first_bit((unsigned long *)&mode,
+				  CGX_MODE_MAX);
 	args->mode = mode;
-	args->ports = 0;
+	mode_baseidx = cgx_mode - 41;
+	if (mode_baseidx > 0) {
+		args->mode_baseidx = 1;
+		args->mode = BIT_ULL(mode_baseidx);
+	}
 }
 
 static void otx2_map_ethtool_link_modes(u64 bitmask,
@@ -738,16 +1207,16 @@
 {
 	switch (bitmask) {
 	case ETHTOOL_LINK_MODE_10baseT_Half_BIT:
-		set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+		set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII_10M_BIT));
 		break;
 	case ETHTOOL_LINK_MODE_10baseT_Full_BIT:
-		set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+		set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII_10M_BIT));
 		break;
 	case ETHTOOL_LINK_MODE_100baseT_Half_BIT:
-		set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+		set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII_100M_BIT));
 		break;
	case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
-		set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+		set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII_100M_BIT));
 		break;
 	case ETHTOOL_LINK_MODE_1000baseT_Half_BIT:
 		set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII));
@@ -825,16 +1294,22 @@ static inline void link_status_user_format(u64 lstat,
 					   struct cgx_link_user_info *linfo,
 					   struct cgx *cgx, u8 lmac_id)
 {
-	const char *lmac_string;
-
 	linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
 	linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
 	linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
 	linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
 	linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
-	linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
-	lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
-	strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
+	linfo->lmac_type_id = FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, lstat);
+
+	if (linfo->lmac_type_id >= LMAC_MODE_MAX) {
+		dev_err(&cgx->pdev->dev, "Unknown lmac_type_id %d reported by firmware on cgx port%d:%d",
+			linfo->lmac_type_id, cgx->cgx_id, lmac_id);
+		strscpy(linfo->lmac_type, "Unknown", sizeof(linfo->lmac_type));
+		return;
+	}
+
+	strscpy(linfo->lmac_type, cgx_lmactype_string[linfo->lmac_type_id],
+		sizeof(linfo->lmac_type));
 }
 
 /* Hardware event handlers */
@@ -939,7 +1414,7 @@ static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
 
 		/* Release thread waiting for completion */
 		lmac->cmd_pend = false;
-		wake_up_interruptible(&lmac->wq_cmd_cmplt);
+		wake_up(&lmac->wq_cmd_cmplt);
 		break;
 	case CGX_EVT_ASYNC:
 		if (cgx_event_is_linkevent(event))
@@ -1003,7 +1478,7 @@ int cgx_get_fwdata_base(u64 *base)
 	if (!cgx)
 		return -ENXIO;
 
-	first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
+	first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
 	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
 	err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
 	if (!err)
@@ -1013,25 +1488,36 @@
 }
 
 int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
+		      struct cgx_lmac_fwdata_s *linkmodes,
 		      int cgx_id, int lmac_id)
 {
 	struct cgx *cgx = cgxd;
 	u64 req = 0, resp;
+	u8 bit;
 
 	if (!cgx)
 		return -ENODEV;
 
-	if (args.mode)
-		otx2_map_ethtool_link_modes(args.mode, &args);
-	if (!args.speed && args.duplex && !args.an)
-		return -EINVAL;
+	for_each_set_bit(bit, args.advertising,
+			 __ETHTOOL_LINK_MODE_MASK_NBITS)
+		otx2_map_ethtool_link_modes(bit, &args);
+
+	if (args.multimode) {
+		if (linkmodes->advertised_link_modes_own != CGX_CMD_OWN_NS)
+			return -EBUSY;
+
+		linkmodes->advertised_link_modes = args.mode;
+		/* Update ownership */
+		linkmodes->advertised_link_modes_own = CGX_CMD_OWN_FIRMWARE;
+		args.mode = GENMASK_ULL(41, 0);
+	}
 
 	req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req);
 	req = FIELD_SET(CMDMODECHANGE_SPEED,
 			cgx_link_usertable_index_map(args.speed), req);
 	req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req);
 	req = FIELD_SET(CMDMODECHANGE_AN, args.an, req);
-	req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req);
+	req = FIELD_SET(CMDMODECHANGE_MODE_BASEIDX, args.mode_baseidx, req);
 	req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req);
 
 	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
@@ -1074,17 +1560,25 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
 	u64 req = 0;
 	u64 resp;
 
-	if (enable)
+	if (enable) {
 		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
-	else
-		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
+		/* On CN10K firmware offloads link bring up/down operations to ECP
+		 * On Octeontx2 link operations are handled by firmware itself
+		 * which can cause mbox errors so configure maximum time firmware
+		 * poll for Link as 1000 ms
+		 */
+		if (!is_dev_rpm(cgx))
+			req = FIELD_SET(LINKCFG_TIMEOUT, 1000, req);
+	} else {
+		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
+	}
 
 	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
 }
 
 static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
 {
-	int first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
+	int first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
 	u64 req = 0;
 
 	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
@@ -1122,7 +1616,7 @@ static void cgx_lmac_linkup_work(struct work_struct *work)
 	int i, err;
 
 	/* Do Link up for all the enabled lmacs */
-	for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
+	for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
 		err = cgx_fwi_link_change(cgx, i, true);
 		if (err)
 			dev_info(dev, "cgx port %d:%d Link up command failed\n",
@@ -1142,12 +1636,21 @@ int cgx_lmac_linkup_start(void *cgxd)
 	return 0;
 }
 
-static void cgx_lmac_get_fifolen(struct cgx *cgx)
+int cgx_lmac_reset(void *cgxd, int lmac_id, u8 pf_req_flr)
 {
+	struct cgx *cgx = cgxd;
 	u64 cfg;
 
-	cfg = cgx_read(cgx, 0, CGX_CONST);
-	cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
+	if (!is_lmac_valid(cgx, lmac_id))
+		return -ENODEV;
+
+	/* Resetting PFC related CSRs */
+	cfg = 0xff;
+	cgx_write(cgxd, lmac_id, CGXX_CMRX_RX_LOGL_XON, cfg);
+
+	if (pf_req_flr)
+		cgx_lmac_internal_loopback(cgxd, lmac_id, false);
+	return 0;
 }
 
 static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
@@ -1200,21 +1703,26 @@ unsigned long cgx_get_lmac_bmap(void *cgxd)
 
 static int cgx_lmac_init(struct cgx *cgx)
 {
+	u8 max_dmac_filters;
 	struct lmac *lmac;
+	int err, filter;
+	unsigned int i;
 	u64 lmac_list;
-	int i, err;
-
-	cgx_lmac_get_fifolen(cgx);
-
-	cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
 
 	/* lmac_list specifies which lmacs are enabled
 	 * when bit n is set to 1, LMAC[n] is enabled
 	 */
-	if (cgx->mac_ops->non_contiguous_serdes_lane)
-		lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
+	if (cgx->mac_ops->non_contiguous_serdes_lane) {
+		if (is_dev_rpm2(cgx))
+			lmac_list =
+				cgx_read(cgx, 0, RPM2_CMRX_RX_LMACS) & 0xFFULL;
+		else
+			lmac_list =
+				cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
+	}
 
-	if (cgx->lmac_count > MAX_LMAC_PER_CGX)
-		cgx->lmac_count = MAX_LMAC_PER_CGX;
+	if (cgx->lmac_count > cgx->max_lmac_per_mac)
+		cgx->lmac_count = cgx->max_lmac_per_mac;
 
 	for (i = 0; i < cgx->lmac_count; i++) {
 		lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
@@ -1225,7 +1733,7 @@
 			err = -ENOMEM;
 			goto err_lmac_free;
 		}
-		sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
+		sprintf(lmac->name, "cgx_fwi_%u_%u", cgx->cgx_id, i);
 		if (cgx->mac_ops->non_contiguous_serdes_lane) {
 			lmac->lmac_id = __ffs64(lmac_list);
 			lmac_list &= ~BIT_ULL(lmac->lmac_id);
@@ -1234,22 +1742,63 @@
 		}
 
 		lmac->cgx = cgx;
+		lmac->mac_to_index_bmap.max =
+				cgx->mac_ops->dmac_filter_count /
+				cgx->lmac_count;
+
+		max_dmac_filters = lmac->mac_to_index_bmap.max;
+
+		err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
+		if (err)
+			goto err_name_free;
+
+		/* Reserve first entry for default MAC address */
+		set_bit(0, lmac->mac_to_index_bmap.bmap);
+
+		lmac->rx_fc_pfvf_bmap.max = 128;
+		err = rvu_alloc_bitmap(&lmac->rx_fc_pfvf_bmap);
+		if (err)
+			goto err_dmac_bmap_free;
+
+		lmac->tx_fc_pfvf_bmap.max = 128;
+		err = rvu_alloc_bitmap(&lmac->tx_fc_pfvf_bmap);
+		if (err)
+			goto err_rx_fc_bmap_free;
+
 		init_waitqueue_head(&lmac->wq_cmd_cmplt);
 		mutex_init(&lmac->cmd_lock);
 		spin_lock_init(&lmac->event_cb_lock);
 		err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false);
 		if (err)
-			goto err_irq;
+			goto err_bitmap_free;
 
 		/* Add reference */
 		cgx->lmac_idmap[lmac->lmac_id] = lmac;
-		cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
 		set_bit(lmac->lmac_id, &cgx->lmac_bmap);
+		cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
+		lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id);
+
+		/* Disable stale DMAC filters for sane state */
+		for (filter = 0; filter < max_dmac_filters; filter++)
+			cgx_lmac_addr_del(cgx->cgx_id, lmac->lmac_id, filter);
+
+		/* As cgx_lmac_addr_del does not clear entry for index 0
+		 * so it needs to be done explicitly
+		 */
+		cgx_lmac_addr_reset(cgx->cgx_id, lmac->lmac_id);
 	}
 
+	/* Start X2P reset on given MAC block */
+	cgx->mac_ops->mac_x2p_reset(cgx, true);
 	return cgx_lmac_verify_fwi_version(cgx);
 
-err_irq:
+err_bitmap_free:
+	rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap);
+err_rx_fc_bmap_free:
	rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap);
+err_dmac_bmap_free:
+	rvu_free_bitmap(&lmac->mac_to_index_bmap);
+err_name_free:
 	kfree(lmac->name);
 err_lmac_free:
 	kfree(lmac);
@@ -1262,18 +1811,18 @@ static int cgx_lmac_exit(struct cgx *cgx)
 	int i;
 
 	if (cgx->cgx_cmd_workq) {
-		flush_workqueue(cgx->cgx_cmd_workq);
 		destroy_workqueue(cgx->cgx_cmd_workq);
 		cgx->cgx_cmd_workq = NULL;
 	}
 
 	/* Free all lmac related resources */
-	for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
+	for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
 		lmac = cgx->lmac_idmap[i];
 		if (!lmac)
 			continue;
 		cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
 		cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
+		kfree(lmac->mac_to_index_bmap.bmap);
 		kfree(lmac->name);
 		kfree(lmac);
 	}
@@ -1283,10 +1832,66 @@ static void cgx_populate_features(struct cgx *cgx)
 {
+	u64 cfg;
+
+	cfg = cgx_read(cgx, 0, CGX_CONST);
+	cgx->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
+	cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg);
+
 	if (is_dev_rpm(cgx))
-		cgx->hw_features = (RVU_MAC_RPM | RVU_LMAC_FEAT_FC);
+		cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
+				    RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
+	else
+		cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_HIGIG2 |
+				    RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF);
+}
+
+static u8 cgx_get_rxid_mapoffset(struct cgx *cgx)
+{
+	if (cgx->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10KB_RPM ||
+	    is_dev_rpm2(cgx))
+		return 0x80;
+	else
+		return 0x60;
+}
+
+static void cgx_x2p_reset(void *cgxd, bool enable)
+{
+	struct cgx *cgx = cgxd;
+	int lmac_id;
+	u64 cfg;
+
+	if (enable) {
+		for_each_set_bit(lmac_id, &cgx->lmac_bmap, cgx->max_lmac_per_mac)
+			cgx->mac_ops->mac_enadis_rx(cgx, lmac_id, false);
+
+		usleep_range(1000, 2000);
+
+		cfg = cgx_read(cgx, 0, CGXX_CMR_GLOBAL_CONFIG);
+		cfg |= cgx_get_nix_resetbit(cgx) | CGX_NSCI_DROP;
+		cgx_write(cgx, 0, CGXX_CMR_GLOBAL_CONFIG, cfg);
+	} else {
+		cfg = cgx_read(cgx, 0, CGXX_CMR_GLOBAL_CONFIG);
+		cfg &= ~(cgx_get_nix_resetbit(cgx) | CGX_NSCI_DROP);
+		cgx_write(cgx, 0, CGXX_CMR_GLOBAL_CONFIG, cfg);
+	}
+}
+
+static int cgx_enadis_rx(void *cgxd, int lmac_id, bool enable)
+{
+	struct cgx *cgx = cgxd;
+	u64 cfg;
+
+	if (!is_lmac_valid(cgx, lmac_id))
+		return -ENODEV;
+
+	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
+	if (enable)
+		cfg |= DATA_PKT_RX_EN;
 	else
-		cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
+		cfg &= ~DATA_PKT_RX_EN;
+	cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
+	return 0;
 }
 
 static struct mac_ops cgx_mac_ops = {
@@ -1301,15 +1906,27 @@
 	.non_contiguous_serdes_lane = false,
 	.rx_stats_cnt = 9,
 	.tx_stats_cnt = 18,
+	.dmac_filter_count = 32,
 	.get_nr_lmacs = cgx_get_nr_lmacs,
 	.get_lmac_type = cgx_get_lmac_type,
+	.lmac_fifo_len = cgx_get_lmac_fifo_len,
 	.mac_lmac_intl_lbk = cgx_lmac_internal_loopback,
 	.mac_get_rx_stats = cgx_get_rx_stats,
 	.mac_get_tx_stats = cgx_get_tx_stats,
+	.get_fec_stats = cgx_get_fec_stats,
 	.mac_enadis_rx_pause_fwding = cgx_lmac_enadis_rx_pause_fwding,
 	.mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status,
 	.mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
 	.mac_pause_frm_config = cgx_lmac_pause_frm_config,
+	.mac_enadis_ptp_config = cgx_lmac_ptp_config,
+	.mac_rx_tx_enable = cgx_lmac_rx_tx_enable,
+	.mac_tx_enable = cgx_lmac_tx_enable,
+	.pfc_config = cgx_lmac_pfc_config,
+	.mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg,
+	.mac_reset = cgx_lmac_reset,
+	.mac_stats_reset = cgx_stats_reset,
+	.mac_x2p_reset = cgx_x2p_reset,
+	.mac_enadis_rx = cgx_enadis_rx,
 };
 
 static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -1326,11 +1943,13 @@
 	pci_set_drvdata(pdev, cgx);
 
 	/* Use mac_ops to get MAC specific features */
-	if (pdev->device == PCI_DEVID_CN10K_RPM)
-		cgx->mac_ops = rpm_get_mac_ops();
+	if (is_dev_rpm(cgx))
+		cgx->mac_ops = rpm_get_mac_ops(cgx);
 	else
 		cgx->mac_ops = &cgx_mac_ops;
 
+	cgx->mac_ops->rxid_map_offset = cgx_get_rxid_mapoffset(cgx);
+
 	err = pci_enable_device(pdev);
 	if (err) {
 		dev_err(dev, "Failed to enable PCI device\n");
@@ -1344,6 +1963,12 @@
 		goto err_disable_device;
 	}
 
+	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+	if (err) {
+		dev_err(dev, "DMA mask config failed, abort\n");
+		goto err_release_regions;
+	}
+
 	/* MAP configuration registers */
 	cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
 	if (!cgx->reg_base) {
@@ -1352,9 +1977,24 @@
 		goto err_release_regions;
 	}
 
+	if (!is_cn20k(pdev) &&
+	    !is_cgx_mapped_to_nix(pdev->subsystem_device, cgx->cgx_id)) {
+		dev_notice(dev, "CGX %d not mapped to NIX, skipping probe\n",
+			   cgx->cgx_id);
+		err = -ENODEV;
+		goto err_release_regions;
+	}
+
+	cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
+	if (!cgx->lmac_count) {
+		dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id);
+		err = -EOPNOTSUPP;
+		goto err_release_regions;
+	}
+
 	nvec = pci_msix_vec_count(cgx->pdev);
 	err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
-	if (err < 0 || err != nvec) {
+	if (err < 0) {
 		dev_err(dev, "Request for %d msix vectors failed, err %d\n",
 			nvec, err);
 		goto err_release_regions;
@@ -1365,7 +2005,7 @@
 
 	/* init wq for processing linkup requests */
 	INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
-	cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
+	cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", WQ_PERCPU, 0);
 	if (!cgx->cgx_cmd_workq) {
 		dev_err(dev, "alloc workqueue failed for cgx cmd");
 		err = -ENOMEM;
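
As a companion to `cgx_get_lmac_fifo_len()` in the diff above: the receive FIFO size reported by the CGX_CONST CSR is carved up by active LMAC count, with the three-LMAC case giving LMAC0 half and the remaining LMACs a quarter each. Below is a minimal standalone sketch of that split; the FIFO size used here is an assumed example value, not read from hardware.

#include <stdio.h>

/* Mirrors the switch statement in cgx_get_lmac_fifo_len() above. */
static unsigned int lmac_fifo_len(unsigned int fifo_len,
				  unsigned int num_lmacs,
				  unsigned int lmac_id)
{
	switch (num_lmacs) {
	case 1:
		return fifo_len;
	case 2:
		return fifo_len / 2;
	case 3:
		/* LMAC0 gets half of the FIFO, the rest get a quarter each */
		return lmac_id == 0 ? fifo_len / 2 : fifo_len / 4;
	default:
		return fifo_len / 4;
	}
}

int main(void)
{
	unsigned int fifo = 0x10000;	/* assumed RX FIFO size in bytes */
	unsigned int id;

	for (id = 0; id < 3; id++)
		printf("3 LMACs: LMAC%u gets 0x%x\n",
		       id, lmac_fifo_len(fifo, 3, id));
	return 0;
}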
