Diffstat (limited to 'drivers/net/ethernet/broadcom/bnge')
17 files changed, 4131 insertions, 0 deletions
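The firmware message helpers added in bnge_hwrm.h follow a fixed init/hold/send/drop pattern that every routine in bnge_hwrm_lib.c below uses. A minimal caller sketch, not part of the patch itself; it simply reuses the HWRM_FUNC_QCAPS request that bnge_hwrm_func_qcaps() issues, with the structures coming from linux/bnxt/hsi.h:

/* Hypothetical example, not in this patch: query function capabilities
 * using the HWRM helpers introduced below.
 */
static int example_query_qcaps(struct bnge_dev *bd)
{
	struct hwrm_func_qcaps_output *resp;
	struct hwrm_func_qcaps_input *req;
	int rc;

	/* allocate request/response buffers from the HWRM DMA pool */
	rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCAPS);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);		/* target this function */
	resp = bnge_hwrm_req_hold(bd, req);	/* keep response valid after send */
	rc = bnge_hwrm_req_send(bd, req);	/* write to channel, ring doorbell, wait */
	if (!rc)
		dev_info(bd->dev, "fid 0x%x\n", le16_to_cpu(resp->fid));
	bnge_hwrm_req_drop(bd, req);		/* release request and response */
	return rc;
}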
diff --git a/drivers/net/ethernet/broadcom/bnge/Makefile b/drivers/net/ethernet/broadcom/bnge/Makefile new file mode 100644 index 000000000000..6142d9c57f49 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnge/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_BNGE) += bng_en.o + +bng_en-y := bnge_core.o \ + bnge_devlink.o \ + bnge_hwrm.o \ + bnge_hwrm_lib.o \ + bnge_rmem.o \ + bnge_resc.o \ + bnge_netdev.o \ + bnge_ethtool.o diff --git a/drivers/net/ethernet/broadcom/bnge/bnge.h b/drivers/net/ethernet/broadcom/bnge/bnge.h new file mode 100644 index 000000000000..6fb3683b6b04 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnge/bnge.h @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2025 Broadcom */ + +#ifndef _BNGE_H_ +#define _BNGE_H_ + +#define DRV_NAME "bng_en" +#define DRV_SUMMARY "Broadcom 800G Ethernet Linux Driver" + +#include <linux/etherdevice.h> +#include <linux/bnxt/hsi.h> +#include "bnge_rmem.h" +#include "bnge_resc.h" + +#define DRV_VER_MAJ 1 +#define DRV_VER_MIN 15 +#define DRV_VER_UPD 1 + +extern char bnge_driver_name[]; + +enum board_idx { + BCM57708, +}; + +struct bnge_pf_info { + u16 fw_fid; + u16 port_id; + u8 mac_addr[ETH_ALEN]; +}; + +#define INVALID_HW_RING_ID ((u16)-1) + +enum { + BNGE_FW_CAP_SHORT_CMD = BIT_ULL(0), + BNGE_FW_CAP_LLDP_AGENT = BIT_ULL(1), + BNGE_FW_CAP_DCBX_AGENT = BIT_ULL(2), + BNGE_FW_CAP_IF_CHANGE = BIT_ULL(3), + BNGE_FW_CAP_KONG_MB_CHNL = BIT_ULL(4), + BNGE_FW_CAP_ERROR_RECOVERY = BIT_ULL(5), + BNGE_FW_CAP_PKG_VER = BIT_ULL(6), + BNGE_FW_CAP_CFA_ADV_FLOW = BIT_ULL(7), + BNGE_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 = BIT_ULL(8), + BNGE_FW_CAP_PCIE_STATS_SUPPORTED = BIT_ULL(9), + BNGE_FW_CAP_EXT_STATS_SUPPORTED = BIT_ULL(10), + BNGE_FW_CAP_ERR_RECOVER_RELOAD = BIT_ULL(11), + BNGE_FW_CAP_HOT_RESET = BIT_ULL(12), + BNGE_FW_CAP_RX_ALL_PKT_TS = BIT_ULL(13), + BNGE_FW_CAP_VLAN_RX_STRIP = BIT_ULL(14), + BNGE_FW_CAP_VLAN_TX_INSERT = BIT_ULL(15), + BNGE_FW_CAP_EXT_HW_STATS_SUPPORTED = BIT_ULL(16), + BNGE_FW_CAP_LIVEPATCH = BIT_ULL(17), + BNGE_FW_CAP_HOT_RESET_IF = BIT_ULL(18), + BNGE_FW_CAP_RING_MONITOR = BIT_ULL(19), + BNGE_FW_CAP_DBG_QCAPS = BIT_ULL(20), + BNGE_FW_CAP_THRESHOLD_TEMP_SUPPORTED = BIT_ULL(21), + BNGE_FW_CAP_DFLT_VLAN_TPID_PCP = BIT_ULL(22), + BNGE_FW_CAP_VNIC_TUNNEL_TPA = BIT_ULL(23), + BNGE_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO = BIT_ULL(24), + BNGE_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 = BIT_ULL(25), + BNGE_FW_CAP_VNIC_RE_FLUSH = BIT_ULL(26), +}; + +enum { + BNGE_EN_ROCE_V1 = BIT_ULL(0), + BNGE_EN_ROCE_V2 = BIT_ULL(1), + BNGE_EN_STRIP_VLAN = BIT_ULL(2), + BNGE_EN_SHARED_CHNL = BIT_ULL(3), + BNGE_EN_UDP_GSO_SUPP = BIT_ULL(4), +}; + +#define BNGE_EN_ROCE (BNGE_EN_ROCE_V1 | BNGE_EN_ROCE_V2) + +enum { + BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA = BIT(0), + BNGE_RSS_CAP_UDP_RSS_CAP = BIT(1), + BNGE_RSS_CAP_NEW_RSS_CAP = BIT(2), + BNGE_RSS_CAP_RSS_TCAM = BIT(3), + BNGE_RSS_CAP_AH_V4_RSS_CAP = BIT(4), + BNGE_RSS_CAP_AH_V6_RSS_CAP = BIT(5), + BNGE_RSS_CAP_ESP_V4_RSS_CAP = BIT(6), + BNGE_RSS_CAP_ESP_V6_RSS_CAP = BIT(7), +}; + +#define BNGE_MAX_QUEUE 8 +struct bnge_queue_info { + u8 queue_id; + u8 queue_profile; +}; + +struct bnge_dev { + struct device *dev; + struct pci_dev *pdev; + struct net_device *netdev; + u64 dsn; +#define BNGE_VPD_FLD_LEN 32 + char board_partno[BNGE_VPD_FLD_LEN]; + char board_serialno[BNGE_VPD_FLD_LEN]; + + void __iomem *bar0; + void __iomem *bar1; + + u16 chip_num; + u8 chip_rev; + + int db_offset; /* db_offset within db_size */ + int db_size; + + /* HWRM members */ + u16 hwrm_cmd_seq; + 
u16 hwrm_cmd_kong_seq; + struct dma_pool *hwrm_dma_pool; + struct hlist_head hwrm_pending_list; + u16 hwrm_max_req_len; + u16 hwrm_max_ext_req_len; + unsigned int hwrm_cmd_timeout; + unsigned int hwrm_cmd_max_timeout; + struct mutex hwrm_cmd_lock; /* serialize hwrm messages */ + + struct hwrm_ver_get_output ver_resp; +#define FW_VER_STR_LEN 32 + char fw_ver_str[FW_VER_STR_LEN]; + char hwrm_ver_supp[FW_VER_STR_LEN]; + char nvm_cfg_ver[FW_VER_STR_LEN]; + u64 fw_ver_code; +#define BNGE_FW_VER_CODE(maj, min, bld, rsv) \ + ((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv)) + + struct bnge_pf_info pf; + + unsigned long state; +#define BNGE_STATE_DRV_REGISTERED 0 + + u64 fw_cap; + + /* Backing stores */ + struct bnge_ctx_mem_info *ctx; + + u64 flags; + + struct bnge_hw_resc hw_resc; + + u16 tso_max_segs; + + int max_fltr; +#define BNGE_L2_FLTR_MAX_FLTR 1024 + + u32 *rss_indir_tbl; +#define BNGE_RSS_TABLE_ENTRIES 64 +#define BNGE_RSS_TABLE_SIZE (BNGE_RSS_TABLE_ENTRIES * 4) +#define BNGE_RSS_TABLE_MAX_TBL 8 +#define BNGE_MAX_RSS_TABLE_SIZE \ + (BNGE_RSS_TABLE_SIZE * BNGE_RSS_TABLE_MAX_TBL) +#define BNGE_MAX_RSS_TABLE_ENTRIES \ + (BNGE_RSS_TABLE_ENTRIES * BNGE_RSS_TABLE_MAX_TBL) + u16 rss_indir_tbl_entries; + + u32 rss_cap; + + u16 rx_nr_rings; + u16 tx_nr_rings; + u16 tx_nr_rings_per_tc; + /* Number of NQs */ + u16 nq_nr_rings; + + /* Aux device resources */ + u16 aux_num_msix; + u16 aux_num_stat_ctxs; + + u16 max_mtu; +#define BNGE_MAX_MTU 9500 + + u16 hw_ring_stats_size; +#define BNGE_NUM_RX_RING_STATS 8 +#define BNGE_NUM_TX_RING_STATS 8 +#define BNGE_NUM_TPA_RING_STATS 6 +#define BNGE_RING_STATS_SIZE \ + ((BNGE_NUM_RX_RING_STATS + BNGE_NUM_TX_RING_STATS + \ + BNGE_NUM_TPA_RING_STATS) * 8) + + u16 max_tpa_v2; +#define BNGE_SUPPORTS_TPA(bd) ((bd)->max_tpa_v2) + + u8 num_tc; + u8 max_tc; + u8 max_lltc; /* lossless TCs */ + struct bnge_queue_info q_info[BNGE_MAX_QUEUE]; + u8 tc_to_qidx[BNGE_MAX_QUEUE]; + u8 q_ids[BNGE_MAX_QUEUE]; + u8 max_q; + u8 port_count; + + struct bnge_irq *irq_tbl; + u16 irqs_acquired; +}; + +static inline bool bnge_is_roce_en(struct bnge_dev *bd) +{ + return bd->flags & BNGE_EN_ROCE; +} + +static inline bool bnge_is_agg_reqd(struct bnge_dev *bd) +{ + if (bd->netdev) { + struct bnge_net *bn = netdev_priv(bd->netdev); + + if (bn->priv_flags & BNGE_NET_EN_TPA || + bn->priv_flags & BNGE_NET_EN_JUMBO) + return true; + else + return false; + } + + return true; +} + +bool bnge_aux_registered(struct bnge_dev *bd); + +#endif /* _BNGE_H_ */ diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_core.c b/drivers/net/ethernet/broadcom/bnge/bnge_core.c new file mode 100644 index 000000000000..68da656f2894 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnge/bnge_core.c @@ -0,0 +1,388 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2025 Broadcom. 
+ +#include <linux/init.h> +#include <linux/crash_dump.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "bnge.h" +#include "bnge_devlink.h" +#include "bnge_hwrm.h" +#include "bnge_hwrm_lib.h" + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION(DRV_SUMMARY); + +char bnge_driver_name[] = DRV_NAME; + +static const struct { + char *name; +} board_info[] = { + [BCM57708] = { "Broadcom BCM57708 50Gb/100Gb/200Gb/400Gb/800Gb Ethernet" }, +}; + +static const struct pci_device_id bnge_pci_tbl[] = { + { PCI_VDEVICE(BROADCOM, 0x1780), .driver_data = BCM57708 }, + /* Required last entry */ + {0, } +}; +MODULE_DEVICE_TABLE(pci, bnge_pci_tbl); + +static void bnge_print_device_info(struct pci_dev *pdev, enum board_idx idx) +{ + struct device *dev = &pdev->dev; + + dev_info(dev, "%s found at mem %lx\n", board_info[idx].name, + (long)pci_resource_start(pdev, 0)); + + pcie_print_link_status(pdev); +} + +bool bnge_aux_registered(struct bnge_dev *bd) +{ + return false; +} + +static void bnge_nvm_cfg_ver_get(struct bnge_dev *bd) +{ + struct hwrm_nvm_get_dev_info_output nvm_info; + + if (!bnge_hwrm_nvm_dev_info(bd, &nvm_info)) + snprintf(bd->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d", + nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min, + nvm_info.nvm_cfg_ver_upd); +} + +static int bnge_func_qcaps(struct bnge_dev *bd) +{ + int rc; + + rc = bnge_hwrm_func_qcaps(bd); + if (rc) + return rc; + + rc = bnge_hwrm_queue_qportcfg(bd); + if (rc) { + dev_err(bd->dev, "query qportcfg failure rc: %d\n", rc); + return rc; + } + + rc = bnge_hwrm_func_resc_qcaps(bd); + if (rc) { + dev_err(bd->dev, "query resc caps failure rc: %d\n", rc); + return rc; + } + + rc = bnge_hwrm_func_qcfg(bd); + if (rc) { + dev_err(bd->dev, "query config failure rc: %d\n", rc); + return rc; + } + + rc = bnge_hwrm_vnic_qcaps(bd); + if (rc) { + dev_err(bd->dev, "vnic caps failure rc: %d\n", rc); + return rc; + } + + return 0; +} + +static void bnge_fw_unregister_dev(struct bnge_dev *bd) +{ + /* ctx mem free after unrgtr only */ + bnge_hwrm_func_drv_unrgtr(bd); + bnge_free_ctx_mem(bd); +} + +static int bnge_fw_register_dev(struct bnge_dev *bd) +{ + int rc; + + bd->fw_cap = 0; + rc = bnge_hwrm_ver_get(bd); + if (rc) { + dev_err(bd->dev, "Get Version command failed rc: %d\n", rc); + return rc; + } + + bnge_nvm_cfg_ver_get(bd); + + rc = bnge_hwrm_func_reset(bd); + if (rc) { + dev_err(bd->dev, "Failed to reset function rc: %d\n", rc); + return rc; + } + + bnge_hwrm_fw_set_time(bd); + + rc = bnge_hwrm_func_drv_rgtr(bd); + if (rc) { + dev_err(bd->dev, "Failed to rgtr with firmware rc: %d\n", rc); + return rc; + } + + rc = bnge_alloc_ctx_mem(bd); + if (rc) { + dev_err(bd->dev, "Failed to allocate ctx mem rc: %d\n", rc); + goto err_func_unrgtr; + } + + /* Get the resources and configuration from firmware */ + rc = bnge_func_qcaps(bd); + if (rc) { + dev_err(bd->dev, "Failed initial configuration rc: %d\n", rc); + rc = -ENODEV; + goto err_func_unrgtr; + } + + return 0; + +err_func_unrgtr: + bnge_fw_unregister_dev(bd); + return rc; +} + +static void bnge_pci_disable(struct pci_dev *pdev) +{ + pci_release_regions(pdev); + if (pci_is_enabled(pdev)) + pci_disable_device(pdev); +} + +static int bnge_pci_enable(struct pci_dev *pdev) +{ + int rc; + + rc = pci_enable_device(pdev); + if (rc) { + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); + return rc; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, + "Cannot find PCI device base address, aborting\n"); + rc = -ENODEV; + goto err_pci_disable; + } + + rc = 
pci_request_regions(pdev, bnge_driver_name); + if (rc) { + dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); + goto err_pci_disable; + } + + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + + pci_set_master(pdev); + + return 0; + +err_pci_disable: + pci_disable_device(pdev); + return rc; +} + +static void bnge_unmap_bars(struct pci_dev *pdev) +{ + struct bnge_dev *bd = pci_get_drvdata(pdev); + + if (bd->bar1) { + pci_iounmap(pdev, bd->bar1); + bd->bar1 = NULL; + } + + if (bd->bar0) { + pci_iounmap(pdev, bd->bar0); + bd->bar0 = NULL; + } +} + +static void bnge_set_max_func_irqs(struct bnge_dev *bd, + unsigned int max_irqs) +{ + bd->hw_resc.max_irqs = max_irqs; +} + +static int bnge_get_max_irq(struct pci_dev *pdev) +{ + u16 ctrl; + + pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); + return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1; +} + +static int bnge_map_db_bar(struct bnge_dev *bd) +{ + if (!bd->db_size) + return -ENODEV; + + bd->bar1 = pci_iomap(bd->pdev, 2, bd->db_size); + if (!bd->bar1) + return -ENOMEM; + + return 0; +} + +static int bnge_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + unsigned int max_irqs; + struct bnge_dev *bd; + int rc; + + if (pci_is_bridge(pdev)) + return -ENODEV; + + if (!pdev->msix_cap) { + dev_err(&pdev->dev, "MSIX capability missing, aborting\n"); + return -ENODEV; + } + + if (is_kdump_kernel()) { + pci_clear_master(pdev); + pcie_flr(pdev); + } + + rc = bnge_pci_enable(pdev); + if (rc) + return rc; + + bnge_print_device_info(pdev, ent->driver_data); + + bd = bnge_devlink_alloc(pdev); + if (!bd) { + dev_err(&pdev->dev, "Devlink allocation failed\n"); + rc = -ENOMEM; + goto err_pci_disable; + } + + bd->bar0 = pci_ioremap_bar(pdev, 0); + if (!bd->bar0) { + dev_err(&pdev->dev, "Failed mapping BAR-0, aborting\n"); + rc = -ENOMEM; + goto err_devl_free; + } + + rc = bnge_init_hwrm_resources(bd); + if (rc) + goto err_bar_unmap; + + rc = bnge_fw_register_dev(bd); + if (rc) { + dev_err(&pdev->dev, "Failed to register with firmware rc = %d\n", rc); + goto err_hwrm_cleanup; + } + + bnge_devlink_register(bd); + + max_irqs = bnge_get_max_irq(pdev); + bnge_set_max_func_irqs(bd, max_irqs); + + bnge_aux_init_dflt_config(bd); + + rc = bnge_net_init_dflt_config(bd); + if (rc) { + dev_err(&pdev->dev, "Error setting up default cfg to netdev rc = %d\n", + rc); + goto err_fw_reg; + } + + rc = bnge_map_db_bar(bd); + if (rc) { + dev_err(&pdev->dev, "Failed mapping doorbell BAR rc = %d, aborting\n", + rc); + goto err_config_uninit; + } + + rc = bnge_alloc_irqs(bd); + if (rc) { + dev_err(&pdev->dev, "Error IRQ allocation rc = %d\n", rc); + goto err_config_uninit; + } + + rc = bnge_netdev_alloc(bd, max_irqs); + if (rc) + goto err_free_irq; + + pci_save_state(pdev); + + return 0; + +err_free_irq: + bnge_free_irqs(bd); + +err_config_uninit: + bnge_net_uninit_dflt_config(bd); + +err_fw_reg: + bnge_devlink_unregister(bd); + bnge_fw_unregister_dev(bd); + +err_hwrm_cleanup: + bnge_cleanup_hwrm_resources(bd); + +err_bar_unmap: + bnge_unmap_bars(pdev); + +err_devl_free: + bnge_devlink_free(bd); + +err_pci_disable: + bnge_pci_disable(pdev); + return rc; +} + +static void bnge_remove_one(struct pci_dev *pdev) +{ + struct bnge_dev *bd = pci_get_drvdata(pdev); + + bnge_netdev_free(bd); + + bnge_free_irqs(bd); + + bnge_net_uninit_dflt_config(bd); + + bnge_devlink_unregister(bd); + + bnge_fw_unregister_dev(bd); + + bnge_cleanup_hwrm_resources(bd); + + bnge_unmap_bars(pdev); + + bnge_devlink_free(bd); + + bnge_pci_disable(pdev); +} + +static 
void bnge_shutdown(struct pci_dev *pdev) +{ + pci_disable_device(pdev); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, 0); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +static struct pci_driver bnge_driver = { + .name = bnge_driver_name, + .id_table = bnge_pci_tbl, + .probe = bnge_probe_one, + .remove = bnge_remove_one, + .shutdown = bnge_shutdown, +}; + +static int __init bnge_init_module(void) +{ + return pci_register_driver(&bnge_driver); +} +module_init(bnge_init_module); + +static void __exit bnge_exit_module(void) +{ + pci_unregister_driver(&bnge_driver); +} +module_exit(bnge_exit_module); diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c new file mode 100644 index 000000000000..a987afebd64d --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.c @@ -0,0 +1,306 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2025 Broadcom. + +#include <linux/unaligned.h> +#include <linux/pci.h> +#include <linux/types.h> +#include <net/devlink.h> + +#include "bnge.h" +#include "bnge_devlink.h" +#include "bnge_hwrm_lib.h" + +static int bnge_dl_info_put(struct bnge_dev *bd, struct devlink_info_req *req, + enum bnge_dl_version_type type, const char *key, + char *buf) +{ + if (!strlen(buf)) + return 0; + + if (!strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_NCSI) || + !strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_ROCE)) + return 0; + + switch (type) { + case BNGE_VERSION_FIXED: + return devlink_info_version_fixed_put(req, key, buf); + case BNGE_VERSION_RUNNING: + return devlink_info_version_running_put(req, key, buf); + case BNGE_VERSION_STORED: + return devlink_info_version_stored_put(req, key, buf); + } + + return 0; +} + +static void bnge_vpd_read_info(struct bnge_dev *bd) +{ + struct pci_dev *pdev = bd->pdev; + unsigned int vpd_size, kw_len; + int pos, size; + u8 *vpd_data; + + vpd_data = pci_vpd_alloc(pdev, &vpd_size); + if (IS_ERR(vpd_data)) { + pci_warn(pdev, "Unable to read VPD\n"); + return; + } + + pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, + PCI_VPD_RO_KEYWORD_PARTNO, &kw_len); + if (pos < 0) + goto read_sn; + + size = min_t(int, kw_len, BNGE_VPD_FLD_LEN - 1); + memcpy(bd->board_partno, &vpd_data[pos], size); + +read_sn: + pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, + PCI_VPD_RO_KEYWORD_SERIALNO, + &kw_len); + if (pos < 0) + goto exit; + + size = min_t(int, kw_len, BNGE_VPD_FLD_LEN - 1); + memcpy(bd->board_serialno, &vpd_data[pos], size); + +exit: + kfree(vpd_data); +} + +#define HWRM_FW_VER_STR_LEN 16 + +static int bnge_devlink_info_get(struct devlink *devlink, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct hwrm_nvm_get_dev_info_output nvm_dev_info; + struct bnge_dev *bd = devlink_priv(devlink); + struct hwrm_ver_get_output *ver_resp; + char mgmt_ver[FW_VER_STR_LEN]; + char roce_ver[FW_VER_STR_LEN]; + char ncsi_ver[FW_VER_STR_LEN]; + char buf[32]; + + int rc; + + if (bd->dsn) { + char buf[32]; + u8 dsn[8]; + int rc; + + put_unaligned_le64(bd->dsn, dsn); + sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X", + dsn[7], dsn[6], dsn[5], dsn[4], + dsn[3], dsn[2], dsn[1], dsn[0]); + rc = devlink_info_serial_number_put(req, buf); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Failed to set dsn"); + return rc; + } + } + + if (strlen(bd->board_serialno)) { + rc = devlink_info_board_serial_number_put(req, + bd->board_serialno); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to set board serial number"); + return rc; + } + } + + rc = 
bnge_dl_info_put(bd, req, BNGE_VERSION_FIXED, + DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, + bd->board_partno); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Failed to set board part number"); + return rc; + } + + /* More information from HWRM ver get command */ + sprintf(buf, "%X", bd->chip_num); + rc = bnge_dl_info_put(bd, req, BNGE_VERSION_FIXED, + DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Failed to set asic id"); + return rc; + } + + ver_resp = &bd->ver_resp; + sprintf(buf, "%c%d", 'A' + ver_resp->chip_rev, ver_resp->chip_metal); + rc = bnge_dl_info_put(bd, req, BNGE_VERSION_FIXED, + DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Failed to set asic info"); + return rc; + } + + rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING, + DEVLINK_INFO_VERSION_GENERIC_FW_PSID, + bd->nvm_cfg_ver); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Failed to set firmware version"); + return rc; + } + + buf[0] = 0; + strncat(buf, ver_resp->active_pkg_name, HWRM_FW_VER_STR_LEN); + rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING, + DEVLINK_INFO_VERSION_GENERIC_FW, buf); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to set firmware generic version"); + return rc; + } + + if (ver_resp->flags & VER_GET_RESP_FLAGS_EXT_VER_AVAIL) { + snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->hwrm_fw_major, ver_resp->hwrm_fw_minor, + ver_resp->hwrm_fw_build, ver_resp->hwrm_fw_patch); + + snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->mgmt_fw_major, ver_resp->mgmt_fw_minor, + ver_resp->mgmt_fw_build, ver_resp->mgmt_fw_patch); + + snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->roce_fw_major, ver_resp->roce_fw_minor, + ver_resp->roce_fw_build, ver_resp->roce_fw_patch); + } else { + snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->hwrm_fw_maj_8b, ver_resp->hwrm_fw_min_8b, + ver_resp->hwrm_fw_bld_8b, ver_resp->hwrm_fw_rsvd_8b); + + snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->mgmt_fw_maj_8b, ver_resp->mgmt_fw_min_8b, + ver_resp->mgmt_fw_bld_8b, ver_resp->mgmt_fw_rsvd_8b); + + snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->roce_fw_maj_8b, ver_resp->roce_fw_min_8b, + ver_resp->roce_fw_bld_8b, ver_resp->roce_fw_rsvd_8b); + } + rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING, + DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to set firmware mgmt version"); + return rc; + } + + rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING, + DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API, + bd->hwrm_ver_supp); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to set firmware mgmt api version"); + return rc; + } + + rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING, + DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to set ncsi firmware version"); + return rc; + } + + rc = bnge_dl_info_put(bd, req, BNGE_VERSION_RUNNING, + DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Failed to set roce firmware version"); + return rc; + } + + rc = bnge_hwrm_nvm_dev_info(bd, &nvm_dev_info); + if (!(nvm_dev_info.flags & NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID)) + return 0; + + buf[0] = 0; + strncat(buf, nvm_dev_info.pkg_name, HWRM_FW_VER_STR_LEN); + rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED, + DEVLINK_INFO_VERSION_GENERIC_FW, buf); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to set roce firmware version"); + return rc; + } + + 
snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + nvm_dev_info.hwrm_fw_major, nvm_dev_info.hwrm_fw_minor, + nvm_dev_info.hwrm_fw_build, nvm_dev_info.hwrm_fw_patch); + rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED, + DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to set stored firmware version"); + return rc; + } + + snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + nvm_dev_info.mgmt_fw_major, nvm_dev_info.mgmt_fw_minor, + nvm_dev_info.mgmt_fw_build, nvm_dev_info.mgmt_fw_patch); + rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED, + DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to set stored ncsi firmware version"); + return rc; + } + + snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + nvm_dev_info.roce_fw_major, nvm_dev_info.roce_fw_minor, + nvm_dev_info.roce_fw_build, nvm_dev_info.roce_fw_patch); + rc = bnge_dl_info_put(bd, req, BNGE_VERSION_STORED, + DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver); + if (rc) + NL_SET_ERR_MSG_MOD(extack, + "Failed to set stored roce firmware version"); + + return rc; +} + +static const struct devlink_ops bnge_devlink_ops = { + .info_get = bnge_devlink_info_get, +}; + +void bnge_devlink_free(struct bnge_dev *bd) +{ + struct devlink *devlink = priv_to_devlink(bd); + + devlink_free(devlink); +} + +struct bnge_dev *bnge_devlink_alloc(struct pci_dev *pdev) +{ + struct devlink *devlink; + struct bnge_dev *bd; + + devlink = devlink_alloc(&bnge_devlink_ops, sizeof(*bd), &pdev->dev); + if (!devlink) + return NULL; + + bd = devlink_priv(devlink); + pci_set_drvdata(pdev, bd); + bd->dev = &pdev->dev; + bd->pdev = pdev; + + bd->dsn = pci_get_dsn(pdev); + if (!bd->dsn) + pci_warn(pdev, "Failed to get DSN\n"); + + bnge_vpd_read_info(bd); + + return bd; +} + +void bnge_devlink_register(struct bnge_dev *bd) +{ + struct devlink *devlink = priv_to_devlink(bd); + devlink_register(devlink); +} + +void bnge_devlink_unregister(struct bnge_dev *bd) +{ + struct devlink *devlink = priv_to_devlink(bd); + devlink_unregister(devlink); +} diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_devlink.h b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.h new file mode 100644 index 000000000000..c6575255e650 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnge/bnge_devlink.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2025 Broadcom */ + +#ifndef _BNGE_DEVLINK_H_ +#define _BNGE_DEVLINK_H_ + +enum bnge_dl_version_type { + BNGE_VERSION_FIXED, + BNGE_VERSION_RUNNING, + BNGE_VERSION_STORED, +}; + +void bnge_devlink_free(struct bnge_dev *bd); +struct bnge_dev *bnge_devlink_alloc(struct pci_dev *pdev); +void bnge_devlink_register(struct bnge_dev *bd); +void bnge_devlink_unregister(struct bnge_dev *bd); + +#endif /* _BNGE_DEVLINK_H_ */ diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c new file mode 100644 index 000000000000..569371c1b4f2 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2025 Broadcom. 
+ +#include <linux/unaligned.h> +#include <linux/pci.h> +#include <linux/types.h> +#include <net/devlink.h> +#include <linux/ethtool.h> +#include <linux/etherdevice.h> +#include <linux/ethtool_netlink.h> + +#include "bnge.h" +#include "bnge_ethtool.h" + +static void bnge_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct bnge_net *bn = netdev_priv(dev); + struct bnge_dev *bd = bn->bd; + + strscpy(info->driver, DRV_NAME, sizeof(info->driver)); + strscpy(info->fw_version, bd->fw_ver_str, sizeof(info->fw_version)); + strscpy(info->bus_info, pci_name(bd->pdev), sizeof(info->bus_info)); +} + +static const struct ethtool_ops bnge_ethtool_ops = { + .get_drvinfo = bnge_get_drvinfo, +}; + +void bnge_set_ethtool_ops(struct net_device *dev) +{ + dev->ethtool_ops = &bnge_ethtool_ops; +} diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h new file mode 100644 index 000000000000..21e96a0976d5 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnge/bnge_ethtool.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2025 Broadcom */ + +#ifndef _BNGE_ETHTOOL_H_ +#define _BNGE_ETHTOOL_H_ + +void bnge_set_ethtool_ops(struct net_device *dev); + +#endif /* _BNGE_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c new file mode 100644 index 000000000000..0f971af24142 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c @@ -0,0 +1,508 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2025 Broadcom. + +#include <asm/byteorder.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/pci.h> + +#include "bnge.h" +#include "bnge_hwrm.h" + +static u64 bnge_cal_sentinel(struct bnge_hwrm_ctx *ctx, u16 req_type) +{ + return (((uintptr_t)ctx) + req_type) ^ BNGE_HWRM_SENTINEL; +} + +int bnge_hwrm_req_create(struct bnge_dev *bd, void **req, u16 req_type, + u32 req_len) +{ + struct bnge_hwrm_ctx *ctx; + dma_addr_t dma_handle; + u8 *req_addr; + + if (req_len > BNGE_HWRM_CTX_OFFSET) + return -E2BIG; + + req_addr = dma_pool_alloc(bd->hwrm_dma_pool, GFP_KERNEL | __GFP_ZERO, + &dma_handle); + if (!req_addr) + return -ENOMEM; + + ctx = (struct bnge_hwrm_ctx *)(req_addr + BNGE_HWRM_CTX_OFFSET); + /* safety first, sentinel used to check for invalid requests */ + ctx->sentinel = bnge_cal_sentinel(ctx, req_type); + ctx->req_len = req_len; + ctx->req = (struct input *)req_addr; + ctx->resp = (struct output *)(req_addr + BNGE_HWRM_RESP_OFFSET); + ctx->dma_handle = dma_handle; + ctx->flags = 0; /* __GFP_ZERO, but be explicit regarding ownership */ + ctx->timeout = bd->hwrm_cmd_timeout ?: BNGE_DFLT_HWRM_CMD_TIMEOUT; + ctx->allocated = BNGE_HWRM_DMA_SIZE - BNGE_HWRM_CTX_OFFSET; + ctx->gfp = GFP_KERNEL; + ctx->slice_addr = NULL; + + /* initialize common request fields */ + ctx->req->req_type = cpu_to_le16(req_type); + ctx->req->resp_addr = cpu_to_le64(dma_handle + BNGE_HWRM_RESP_OFFSET); + ctx->req->cmpl_ring = cpu_to_le16(BNGE_HWRM_NO_CMPL_RING); + ctx->req->target_id = cpu_to_le16(BNGE_HWRM_TARGET); + *req = ctx->req; + + return 0; +} + +static struct bnge_hwrm_ctx *__hwrm_ctx_get(struct bnge_dev *bd, u8 *req_addr) +{ + void *ctx_addr = req_addr + BNGE_HWRM_CTX_OFFSET; + struct input *req = (struct input *)req_addr; + struct bnge_hwrm_ctx *ctx = ctx_addr; + u64 sentinel; + + if (!req) { + dev_err(bd->dev, "null 
HWRM request"); + dump_stack(); + return NULL; + } + + /* HWRM API has no type safety, verify sentinel to validate address */ + sentinel = bnge_cal_sentinel(ctx, le16_to_cpu(req->req_type)); + if (ctx->sentinel != sentinel) { + dev_err(bd->dev, "HWRM sentinel mismatch, req_type = %u\n", + (u32)le16_to_cpu(req->req_type)); + dump_stack(); + return NULL; + } + + return ctx; +} + +void bnge_hwrm_req_timeout(struct bnge_dev *bd, + void *req, unsigned int timeout) +{ + struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req); + + if (ctx) + ctx->timeout = timeout; +} + +void bnge_hwrm_req_alloc_flags(struct bnge_dev *bd, void *req, gfp_t gfp) +{ + struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req); + + if (ctx) + ctx->gfp = gfp; +} + +void bnge_hwrm_req_flags(struct bnge_dev *bd, void *req, + enum bnge_hwrm_ctx_flags flags) +{ + struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req); + + if (ctx) + ctx->flags |= (flags & BNGE_HWRM_API_FLAGS); +} + +void *bnge_hwrm_req_hold(struct bnge_dev *bd, void *req) +{ + struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req); + struct input *input = (struct input *)req; + + if (!ctx) + return NULL; + + if (ctx->flags & BNGE_HWRM_INTERNAL_CTX_OWNED) { + dev_err(bd->dev, "HWRM context already owned, req_type = %u\n", + (u32)le16_to_cpu(input->req_type)); + dump_stack(); + return NULL; + } + + ctx->flags |= BNGE_HWRM_INTERNAL_CTX_OWNED; + return ((u8 *)req) + BNGE_HWRM_RESP_OFFSET; +} + +static void __hwrm_ctx_invalidate(struct bnge_dev *bd, + struct bnge_hwrm_ctx *ctx) +{ + void *addr = ((u8 *)ctx) - BNGE_HWRM_CTX_OFFSET; + dma_addr_t dma_handle = ctx->dma_handle; /* save before invalidate */ + + /* unmap any auxiliary DMA slice */ + if (ctx->slice_addr) + dma_free_coherent(bd->dev, ctx->slice_size, + ctx->slice_addr, ctx->slice_handle); + + /* invalidate, ensure ownership, sentinel and dma_handle are cleared */ + memset(ctx, 0, sizeof(struct bnge_hwrm_ctx)); + + /* return the buffer to the DMA pool */ + if (dma_handle) + dma_pool_free(bd->hwrm_dma_pool, addr, dma_handle); +} + +void bnge_hwrm_req_drop(struct bnge_dev *bd, void *req) +{ + struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req); + + if (ctx) + __hwrm_ctx_invalidate(bd, ctx); +} + +static int bnge_map_hwrm_error(u32 hwrm_err) +{ + switch (hwrm_err) { + case HWRM_ERR_CODE_SUCCESS: + return 0; + case HWRM_ERR_CODE_RESOURCE_LOCKED: + return -EROFS; + case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED: + return -EACCES; + case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR: + return -ENOSPC; + case HWRM_ERR_CODE_INVALID_PARAMS: + case HWRM_ERR_CODE_INVALID_FLAGS: + case HWRM_ERR_CODE_INVALID_ENABLES: + case HWRM_ERR_CODE_UNSUPPORTED_TLV: + case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR: + return -EINVAL; + case HWRM_ERR_CODE_NO_BUFFER: + return -ENOMEM; + case HWRM_ERR_CODE_HOT_RESET_PROGRESS: + case HWRM_ERR_CODE_BUSY: + return -EAGAIN; + case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: + return -EOPNOTSUPP; + case HWRM_ERR_CODE_PF_UNAVAILABLE: + return -ENODEV; + default: + return -EIO; + } +} + +static struct bnge_hwrm_wait_token * +bnge_hwrm_create_token(struct bnge_dev *bd, enum bnge_hwrm_chnl dst) +{ + struct bnge_hwrm_wait_token *token; + + token = kzalloc(sizeof(*token), GFP_KERNEL); + if (!token) + return NULL; + + mutex_lock(&bd->hwrm_cmd_lock); + + token->dst = dst; + token->state = BNGE_HWRM_PENDING; + if (dst == BNGE_HWRM_CHNL_CHIMP) { + token->seq_id = bd->hwrm_cmd_seq++; + hlist_add_head_rcu(&token->node, &bd->hwrm_pending_list); + } else { + token->seq_id = bd->hwrm_cmd_kong_seq++; + } + + return token; +} + +static void 
+bnge_hwrm_destroy_token(struct bnge_dev *bd, struct bnge_hwrm_wait_token *token) +{ + if (token->dst == BNGE_HWRM_CHNL_CHIMP) { + hlist_del_rcu(&token->node); + kfree_rcu(token, rcu); + } else { + kfree(token); + } + mutex_unlock(&bd->hwrm_cmd_lock); +} + +static void bnge_hwrm_req_dbg(struct bnge_dev *bd, struct input *req) +{ + u32 ring = le16_to_cpu(req->cmpl_ring); + u32 type = le16_to_cpu(req->req_type); + u32 tgt = le16_to_cpu(req->target_id); + u32 seq = le16_to_cpu(req->seq_id); + char opt[32] = "\n"; + + if (unlikely(ring != (u16)BNGE_HWRM_NO_CMPL_RING)) + snprintf(opt, 16, " ring %d\n", ring); + + if (unlikely(tgt != BNGE_HWRM_TARGET)) + snprintf(opt + strlen(opt) - 1, 16, " tgt 0x%x\n", tgt); + + dev_dbg(bd->dev, "sent hwrm req_type 0x%x seq id 0x%x%s", + type, seq, opt); +} + +#define bnge_hwrm_err(bd, ctx, fmt, ...) \ + do { \ + if ((ctx)->flags & BNGE_HWRM_CTX_SILENT) \ + dev_dbg((bd)->dev, fmt, __VA_ARGS__); \ + else \ + dev_err((bd)->dev, fmt, __VA_ARGS__); \ + } while (0) + +static int __hwrm_send_ctx(struct bnge_dev *bd, struct bnge_hwrm_ctx *ctx) +{ + u32 doorbell_offset = BNGE_GRCPF_REG_CHIMP_COMM_TRIGGER; + enum bnge_hwrm_chnl dst = BNGE_HWRM_CHNL_CHIMP; + u32 bar_offset = BNGE_GRCPF_REG_CHIMP_COMM; + struct bnge_hwrm_wait_token *token = NULL; + u16 max_req_len = BNGE_HWRM_MAX_REQ_LEN; + unsigned int i, timeout, tmo_count; + u32 *data = (u32 *)ctx->req; + u32 msg_len = ctx->req_len; + int rc = -EBUSY; + u32 req_type; + u16 len = 0; + u8 *valid; + + if (ctx->flags & BNGE_HWRM_INTERNAL_RESP_DIRTY) + memset(ctx->resp, 0, PAGE_SIZE); + + req_type = le16_to_cpu(ctx->req->req_type); + + if (msg_len > BNGE_HWRM_MAX_REQ_LEN && + msg_len > bd->hwrm_max_ext_req_len) { + dev_warn(bd->dev, "oversized hwrm request, req_type 0x%x", + req_type); + rc = -E2BIG; + goto exit; + } + + token = bnge_hwrm_create_token(bd, dst); + if (!token) { + rc = -ENOMEM; + goto exit; + } + ctx->req->seq_id = cpu_to_le16(token->seq_id); + + /* Ensure any associated DMA buffers are written before doorbell */ + wmb(); + + /* Write request msg to hwrm channel */ + __iowrite32_copy(bd->bar0 + bar_offset, data, msg_len / 4); + + for (i = msg_len; i < max_req_len; i += 4) + writel(0, bd->bar0 + bar_offset + i); + + /* Ring channel doorbell */ + writel(1, bd->bar0 + doorbell_offset); + + bnge_hwrm_req_dbg(bd, ctx->req); + + /* Limit timeout to an upper limit */ + timeout = min(ctx->timeout, + bd->hwrm_cmd_max_timeout ?: BNGE_HWRM_CMD_MAX_TIMEOUT); + /* convert timeout to usec */ + timeout *= 1000; + + i = 0; + /* Short timeout for the first few iterations: + * number of loops = number of loops for short timeout + + * number of loops for standard timeout. 
+ */ + tmo_count = BNGE_HWRM_SHORT_TIMEOUT_COUNTER; + timeout = timeout - BNGE_HWRM_SHORT_MIN_TIMEOUT * + BNGE_HWRM_SHORT_TIMEOUT_COUNTER; + tmo_count += DIV_ROUND_UP(timeout, BNGE_HWRM_MIN_TIMEOUT); + + if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) { + /* Wait until hwrm response cmpl interrupt is processed */ + while (READ_ONCE(token->state) < BNGE_HWRM_COMPLETE && + i++ < tmo_count) { + /* on first few passes, just barely sleep */ + if (i < BNGE_HWRM_SHORT_TIMEOUT_COUNTER) { + usleep_range(BNGE_HWRM_SHORT_MIN_TIMEOUT, + BNGE_HWRM_SHORT_MAX_TIMEOUT); + } else { + usleep_range(BNGE_HWRM_MIN_TIMEOUT, + BNGE_HWRM_MAX_TIMEOUT); + } + } + + if (READ_ONCE(token->state) != BNGE_HWRM_COMPLETE) { + bnge_hwrm_err(bd, ctx, "No hwrm cmpl received: 0x%x\n", + req_type); + goto exit; + } + len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len)); + valid = ((u8 *)ctx->resp) + len - 1; + } else { + __le16 seen_out_of_seq = ctx->req->seq_id; /* will never see */ + int j; + + /* Check if response len is updated */ + for (i = 0; i < tmo_count; i++) { + if (token && + READ_ONCE(token->state) == BNGE_HWRM_DEFERRED) { + bnge_hwrm_destroy_token(bd, token); + token = NULL; + } + + len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len)); + if (len) { + __le16 resp_seq = READ_ONCE(ctx->resp->seq_id); + + if (resp_seq == ctx->req->seq_id) + break; + if (resp_seq != seen_out_of_seq) { + dev_warn(bd->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n", + le16_to_cpu(resp_seq), req_type, le16_to_cpu(ctx->req->seq_id)); + seen_out_of_seq = resp_seq; + } + } + + /* on first few passes, just barely sleep */ + if (i < BNGE_HWRM_SHORT_TIMEOUT_COUNTER) { + usleep_range(BNGE_HWRM_SHORT_MIN_TIMEOUT, + BNGE_HWRM_SHORT_MAX_TIMEOUT); + } else { + usleep_range(BNGE_HWRM_MIN_TIMEOUT, + BNGE_HWRM_MAX_TIMEOUT); + } + } + + if (i >= tmo_count) { + bnge_hwrm_err(bd, ctx, + "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n", + bnge_hwrm_timeout(i), req_type, + le16_to_cpu(ctx->req->seq_id), len); + goto exit; + } + + /* Last byte of resp contains valid bit */ + valid = ((u8 *)ctx->resp) + len - 1; + for (j = 0; j < BNGE_HWRM_FIN_WAIT_USEC; ) { + /* make sure we read from updated DMA memory */ + dma_rmb(); + if (*valid) + break; + if (j < 10) { + udelay(1); + j++; + } else { + usleep_range(20, 30); + j += 20; + } + } + + if (j >= BNGE_HWRM_FIN_WAIT_USEC) { + bnge_hwrm_err(bd, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n", + bnge_hwrm_timeout(i) + j, req_type, + le16_to_cpu(ctx->req->seq_id), len, *valid); + goto exit; + } + } + + /* Zero valid bit for compatibility. Valid bit in an older spec + * may become a new field in a newer spec. We must make sure that + * a new field not implemented by old spec will read zero. 
+ */ + *valid = 0; + rc = le16_to_cpu(ctx->resp->error_code); + if (rc == HWRM_ERR_CODE_BUSY && !(ctx->flags & BNGE_HWRM_CTX_SILENT)) + dev_warn(bd->dev, "FW returned busy, hwrm req_type 0x%x\n", + req_type); + else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE) + bnge_hwrm_err(bd, ctx, "hwrm req_type 0x%x seq id 0x%x error %d\n", + req_type, le16_to_cpu(ctx->req->seq_id), rc); + rc = bnge_map_hwrm_error(rc); + +exit: + if (token) + bnge_hwrm_destroy_token(bd, token); + if (ctx->flags & BNGE_HWRM_INTERNAL_CTX_OWNED) + ctx->flags |= BNGE_HWRM_INTERNAL_RESP_DIRTY; + else + __hwrm_ctx_invalidate(bd, ctx); + return rc; +} + +int bnge_hwrm_req_send(struct bnge_dev *bd, void *req) +{ + struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req); + + if (!ctx) + return -EINVAL; + + return __hwrm_send_ctx(bd, ctx); +} + +int bnge_hwrm_req_send_silent(struct bnge_dev *bd, void *req) +{ + bnge_hwrm_req_flags(bd, req, BNGE_HWRM_CTX_SILENT); + return bnge_hwrm_req_send(bd, req); +} + +void * +bnge_hwrm_req_dma_slice(struct bnge_dev *bd, void *req, u32 size, + dma_addr_t *dma_handle) +{ + struct bnge_hwrm_ctx *ctx = __hwrm_ctx_get(bd, req); + u8 *end = ((u8 *)req) + BNGE_HWRM_DMA_SIZE; + struct input *input = req; + u8 *addr, *req_addr = req; + u32 max_offset, offset; + + if (!ctx) + return NULL; + + max_offset = BNGE_HWRM_DMA_SIZE - ctx->allocated; + offset = max_offset - size; + offset = ALIGN_DOWN(offset, BNGE_HWRM_DMA_ALIGN); + addr = req_addr + offset; + + if (addr < req_addr + max_offset && req_addr + ctx->req_len <= addr) { + ctx->allocated = end - addr; + *dma_handle = ctx->dma_handle + offset; + return addr; + } + + if (ctx->slice_addr) { + dev_err(bd->dev, "HWRM refusing to reallocate DMA slice, req_type = %u\n", + (u32)le16_to_cpu(input->req_type)); + dump_stack(); + return NULL; + } + + addr = dma_alloc_coherent(bd->dev, size, dma_handle, ctx->gfp); + if (!addr) + return NULL; + + ctx->slice_addr = addr; + ctx->slice_size = size; + ctx->slice_handle = *dma_handle; + + return addr; +} + +void bnge_cleanup_hwrm_resources(struct bnge_dev *bd) +{ + struct bnge_hwrm_wait_token *token; + + dma_pool_destroy(bd->hwrm_dma_pool); + bd->hwrm_dma_pool = NULL; + + rcu_read_lock(); + hlist_for_each_entry_rcu(token, &bd->hwrm_pending_list, node) + WRITE_ONCE(token->state, BNGE_HWRM_CANCELLED); + rcu_read_unlock(); +} + +int bnge_init_hwrm_resources(struct bnge_dev *bd) +{ + bd->hwrm_dma_pool = dma_pool_create("bnge_hwrm", bd->dev, + BNGE_HWRM_DMA_SIZE, + BNGE_HWRM_DMA_ALIGN, 0); + if (!bd->hwrm_dma_pool) + return -ENOMEM; + + INIT_HLIST_HEAD(&bd->hwrm_pending_list); + mutex_init(&bd->hwrm_cmd_lock); + + return 0; +} diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h new file mode 100644 index 000000000000..83794a12cc81 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2025 Broadcom */ + +#ifndef _BNGE_HWRM_H_ +#define _BNGE_HWRM_H_ + +#include <linux/bnxt/hsi.h> + +enum bnge_hwrm_ctx_flags { + BNGE_HWRM_INTERNAL_CTX_OWNED = BIT(0), + BNGE_HWRM_INTERNAL_RESP_DIRTY = BIT(1), + BNGE_HWRM_CTX_SILENT = BIT(2), + BNGE_HWRM_FULL_WAIT = BIT(3), +}; + +#define BNGE_HWRM_API_FLAGS (BNGE_HWRM_CTX_SILENT | BNGE_HWRM_FULL_WAIT) + +struct bnge_hwrm_ctx { + u64 sentinel; + dma_addr_t dma_handle; + struct output *resp; + struct input *req; + dma_addr_t slice_handle; + void *slice_addr; + u32 slice_size; + u32 req_len; + enum bnge_hwrm_ctx_flags flags; + unsigned int timeout; + u32 
allocated; + gfp_t gfp; +}; + +enum bnge_hwrm_wait_state { + BNGE_HWRM_PENDING, + BNGE_HWRM_DEFERRED, + BNGE_HWRM_COMPLETE, + BNGE_HWRM_CANCELLED, +}; + +enum bnge_hwrm_chnl { BNGE_HWRM_CHNL_CHIMP, BNGE_HWRM_CHNL_KONG }; + +struct bnge_hwrm_wait_token { + struct rcu_head rcu; + struct hlist_node node; + enum bnge_hwrm_wait_state state; + enum bnge_hwrm_chnl dst; + u16 seq_id; +}; + +#define BNGE_DFLT_HWRM_CMD_TIMEOUT 500 + +#define BNGE_GRCPF_REG_CHIMP_COMM 0x0 +#define BNGE_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100 + +#define BNGE_HWRM_MAX_REQ_LEN (bd->hwrm_max_req_len) +#define BNGE_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input) +#define BNGE_HWRM_CMD_MAX_TIMEOUT 40000U +#define BNGE_SHORT_HWRM_CMD_TIMEOUT 20 +#define BNGE_HWRM_CMD_TIMEOUT (bd->hwrm_cmd_timeout) +#define BNGE_HWRM_RESET_TIMEOUT ((BNGE_HWRM_CMD_TIMEOUT) * 4) +#define BNGE_HWRM_TARGET 0xffff +#define BNGE_HWRM_NO_CMPL_RING -1 +#define BNGE_HWRM_REQ_MAX_SIZE 128 +#define BNGE_HWRM_DMA_SIZE (2 * PAGE_SIZE) /* space for req+resp */ +#define BNGE_HWRM_RESP_RESERVED PAGE_SIZE +#define BNGE_HWRM_RESP_OFFSET (BNGE_HWRM_DMA_SIZE - \ + BNGE_HWRM_RESP_RESERVED) +#define BNGE_HWRM_CTX_OFFSET (BNGE_HWRM_RESP_OFFSET - \ + sizeof(struct bnge_hwrm_ctx)) +#define BNGE_HWRM_DMA_ALIGN 16 +#define BNGE_HWRM_SENTINEL 0xb6e1f68a12e9a7eb /* arbitrary value */ +#define BNGE_HWRM_SHORT_MIN_TIMEOUT 3 +#define BNGE_HWRM_SHORT_MAX_TIMEOUT 10 +#define BNGE_HWRM_SHORT_TIMEOUT_COUNTER 5 + +#define BNGE_HWRM_MIN_TIMEOUT 25 +#define BNGE_HWRM_MAX_TIMEOUT 40 + +static inline unsigned int bnge_hwrm_timeout(unsigned int n) +{ + return n <= BNGE_HWRM_SHORT_TIMEOUT_COUNTER ? + n * BNGE_HWRM_SHORT_MIN_TIMEOUT : + BNGE_HWRM_SHORT_TIMEOUT_COUNTER * + BNGE_HWRM_SHORT_MIN_TIMEOUT + + (n - BNGE_HWRM_SHORT_TIMEOUT_COUNTER) * + BNGE_HWRM_MIN_TIMEOUT; +} + +#define BNGE_HWRM_FIN_WAIT_USEC 50000 + +void bnge_cleanup_hwrm_resources(struct bnge_dev *bd); +int bnge_init_hwrm_resources(struct bnge_dev *bd); + +int bnge_hwrm_req_create(struct bnge_dev *bd, void **req, u16 req_type, + u32 req_len); +#define bnge_hwrm_req_init(bd, req, req_type) \ + bnge_hwrm_req_create((bd), (void **)&(req), (req_type), \ + sizeof(*(req))) +void *bnge_hwrm_req_hold(struct bnge_dev *bd, void *req); +void bnge_hwrm_req_drop(struct bnge_dev *bd, void *req); +void bnge_hwrm_req_flags(struct bnge_dev *bd, void *req, + enum bnge_hwrm_ctx_flags flags); +void bnge_hwrm_req_timeout(struct bnge_dev *bd, void *req, + unsigned int timeout); +int bnge_hwrm_req_send(struct bnge_dev *bd, void *req); +int bnge_hwrm_req_send_silent(struct bnge_dev *bd, void *req); +void bnge_hwrm_req_alloc_flags(struct bnge_dev *bd, void *req, gfp_t flags); +void *bnge_hwrm_req_dma_slice(struct bnge_dev *bd, void *req, u32 size, + dma_addr_t *dma); +#endif /* _BNGE_HWRM_H_ */ diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c new file mode 100644 index 000000000000..5c178fade065 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c @@ -0,0 +1,703 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2025 Broadcom. 
+ +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/pci.h> +#include <linux/bnxt/hsi.h> + +#include "bnge.h" +#include "bnge_hwrm.h" +#include "bnge_hwrm_lib.h" +#include "bnge_rmem.h" +#include "bnge_resc.h" + +int bnge_hwrm_ver_get(struct bnge_dev *bd) +{ + u32 dev_caps_cfg, hwrm_ver, hwrm_spec_code; + u16 fw_maj, fw_min, fw_bld, fw_rsv; + struct hwrm_ver_get_output *resp; + struct hwrm_ver_get_input *req; + int rc; + + rc = bnge_hwrm_req_init(bd, req, HWRM_VER_GET); + if (rc) + return rc; + + bnge_hwrm_req_flags(bd, req, BNGE_HWRM_FULL_WAIT); + bd->hwrm_max_req_len = HWRM_MAX_REQ_LEN; + req->hwrm_intf_maj = HWRM_VERSION_MAJOR; + req->hwrm_intf_min = HWRM_VERSION_MINOR; + req->hwrm_intf_upd = HWRM_VERSION_UPDATE; + + resp = bnge_hwrm_req_hold(bd, req); + rc = bnge_hwrm_req_send(bd, req); + if (rc) + goto hwrm_ver_get_exit; + + memcpy(&bd->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); + + hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 | + resp->hwrm_intf_min_8b << 8 | + resp->hwrm_intf_upd_8b; + hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 | + HWRM_VERSION_UPDATE; + + if (hwrm_spec_code > hwrm_ver) + snprintf(bd->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", + HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, + HWRM_VERSION_UPDATE); + else + snprintf(bd->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", + resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, + resp->hwrm_intf_upd_8b); + + fw_maj = le16_to_cpu(resp->hwrm_fw_major); + fw_min = le16_to_cpu(resp->hwrm_fw_minor); + fw_bld = le16_to_cpu(resp->hwrm_fw_build); + fw_rsv = le16_to_cpu(resp->hwrm_fw_patch); + + bd->fw_ver_code = BNGE_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); + snprintf(bd->fw_ver_str, FW_VER_STR_LEN, "%d.%d.%d.%d", + fw_maj, fw_min, fw_bld, fw_rsv); + + if (strlen(resp->active_pkg_name)) { + int fw_ver_len = strlen(bd->fw_ver_str); + + snprintf(bd->fw_ver_str + fw_ver_len, + FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s", + resp->active_pkg_name); + bd->fw_cap |= BNGE_FW_CAP_PKG_VER; + } + + bd->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); + if (!bd->hwrm_cmd_timeout) + bd->hwrm_cmd_timeout = BNGE_DFLT_HWRM_CMD_TIMEOUT; + bd->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000; + if (!bd->hwrm_cmd_max_timeout) + bd->hwrm_cmd_max_timeout = BNGE_HWRM_CMD_MAX_TIMEOUT; + else if (bd->hwrm_cmd_max_timeout > BNGE_HWRM_CMD_MAX_TIMEOUT) + dev_warn(bd->dev, "Default HWRM commands max timeout increased to %d seconds\n", + bd->hwrm_cmd_max_timeout / 1000); + + bd->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); + bd->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); + + if (bd->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) + bd->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; + + bd->chip_num = le16_to_cpu(resp->chip_num); + bd->chip_rev = resp->chip_rev; + + dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); + if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && + (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) + bd->fw_cap |= BNGE_FW_CAP_SHORT_CMD; + + if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) + bd->fw_cap |= BNGE_FW_CAP_KONG_MB_CHNL; + + if (dev_caps_cfg & + VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) + bd->fw_cap |= BNGE_FW_CAP_CFA_ADV_FLOW; + +hwrm_ver_get_exit: + bnge_hwrm_req_drop(bd, req); + return rc; +} + +int +bnge_hwrm_nvm_dev_info(struct bnge_dev *bd, + struct hwrm_nvm_get_dev_info_output *nvm_info) +{ + struct hwrm_nvm_get_dev_info_output *resp; + struct hwrm_nvm_get_dev_info_input 
*req; + int rc; + + rc = bnge_hwrm_req_init(bd, req, HWRM_NVM_GET_DEV_INFO); + if (rc) + return rc; + + resp = bnge_hwrm_req_hold(bd, req); + rc = bnge_hwrm_req_send(bd, req); + if (!rc) + memcpy(nvm_info, resp, sizeof(*resp)); + bnge_hwrm_req_drop(bd, req); + return rc; +} + +int bnge_hwrm_func_reset(struct bnge_dev *bd) +{ + struct hwrm_func_reset_input *req; + int rc; + + rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_RESET); + if (rc) + return rc; + + req->enables = 0; + bnge_hwrm_req_timeout(bd, req, BNGE_HWRM_RESET_TIMEOUT); + return bnge_hwrm_req_send(bd, req); +} + +int bnge_hwrm_fw_set_time(struct bnge_dev *bd) +{ + struct hwrm_fw_set_time_input *req; + struct tm tm; + int rc; + + time64_to_tm(ktime_get_real_seconds(), 0, &tm); + + rc = bnge_hwrm_req_init(bd, req, HWRM_FW_SET_TIME); + if (rc) + return rc; + + req->year = cpu_to_le16(1900 + tm.tm_year); + req->month = 1 + tm.tm_mon; + req->day = tm.tm_mday; + req->hour = tm.tm_hour; + req->minute = tm.tm_min; + req->second = tm.tm_sec; + return bnge_hwrm_req_send(bd, req); +} + +int bnge_hwrm_func_drv_rgtr(struct bnge_dev *bd) +{ + struct hwrm_func_drv_rgtr_output *resp; + struct hwrm_func_drv_rgtr_input *req; + u32 flags; + int rc; + + rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_DRV_RGTR); + if (rc) + return rc; + + req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | + FUNC_DRV_RGTR_REQ_ENABLES_VER | + FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); + + req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); + flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; + + req->flags = cpu_to_le32(flags); + req->ver_maj_8b = DRV_VER_MAJ; + req->ver_min_8b = DRV_VER_MIN; + req->ver_upd_8b = DRV_VER_UPD; + req->ver_maj = cpu_to_le16(DRV_VER_MAJ); + req->ver_min = cpu_to_le16(DRV_VER_MIN); + req->ver_upd = cpu_to_le16(DRV_VER_UPD); + + resp = bnge_hwrm_req_hold(bd, req); + rc = bnge_hwrm_req_send(bd, req); + if (!rc) { + set_bit(BNGE_STATE_DRV_REGISTERED, &bd->state); + if (resp->flags & + cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) + bd->fw_cap |= BNGE_FW_CAP_IF_CHANGE; + } + bnge_hwrm_req_drop(bd, req); + return rc; +} + +int bnge_hwrm_func_drv_unrgtr(struct bnge_dev *bd) +{ + struct hwrm_func_drv_unrgtr_input *req; + int rc; + + if (!test_and_clear_bit(BNGE_STATE_DRV_REGISTERED, &bd->state)) + return 0; + + rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_DRV_UNRGTR); + if (rc) + return rc; + return bnge_hwrm_req_send(bd, req); +} + +static void bnge_init_ctx_initializer(struct bnge_ctx_mem_type *ctxm, + u8 init_val, u8 init_offset, + bool init_mask_set) +{ + ctxm->init_value = init_val; + ctxm->init_offset = BNGE_CTX_INIT_INVALID_OFFSET; + if (init_mask_set) + ctxm->init_offset = init_offset * 4; + else + ctxm->init_value = 0; +} + +static int bnge_alloc_all_ctx_pg_info(struct bnge_dev *bd, int ctx_max) +{ + struct bnge_ctx_mem_info *ctx = bd->ctx; + u16 type; + + for (type = 0; type < ctx_max; type++) { + struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; + int n = 1; + + if (!ctxm->max_entries) + continue; + + if (ctxm->instance_bmap) + n = hweight32(ctxm->instance_bmap); + ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL); + if (!ctxm->pg_info) + return -ENOMEM; + } + + return 0; +} + +#define BNGE_CTX_INIT_VALID(flags) \ + (!!((flags) & \ + FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT)) + +int bnge_hwrm_func_backing_store_qcaps(struct bnge_dev *bd) +{ + struct hwrm_func_backing_store_qcaps_v2_output *resp; + struct hwrm_func_backing_store_qcaps_v2_input *req; + struct bnge_ctx_mem_info *ctx; 
+ u16 type; + int rc; + + if (bd->ctx) + return 0; + + rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2); + if (rc) + return rc; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + bd->ctx = ctx; + + resp = bnge_hwrm_req_hold(bd, req); + + for (type = 0; type < BNGE_CTX_V2_MAX; ) { + struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; + u8 init_val, init_off, i; + __le32 *p; + u32 flags; + + req->type = cpu_to_le16(type); + rc = bnge_hwrm_req_send(bd, req); + if (rc) + goto ctx_done; + flags = le32_to_cpu(resp->flags); + type = le16_to_cpu(resp->next_valid_type); + if (!(flags & + FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID)) + continue; + + ctxm->type = le16_to_cpu(resp->type); + ctxm->entry_size = le16_to_cpu(resp->entry_size); + ctxm->flags = flags; + ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map); + ctxm->entry_multiple = resp->entry_multiple; + ctxm->max_entries = le32_to_cpu(resp->max_num_entries); + ctxm->min_entries = le32_to_cpu(resp->min_num_entries); + init_val = resp->ctx_init_value; + init_off = resp->ctx_init_offset; + bnge_init_ctx_initializer(ctxm, init_val, init_off, + BNGE_CTX_INIT_VALID(flags)); + ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt, + BNGE_MAX_SPLIT_ENTRY); + for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt; + i++, p++) + ctxm->split[i] = le32_to_cpu(*p); + } + rc = bnge_alloc_all_ctx_pg_info(bd, BNGE_CTX_V2_MAX); + +ctx_done: + bnge_hwrm_req_drop(bd, req); + return rc; +} + +static void bnge_hwrm_set_pg_attr(struct bnge_ring_mem_info *rmem, u8 *pg_attr, + __le64 *pg_dir) +{ + if (!rmem->nr_pages) + return; + + BNGE_SET_CTX_PAGE_ATTR(*pg_attr); + if (rmem->depth >= 1) { + if (rmem->depth == 2) + *pg_attr |= 2; + else + *pg_attr |= 1; + *pg_dir = cpu_to_le64(rmem->dma_pg_tbl); + } else { + *pg_dir = cpu_to_le64(rmem->dma_arr[0]); + } +} + +int bnge_hwrm_func_backing_store(struct bnge_dev *bd, + struct bnge_ctx_mem_type *ctxm, + bool last) +{ + struct hwrm_func_backing_store_cfg_v2_input *req; + u32 instance_bmap = ctxm->instance_bmap; + int i, j, rc = 0, n = 1; + __le32 *p; + + if (!(ctxm->flags & BNGE_CTX_MEM_TYPE_VALID) || !ctxm->pg_info) + return 0; + + if (instance_bmap) + n = hweight32(ctxm->instance_bmap); + else + instance_bmap = 1; + + rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_BACKING_STORE_CFG_V2); + if (rc) + return rc; + bnge_hwrm_req_hold(bd, req); + req->type = cpu_to_le16(ctxm->type); + req->entry_size = cpu_to_le16(ctxm->entry_size); + req->subtype_valid_cnt = ctxm->split_entry_cnt; + for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++) + p[i] = cpu_to_le32(ctxm->split[i]); + for (i = 0, j = 0; j < n && !rc; i++) { + struct bnge_ctx_pg_info *ctx_pg; + + if (!(instance_bmap & (1 << i))) + continue; + req->instance = cpu_to_le16(i); + ctx_pg = &ctxm->pg_info[j++]; + if (!ctx_pg->entries) + continue; + req->num_entries = cpu_to_le32(ctx_pg->entries); + bnge_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req->page_size_pbl_level, + &req->page_dir); + if (last && j == n) + req->flags = + cpu_to_le32(BNGE_BS_CFG_ALL_DONE); + rc = bnge_hwrm_req_send(bd, req); + } + bnge_hwrm_req_drop(bd, req); + + return rc; +} + +static int bnge_hwrm_get_rings(struct bnge_dev *bd) +{ + struct bnge_hw_resc *hw_resc = &bd->hw_resc; + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; + u16 cp, stats; + u16 rx, tx; + int rc; + + rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + resp = 
bnge_hwrm_req_hold(bd, req); + rc = bnge_hwrm_req_send(bd, req); + if (rc) { + bnge_hwrm_req_drop(bd, req); + return rc; + } + + hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); + hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings); + hw_resc->resv_hw_ring_grps = + le32_to_cpu(resp->alloc_hw_ring_grps); + hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); + hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx); + cp = le16_to_cpu(resp->alloc_cmpl_rings); + stats = le16_to_cpu(resp->alloc_stat_ctx); + hw_resc->resv_irqs = cp; + rx = hw_resc->resv_rx_rings; + tx = hw_resc->resv_tx_rings; + if (bnge_is_agg_reqd(bd)) + rx >>= 1; + if (cp < (rx + tx)) { + rc = bnge_fix_rings_count(&rx, &tx, cp, false); + if (rc) + goto get_rings_exit; + if (bnge_is_agg_reqd(bd)) + rx <<= 1; + hw_resc->resv_rx_rings = rx; + hw_resc->resv_tx_rings = tx; + } + hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix); + hw_resc->resv_hw_ring_grps = rx; + hw_resc->resv_cp_rings = cp; + hw_resc->resv_stat_ctxs = stats; + +get_rings_exit: + bnge_hwrm_req_drop(bd, req); + return rc; +} + +static struct hwrm_func_cfg_input * +__bnge_hwrm_reserve_pf_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr) +{ + struct hwrm_func_cfg_input *req; + u32 enables = 0; + + if (bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCFG)) + return NULL; + + req->fid = cpu_to_le16(0xffff); + enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; + req->num_tx_rings = cpu_to_le16(hwr->tx); + + enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; + enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + enables |= hwr->nq ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; + enables |= hwr->cmpl ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; + enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; + enables |= hwr->rss_ctx ? 
FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; + + req->num_rx_rings = cpu_to_le16(hwr->rx); + req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx); + req->num_cmpl_rings = cpu_to_le16(hwr->cmpl); + req->num_msix = cpu_to_le16(hwr->nq); + req->num_stat_ctxs = cpu_to_le16(hwr->stat); + req->num_vnics = cpu_to_le16(hwr->vnic); + req->enables = cpu_to_le32(enables); + + return req; +} + +static int +bnge_hwrm_reserve_pf_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr) +{ + struct hwrm_func_cfg_input *req; + int rc; + + req = __bnge_hwrm_reserve_pf_rings(bd, hwr); + if (!req) + return -ENOMEM; + + if (!req->enables) { + bnge_hwrm_req_drop(bd, req); + return 0; + } + + rc = bnge_hwrm_req_send(bd, req); + if (rc) + return rc; + + return bnge_hwrm_get_rings(bd); +} + +int bnge_hwrm_reserve_rings(struct bnge_dev *bd, struct bnge_hw_rings *hwr) +{ + return bnge_hwrm_reserve_pf_rings(bd, hwr); +} + +int bnge_hwrm_func_qcfg(struct bnge_dev *bd) +{ + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; + int rc; + + rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + resp = bnge_hwrm_req_hold(bd, req); + rc = bnge_hwrm_req_send(bd, req); + if (rc) + goto func_qcfg_exit; + + bd->max_mtu = le16_to_cpu(resp->max_mtu_configured); + if (!bd->max_mtu) + bd->max_mtu = BNGE_MAX_MTU; + + if (bd->db_size) + goto func_qcfg_exit; + + bd->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024; + bd->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) * + 1024); + if (!bd->db_size || bd->db_size > pci_resource_len(bd->pdev, 2) || + bd->db_size <= bd->db_offset) + bd->db_size = pci_resource_len(bd->pdev, 2); + +func_qcfg_exit: + bnge_hwrm_req_drop(bd, req); + return rc; +} + +int bnge_hwrm_func_resc_qcaps(struct bnge_dev *bd) +{ + struct hwrm_func_resource_qcaps_output *resp; + struct bnge_hw_resc *hw_resc = &bd->hw_resc; + struct hwrm_func_resource_qcaps_input *req; + int rc; + + rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_RESOURCE_QCAPS); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + resp = bnge_hwrm_req_hold(bd, req); + rc = bnge_hwrm_req_send_silent(bd, req); + if (rc) + goto hwrm_func_resc_qcaps_exit; + + hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); + hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx); + hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); + hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings); + hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); + hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings); + hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); + hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings); + hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); + hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps); + hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps); + hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs); + hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); + hw_resc->min_vnics = le16_to_cpu(resp->min_vnics); + hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); + hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx); + hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); + + hw_resc->max_nqs = le16_to_cpu(resp->max_msix); + hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; + +hwrm_func_resc_qcaps_exit: + bnge_hwrm_req_drop(bd, req); + return rc; +} + +int bnge_hwrm_func_qcaps(struct bnge_dev *bd) +{ + struct hwrm_func_qcaps_output *resp; + struct 
hwrm_func_qcaps_input *req; + struct bnge_pf_info *pf = &bd->pf; + u32 flags; + int rc; + + rc = bnge_hwrm_req_init(bd, req, HWRM_FUNC_QCAPS); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + resp = bnge_hwrm_req_hold(bd, req); + rc = bnge_hwrm_req_send(bd, req); + if (rc) + goto hwrm_func_qcaps_exit; + + flags = le32_to_cpu(resp->flags); + if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED) + bd->flags |= BNGE_EN_ROCE_V1; + if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) + bd->flags |= BNGE_EN_ROCE_V2; + + pf->fw_fid = le16_to_cpu(resp->fid); + pf->port_id = le16_to_cpu(resp->port_id); + memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); + + bd->tso_max_segs = le16_to_cpu(resp->max_tso_segs); + +hwrm_func_qcaps_exit: + bnge_hwrm_req_drop(bd, req); + return rc; +} + +int bnge_hwrm_vnic_qcaps(struct bnge_dev *bd) +{ + struct hwrm_vnic_qcaps_output *resp; + struct hwrm_vnic_qcaps_input *req; + int rc; + + bd->hw_ring_stats_size = sizeof(struct ctx_hw_stats); + bd->rss_cap &= ~BNGE_RSS_CAP_NEW_RSS_CAP; + + rc = bnge_hwrm_req_init(bd, req, HWRM_VNIC_QCAPS); + if (rc) + return rc; + + resp = bnge_hwrm_req_hold(bd, req); + rc = bnge_hwrm_req_send(bd, req); + if (!rc) { + u32 flags = le32_to_cpu(resp->flags); + + if (flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) + bd->fw_cap |= BNGE_FW_CAP_VLAN_RX_STRIP; + if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP) + bd->rss_cap |= BNGE_RSS_CAP_RSS_HASH_TYPE_DELTA; + if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED) + bd->rss_cap |= BNGE_RSS_CAP_RSS_TCAM; + bd->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); + if (bd->max_tpa_v2) + bd->hw_ring_stats_size = BNGE_RING_STATS_SIZE; + if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP) + bd->fw_cap |= BNGE_FW_CAP_VNIC_TUNNEL_TPA; + if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP) + bd->rss_cap |= BNGE_RSS_CAP_AH_V4_RSS_CAP; + if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP) + bd->rss_cap |= BNGE_RSS_CAP_AH_V6_RSS_CAP; + if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP) + bd->rss_cap |= BNGE_RSS_CAP_ESP_V4_RSS_CAP; + if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP) + bd->rss_cap |= BNGE_RSS_CAP_ESP_V6_RSS_CAP; + } + bnge_hwrm_req_drop(bd, req); + + return rc; +} + +#define BNGE_CNPQ(q_profile) \ + ((q_profile) == \ + QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP) + +int bnge_hwrm_queue_qportcfg(struct bnge_dev *bd) +{ + struct hwrm_queue_qportcfg_output *resp; + struct hwrm_queue_qportcfg_input *req; + u8 i, j, *qptr; + bool no_rdma; + int rc; + + rc = bnge_hwrm_req_init(bd, req, HWRM_QUEUE_QPORTCFG); + if (rc) + return rc; + + resp = bnge_hwrm_req_hold(bd, req); + rc = bnge_hwrm_req_send(bd, req); + if (rc) + goto qportcfg_exit; + + if (!resp->max_configurable_queues) { + rc = -EINVAL; + goto qportcfg_exit; + } + bd->max_tc = resp->max_configurable_queues; + bd->max_lltc = resp->max_configurable_lossless_queues; + if (bd->max_tc > BNGE_MAX_QUEUE) + bd->max_tc = BNGE_MAX_QUEUE; + + no_rdma = !bnge_is_roce_en(bd); + qptr = &resp->queue_id0; + for (i = 0, j = 0; i < bd->max_tc; i++) { + bd->q_info[j].queue_id = *qptr; + bd->q_ids[i] = *qptr++; + bd->q_info[j].queue_profile = *qptr++; + bd->tc_to_qidx[j] = j; + if (!BNGE_CNPQ(bd->q_info[j].queue_profile) || no_rdma) + j++; + } + bd->max_q = bd->max_tc; + bd->max_tc = max_t(u8, j, 1); + + if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) + bd->max_tc = 1; + + if (bd->max_lltc > bd->max_tc) + bd->max_lltc = bd->max_tc; + +qportcfg_exit: + 
bnge_hwrm_req_drop(bd, req); + return rc; +} diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h new file mode 100644 index 000000000000..6c03923eb559 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2025 Broadcom */ + +#ifndef _BNGE_HWRM_LIB_H_ +#define _BNGE_HWRM_LIB_H_ + +int bnge_hwrm_ver_get(struct bnge_dev *bd); +int bnge_hwrm_func_reset(struct bnge_dev *bd); +int bnge_hwrm_fw_set_time(struct bnge_dev *bd); +int bnge_hwrm_func_drv_rgtr(struct bnge_dev *bd); +int bnge_hwrm_func_drv_unrgtr(struct bnge_dev *bd); +int bnge_hwrm_vnic_qcaps(struct bnge_dev *bd); +int bnge_hwrm_nvm_dev_info(struct bnge_dev *bd, + struct hwrm_nvm_get_dev_info_output *nvm_dev_info); +int bnge_hwrm_func_backing_store(struct bnge_dev *bd, + struct bnge_ctx_mem_type *ctxm, + bool last); +int bnge_hwrm_func_backing_store_qcaps(struct bnge_dev *bd); +int bnge_hwrm_reserve_rings(struct bnge_dev *bd, + struct bnge_hw_rings *hwr); +int bnge_hwrm_func_qcaps(struct bnge_dev *bd); +int bnge_hwrm_vnic_qcaps(struct bnge_dev *bd); +int bnge_hwrm_func_qcfg(struct bnge_dev *bd); +int bnge_hwrm_func_resc_qcaps(struct bnge_dev *bd); +int bnge_hwrm_queue_qportcfg(struct bnge_dev *bd); + +#endif /* _BNGE_HWRM_LIB_H_ */ diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c new file mode 100644 index 000000000000..02254934f3d0 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.c @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2025 Broadcom. + +#include <asm/byteorder.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/if.h> +#include <net/ip.h> +#include <linux/skbuff.h> + +#include "bnge.h" +#include "bnge_hwrm_lib.h" +#include "bnge_ethtool.h" + +static netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; +} + +static int bnge_open(struct net_device *dev) +{ + return 0; +} + +static int bnge_close(struct net_device *dev) +{ + return 0; +} + +static const struct net_device_ops bnge_netdev_ops = { + .ndo_open = bnge_open, + .ndo_stop = bnge_close, + .ndo_start_xmit = bnge_start_xmit, +}; + +static void bnge_init_mac_addr(struct bnge_dev *bd) +{ + eth_hw_addr_set(bd->netdev, bd->pf.mac_addr); +} + +static void bnge_set_tpa_flags(struct bnge_dev *bd) +{ + struct bnge_net *bn = netdev_priv(bd->netdev); + + bn->priv_flags &= ~BNGE_NET_EN_TPA; + + if (bd->netdev->features & NETIF_F_LRO) + bn->priv_flags |= BNGE_NET_EN_LRO; + else if (bd->netdev->features & NETIF_F_GRO_HW) + bn->priv_flags |= BNGE_NET_EN_GRO; +} + +static void bnge_init_l2_fltr_tbl(struct bnge_net *bn) +{ + int i; + + for (i = 0; i < BNGE_L2_FLTR_HASH_SIZE; i++) + INIT_HLIST_HEAD(&bn->l2_fltr_hash_tbl[i]); + get_random_bytes(&bn->hash_seed, sizeof(bn->hash_seed)); +} + +void bnge_set_ring_params(struct bnge_dev *bd) +{ + struct bnge_net *bn = netdev_priv(bd->netdev); + u32 ring_size, rx_size, rx_space, max_rx_cmpl; + u32 agg_factor = 0, agg_ring_size = 0; + + /* 8 for CRC and VLAN */ + rx_size = SKB_DATA_ALIGN(bn->netdev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); + + rx_space = rx_size + ALIGN(NET_SKB_PAD, 8) + + 
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + bn->rx_copy_thresh = BNGE_RX_COPY_THRESH; + ring_size = bn->rx_ring_size; + bn->rx_agg_ring_size = 0; + bn->rx_agg_nr_pages = 0; + + if (bn->priv_flags & BNGE_NET_EN_TPA) + agg_factor = min_t(u32, 4, 65536 / BNGE_RX_PAGE_SIZE); + + bn->priv_flags &= ~BNGE_NET_EN_JUMBO; + if (rx_space > PAGE_SIZE) { + u32 jumbo_factor; + + bn->priv_flags |= BNGE_NET_EN_JUMBO; + jumbo_factor = PAGE_ALIGN(bn->netdev->mtu - 40) >> PAGE_SHIFT; + if (jumbo_factor > agg_factor) + agg_factor = jumbo_factor; + } + if (agg_factor) { + if (ring_size > BNGE_MAX_RX_DESC_CNT_JUM_ENA) { + ring_size = BNGE_MAX_RX_DESC_CNT_JUM_ENA; + netdev_warn(bn->netdev, "RX ring size reduced from %d to %d due to jumbo ring\n", + bn->rx_ring_size, ring_size); + bn->rx_ring_size = ring_size; + } + agg_ring_size = ring_size * agg_factor; + + bn->rx_agg_nr_pages = bnge_adjust_pow_two(agg_ring_size, + RX_DESC_CNT); + if (bn->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { + u32 tmp = agg_ring_size; + + bn->rx_agg_nr_pages = MAX_RX_AGG_PAGES; + agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; + netdev_warn(bn->netdev, "RX agg ring size %d reduced to %d.\n", + tmp, agg_ring_size); + } + bn->rx_agg_ring_size = agg_ring_size; + bn->rx_agg_ring_mask = (bn->rx_agg_nr_pages * RX_DESC_CNT) - 1; + + rx_size = SKB_DATA_ALIGN(BNGE_RX_COPY_THRESH + NET_IP_ALIGN); + rx_space = rx_size + NET_SKB_PAD + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + } + + bn->rx_buf_use_size = rx_size; + bn->rx_buf_size = rx_space; + + bn->rx_nr_pages = bnge_adjust_pow_two(ring_size, RX_DESC_CNT); + bn->rx_ring_mask = (bn->rx_nr_pages * RX_DESC_CNT) - 1; + + ring_size = bn->tx_ring_size; + bn->tx_nr_pages = bnge_adjust_pow_two(ring_size, TX_DESC_CNT); + bn->tx_ring_mask = (bn->tx_nr_pages * TX_DESC_CNT) - 1; + + max_rx_cmpl = bn->rx_ring_size; + + if (bn->priv_flags & BNGE_NET_EN_TPA) + max_rx_cmpl += bd->max_tpa_v2; + ring_size = max_rx_cmpl * 2 + agg_ring_size + bn->tx_ring_size; + bn->cp_ring_size = ring_size; + + bn->cp_nr_pages = bnge_adjust_pow_two(ring_size, CP_DESC_CNT); + if (bn->cp_nr_pages > MAX_CP_PAGES) { + bn->cp_nr_pages = MAX_CP_PAGES; + bn->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; + netdev_warn(bn->netdev, "completion ring size %d reduced to %d.\n", + ring_size, bn->cp_ring_size); + } + bn->cp_bit = bn->cp_nr_pages * CP_DESC_CNT; + bn->cp_ring_mask = bn->cp_bit - 1; +} + +int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs) +{ + struct net_device *netdev; + struct bnge_net *bn; + int rc; + + netdev = alloc_etherdev_mqs(sizeof(*bn), max_irqs * BNGE_MAX_QUEUE, + max_irqs); + if (!netdev) + return -ENOMEM; + + SET_NETDEV_DEV(netdev, bd->dev); + bd->netdev = netdev; + + netdev->netdev_ops = &bnge_netdev_ops; + + bnge_set_ethtool_ops(netdev); + + bn = netdev_priv(netdev); + bn->netdev = netdev; + bn->bd = bd; + + netdev->min_mtu = ETH_ZLEN; + netdev->max_mtu = bd->max_mtu; + + netdev->hw_features = NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_GRE | + NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_PARTIAL | + NETIF_F_RXHASH | + NETIF_F_RXCSUM | + NETIF_F_GRO; + + if (bd->flags & BNGE_EN_UDP_GSO_SUPP) + netdev->hw_features |= NETIF_F_GSO_UDP_L4; + + if (BNGE_SUPPORTS_TPA(bd)) + netdev->hw_features |= NETIF_F_LRO; + + netdev->hw_enc_features = NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_GRE | + 
NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_PARTIAL; + + if (bd->flags & BNGE_EN_UDP_GSO_SUPP) + netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4; + + netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_GRE_CSUM; + + netdev->vlan_features = netdev->hw_features | NETIF_F_HIGHDMA; + if (bd->fw_cap & BNGE_FW_CAP_VLAN_RX_STRIP) + netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_RX; + if (bd->fw_cap & BNGE_FW_CAP_VLAN_TX_INSERT) + netdev->hw_features |= BNGE_HW_FEATURE_VLAN_ALL_TX; + + if (BNGE_SUPPORTS_TPA(bd)) + netdev->hw_features |= NETIF_F_GRO_HW; + + netdev->features |= netdev->hw_features | NETIF_F_HIGHDMA; + + if (netdev->features & NETIF_F_GRO_HW) + netdev->features &= ~NETIF_F_LRO; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + netif_set_tso_max_size(netdev, GSO_MAX_SIZE); + if (bd->tso_max_segs) + netif_set_tso_max_segs(netdev, bd->tso_max_segs); + + bn->rx_ring_size = BNGE_DEFAULT_RX_RING_SIZE; + bn->tx_ring_size = BNGE_DEFAULT_TX_RING_SIZE; + + bnge_set_tpa_flags(bd); + bnge_set_ring_params(bd); + + bnge_init_l2_fltr_tbl(bn); + bnge_init_mac_addr(bd); + + rc = register_netdev(netdev); + if (rc) { + dev_err(bd->dev, "Register netdev failed rc: %d\n", rc); + goto err_netdev; + } + + return 0; + +err_netdev: + free_netdev(netdev); + return rc; +} + +void bnge_netdev_free(struct bnge_dev *bd) +{ + struct net_device *netdev = bd->netdev; + + unregister_netdev(netdev); + free_netdev(netdev); + bd->netdev = NULL; +} diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h new file mode 100644 index 000000000000..a650d71a58db --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnge/bnge_netdev.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2025 Broadcom */ + +#ifndef _BNGE_NETDEV_H_ +#define _BNGE_NETDEV_H_ + +#include <linux/bnxt/hsi.h> + +struct tx_bd { + __le32 tx_bd_len_flags_type; + #define TX_BD_TYPE (0x3f << 0) + #define TX_BD_TYPE_SHORT_TX_BD (0x00 << 0) + #define TX_BD_TYPE_LONG_TX_BD (0x10 << 0) + #define TX_BD_FLAGS_PACKET_END (1 << 6) + #define TX_BD_FLAGS_NO_CMPL (1 << 7) + #define TX_BD_FLAGS_BD_CNT (0x1f << 8) + #define TX_BD_FLAGS_BD_CNT_SHIFT 8 + #define TX_BD_FLAGS_LHINT (3 << 13) + #define TX_BD_FLAGS_LHINT_SHIFT 13 + #define TX_BD_FLAGS_LHINT_512_AND_SMALLER (0 << 13) + #define TX_BD_FLAGS_LHINT_512_TO_1023 (1 << 13) + #define TX_BD_FLAGS_LHINT_1024_TO_2047 (2 << 13) + #define TX_BD_FLAGS_LHINT_2048_AND_LARGER (3 << 13) + #define TX_BD_FLAGS_COAL_NOW (1 << 15) + #define TX_BD_LEN (0xffff << 16) + #define TX_BD_LEN_SHIFT 16 + u32 tx_bd_opaque; + __le64 tx_bd_haddr; +} __packed; + +struct rx_bd { + __le32 rx_bd_len_flags_type; + #define RX_BD_TYPE (0x3f << 0) + #define RX_BD_TYPE_RX_PACKET_BD 0x4 + #define RX_BD_TYPE_RX_BUFFER_BD 0x5 + #define RX_BD_TYPE_RX_AGG_BD 0x6 + #define RX_BD_TYPE_16B_BD_SIZE (0 << 4) + #define RX_BD_TYPE_32B_BD_SIZE (1 << 4) + #define RX_BD_TYPE_48B_BD_SIZE (2 << 4) + #define RX_BD_TYPE_64B_BD_SIZE (3 << 4) + #define RX_BD_FLAGS_SOP (1 << 6) + #define RX_BD_FLAGS_EOP (1 << 7) + #define RX_BD_FLAGS_BUFFERS (3 << 8) + #define RX_BD_FLAGS_1_BUFFER_PACKET (0 << 8) + #define RX_BD_FLAGS_2_BUFFER_PACKET (1 << 8) + #define RX_BD_FLAGS_3_BUFFER_PACKET (2 << 8) + #define RX_BD_FLAGS_4_BUFFER_PACKET (3 << 8) + #define RX_BD_LEN (0xffff << 16) + #define RX_BD_LEN_SHIFT 16 + u32 rx_bd_opaque; + __le64 rx_bd_haddr; +}; + +struct tx_cmp { + __le32 tx_cmp_flags_type; + #define CMP_TYPE (0x3f << 0) + #define 
CMP_TYPE_TX_L2_CMP 0 + #define CMP_TYPE_TX_L2_COAL_CMP 2 + #define CMP_TYPE_TX_L2_PKT_TS_CMP 4 + #define CMP_TYPE_RX_L2_CMP 17 + #define CMP_TYPE_RX_AGG_CMP 18 + #define CMP_TYPE_RX_L2_TPA_START_CMP 19 + #define CMP_TYPE_RX_L2_TPA_END_CMP 21 + #define CMP_TYPE_RX_TPA_AGG_CMP 22 + #define CMP_TYPE_RX_L2_V3_CMP 23 + #define CMP_TYPE_RX_L2_TPA_START_V3_CMP 25 + #define CMP_TYPE_STATUS_CMP 32 + #define CMP_TYPE_REMOTE_DRIVER_REQ 34 + #define CMP_TYPE_REMOTE_DRIVER_RESP 36 + #define CMP_TYPE_ERROR_STATUS 48 + #define CMPL_BASE_TYPE_STAT_EJECT 0x1aUL + #define CMPL_BASE_TYPE_HWRM_DONE 0x20UL + #define CMPL_BASE_TYPE_HWRM_FWD_REQ 0x22UL + #define CMPL_BASE_TYPE_HWRM_FWD_RESP 0x24UL + #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define TX_CMP_FLAGS_ERROR (1 << 6) + #define TX_CMP_FLAGS_PUSH (1 << 7) + u32 tx_cmp_opaque; + __le32 tx_cmp_errors_v; + #define TX_CMP_V (1 << 0) + #define TX_CMP_ERRORS_BUFFER_ERROR (7 << 1) + #define TX_CMP_ERRORS_BUFFER_ERROR_NO_ERROR 0 + #define TX_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT 2 + #define TX_CMP_ERRORS_BUFFER_ERROR_INVALID_STAG 4 + #define TX_CMP_ERRORS_BUFFER_ERROR_STAG_BOUNDS 5 + #define TX_CMP_ERRORS_ZERO_LENGTH_PKT (1 << 4) + #define TX_CMP_ERRORS_EXCESSIVE_BD_LEN (1 << 5) + #define TX_CMP_ERRORS_DMA_ERROR (1 << 6) + #define TX_CMP_ERRORS_HINT_TOO_SHORT (1 << 7) + __le32 sq_cons_idx; + #define TX_CMP_SQ_CONS_IDX_MASK 0x00ffffff +}; + +struct bnge_sw_tx_bd { + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(mapping); + DEFINE_DMA_UNMAP_LEN(len); + struct page *page; + u8 is_ts_pkt; + u8 is_push; + u8 action; + unsigned short nr_frags; + union { + u16 rx_prod; + u16 txts_prod; + }; +}; + +struct bnge_sw_rx_bd { + void *data; + u8 *data_ptr; + dma_addr_t mapping; +}; + +struct bnge_sw_rx_agg_bd { + struct page *page; + unsigned int offset; + dma_addr_t mapping; +}; + +#define BNGE_RX_COPY_THRESH 256 + +#define BNGE_HW_FEATURE_VLAN_ALL_RX \ + (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX) +#define BNGE_HW_FEATURE_VLAN_ALL_TX \ + (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX) + +enum { + BNGE_NET_EN_GRO = BIT(0), + BNGE_NET_EN_LRO = BIT(1), + BNGE_NET_EN_JUMBO = BIT(2), +}; + +#define BNGE_NET_EN_TPA (BNGE_NET_EN_GRO | BNGE_NET_EN_LRO) + +struct bnge_net { + struct bnge_dev *bd; + struct net_device *netdev; + + u32 priv_flags; + + u32 rx_ring_size; + u32 rx_buf_size; + u32 rx_buf_use_size; /* usable size */ + u32 rx_agg_ring_size; + u32 rx_copy_thresh; + u32 rx_ring_mask; + u32 rx_agg_ring_mask; + u16 rx_nr_pages; + u16 rx_agg_nr_pages; + + u32 tx_ring_size; + u32 tx_ring_mask; + u16 tx_nr_pages; + + /* NQs and Completion rings */ + u32 cp_ring_size; + u32 cp_ring_mask; + u32 cp_bit; + u16 cp_nr_pages; + +#define BNGE_L2_FLTR_HASH_SIZE 32 +#define BNGE_L2_FLTR_HASH_MASK (BNGE_L2_FLTR_HASH_SIZE - 1) + struct hlist_head l2_fltr_hash_tbl[BNGE_L2_FLTR_HASH_SIZE]; + u32 hash_seed; + u64 toeplitz_prefix; +}; + +#define BNGE_DEFAULT_RX_RING_SIZE 511 +#define BNGE_DEFAULT_TX_RING_SIZE 511 + +int bnge_netdev_alloc(struct bnge_dev *bd, int max_irqs); +void bnge_netdev_free(struct bnge_dev *bd); +void bnge_set_ring_params(struct bnge_dev *bd); + +#if (BNGE_PAGE_SHIFT == 16) +#define MAX_RX_PAGES_AGG_ENA 1 +#define MAX_RX_PAGES 4 +#define MAX_RX_AGG_PAGES 4 +#define MAX_TX_PAGES 1 +#define MAX_CP_PAGES 16 +#else +#define MAX_RX_PAGES_AGG_ENA 8 +#define MAX_RX_PAGES 32 +#define MAX_RX_AGG_PAGES 32 +#define MAX_TX_PAGES 8 +#define MAX_CP_PAGES 128 +#endif + +#define BNGE_RX_PAGE_SIZE (1 << BNGE_RX_PAGE_SHIFT) + +#define RX_DESC_CNT (BNGE_PAGE_SIZE / sizeof(struct 
rx_bd)) +#define TX_DESC_CNT (BNGE_PAGE_SIZE / sizeof(struct tx_bd)) +#define CP_DESC_CNT (BNGE_PAGE_SIZE / sizeof(struct tx_cmp)) +#define SW_RXBD_RING_SIZE (sizeof(struct bnge_sw_rx_bd) * RX_DESC_CNT) +#define HW_RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT) +#define SW_RXBD_AGG_RING_SIZE (sizeof(struct bnge_sw_rx_agg_bd) * RX_DESC_CNT) +#define SW_TXBD_RING_SIZE (sizeof(struct bnge_sw_tx_bd) * TX_DESC_CNT) +#define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT) +#define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT) +#define BNGE_MAX_RX_DESC_CNT (RX_DESC_CNT * MAX_RX_PAGES - 1) +#define BNGE_MAX_RX_DESC_CNT_JUM_ENA (RX_DESC_CNT * MAX_RX_PAGES_AGG_ENA - 1) +#define BNGE_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1) +#define BNGE_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1) + +#endif /* _BNGE_NETDEV_H_ */ diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_resc.c b/drivers/net/ethernet/broadcom/bnge/bnge_resc.c new file mode 100644 index 000000000000..c79a3607a1b7 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnge/bnge_resc.c @@ -0,0 +1,605 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2025 Broadcom. + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/ethtool.h> +#include <linux/netdevice.h> + +#include "bnge.h" +#include "bnge_hwrm.h" +#include "bnge_hwrm_lib.h" +#include "bnge_resc.h" + +static u16 bnge_num_tx_to_cp(struct bnge_dev *bd, u16 tx) +{ + u16 tcs = bd->num_tc; + + if (!tcs) + tcs = 1; + + return tx / tcs; +} + +static u16 bnge_get_max_func_irqs(struct bnge_dev *bd) +{ + struct bnge_hw_resc *hw_resc = &bd->hw_resc; + + return min_t(u16, hw_resc->max_irqs, hw_resc->max_nqs); +} + +static unsigned int bnge_get_max_func_stat_ctxs(struct bnge_dev *bd) +{ + return bd->hw_resc.max_stat_ctxs; +} + +static unsigned int bnge_get_max_func_cp_rings(struct bnge_dev *bd) +{ + return bd->hw_resc.max_cp_rings; +} + +static int bnge_aux_get_dflt_msix(struct bnge_dev *bd) +{ + int roce_msix = BNGE_MAX_ROCE_MSIX; + + return min_t(int, roce_msix, num_online_cpus() + 1); +} + +static u16 bnge_aux_get_msix(struct bnge_dev *bd) +{ + if (bnge_is_roce_en(bd)) + return bd->aux_num_msix; + + return 0; +} + +static void bnge_aux_set_msix_num(struct bnge_dev *bd, u16 num) +{ + if (bnge_is_roce_en(bd)) + bd->aux_num_msix = num; +} + +static u16 bnge_aux_get_stat_ctxs(struct bnge_dev *bd) +{ + if (bnge_is_roce_en(bd)) + return bd->aux_num_stat_ctxs; + + return 0; +} + +static void bnge_aux_set_stat_ctxs(struct bnge_dev *bd, u16 num_aux_ctx) +{ + if (bnge_is_roce_en(bd)) + bd->aux_num_stat_ctxs = num_aux_ctx; +} + +static u16 bnge_func_stat_ctxs_demand(struct bnge_dev *bd) +{ + return bd->nq_nr_rings + bnge_aux_get_stat_ctxs(bd); +} + +static int bnge_get_dflt_aux_stat_ctxs(struct bnge_dev *bd) +{ + int stat_ctx = 0; + + if (bnge_is_roce_en(bd)) { + stat_ctx = BNGE_MIN_ROCE_STAT_CTXS; + + if (!bd->pf.port_id && bd->port_count > 1) + stat_ctx++; + } + + return stat_ctx; +} + +static u16 bnge_nqs_demand(struct bnge_dev *bd) +{ + return bd->nq_nr_rings + bnge_aux_get_msix(bd); +} + +static u16 bnge_cprs_demand(struct bnge_dev *bd) +{ + return bd->tx_nr_rings + bd->rx_nr_rings; +} + +static u16 bnge_get_avail_msix(struct bnge_dev *bd, int num) +{ + u16 max_irq = bnge_get_max_func_irqs(bd); + u16 total_demand = bd->nq_nr_rings + num; + + if (max_irq < total_demand) { + num = max_irq - bd->nq_nr_rings; + if (num <= 0) + return 0; + } + + return num; +} + +static u16 bnge_num_cp_to_tx(struct bnge_dev *bd, u16 
tx_chunks) +{ + return tx_chunks * bd->num_tc; +} + +int bnge_fix_rings_count(u16 *rx, u16 *tx, u16 max, bool shared) +{ + u16 _rx = *rx, _tx = *tx; + + if (shared) { + *rx = min_t(u16, _rx, max); + *tx = min_t(u16, _tx, max); + } else { + if (max < 2) + return -ENOMEM; + while (_rx + _tx > max) { + if (_rx > _tx && _rx > 1) + _rx--; + else if (_tx > 1) + _tx--; + } + *rx = _rx; + *tx = _tx; + } + + return 0; +} + +static int bnge_adjust_rings(struct bnge_dev *bd, u16 *rx, + u16 *tx, u16 max_nq, bool sh) +{ + u16 tx_chunks = bnge_num_tx_to_cp(bd, *tx); + + if (tx_chunks != *tx) { + u16 tx_saved = tx_chunks, rc; + + rc = bnge_fix_rings_count(rx, &tx_chunks, max_nq, sh); + if (rc) + return rc; + if (tx_chunks != tx_saved) + *tx = bnge_num_cp_to_tx(bd, tx_chunks); + return 0; + } + + return bnge_fix_rings_count(rx, tx, max_nq, sh); +} + +static int bnge_cal_nr_rss_ctxs(u16 rx_rings) +{ + if (!rx_rings) + return 0; + + return bnge_adjust_pow_two(rx_rings - 1, + BNGE_RSS_TABLE_ENTRIES); +} + +static u16 bnge_rss_ctxs_in_use(struct bnge_dev *bd, + struct bnge_hw_rings *hwr) +{ + return bnge_cal_nr_rss_ctxs(hwr->grp); +} + +static u16 bnge_get_total_vnics(struct bnge_dev *bd, u16 rx_rings) +{ + return 1; +} + +static u32 bnge_get_rxfh_indir_size(struct bnge_dev *bd) +{ + return bnge_cal_nr_rss_ctxs(bd->rx_nr_rings) * + BNGE_RSS_TABLE_ENTRIES; +} + +static void bnge_set_dflt_rss_indir_tbl(struct bnge_dev *bd) +{ + u16 max_entries, pad; + u32 *rss_indir_tbl; + int i; + + max_entries = bnge_get_rxfh_indir_size(bd); + rss_indir_tbl = &bd->rss_indir_tbl[0]; + + for (i = 0; i < max_entries; i++) + rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, + bd->rx_nr_rings); + + pad = bd->rss_indir_tbl_entries - max_entries; + if (pad) + memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl)); +} + +static void bnge_copy_reserved_rings(struct bnge_dev *bd, + struct bnge_hw_rings *hwr) +{ + struct bnge_hw_resc *hw_resc = &bd->hw_resc; + + hwr->tx = hw_resc->resv_tx_rings; + hwr->rx = hw_resc->resv_rx_rings; + hwr->nq = hw_resc->resv_irqs; + hwr->cmpl = hw_resc->resv_cp_rings; + hwr->grp = hw_resc->resv_hw_ring_grps; + hwr->vnic = hw_resc->resv_vnics; + hwr->stat = hw_resc->resv_stat_ctxs; + hwr->rss_ctx = hw_resc->resv_rsscos_ctxs; +} + +static bool bnge_rings_ok(struct bnge_hw_rings *hwr) +{ + return hwr->tx && hwr->rx && hwr->nq && hwr->grp && hwr->vnic && + hwr->stat && hwr->cmpl; +} + +static bool bnge_need_reserve_rings(struct bnge_dev *bd) +{ + struct bnge_hw_resc *hw_resc = &bd->hw_resc; + u16 cprs = bnge_cprs_demand(bd); + u16 rx = bd->rx_nr_rings, stat; + u16 nqs = bnge_nqs_demand(bd); + u16 vnic; + + if (hw_resc->resv_tx_rings != bd->tx_nr_rings) + return true; + + vnic = bnge_get_total_vnics(bd, rx); + + if (bnge_is_agg_reqd(bd)) + rx <<= 1; + stat = bnge_func_stat_ctxs_demand(bd); + if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cprs || + hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat) + return true; + if (hw_resc->resv_irqs != nqs) + return true; + + return false; +} + +int bnge_reserve_rings(struct bnge_dev *bd) +{ + u16 aux_dflt_msix = bnge_aux_get_dflt_msix(bd); + struct bnge_hw_rings hwr = {0}; + u16 rx_rings, old_rx_rings; + u16 nq = bd->nq_nr_rings; + u16 aux_msix = 0; + bool sh = false; + u16 tx_cp; + int rc; + + if (!bnge_need_reserve_rings(bd)) + return 0; + + if (!bnge_aux_registered(bd)) { + aux_msix = bnge_get_avail_msix(bd, aux_dflt_msix); + if (!aux_msix) + bnge_aux_set_stat_ctxs(bd, 0); + + if (aux_msix > aux_dflt_msix) + aux_msix = aux_dflt_msix; + 
hwr.nq = nq + aux_msix; + } else { + hwr.nq = bnge_nqs_demand(bd); + } + + hwr.tx = bd->tx_nr_rings; + hwr.rx = bd->rx_nr_rings; + if (bd->flags & BNGE_EN_SHARED_CHNL) + sh = true; + hwr.cmpl = hwr.rx + hwr.tx; + + hwr.vnic = bnge_get_total_vnics(bd, hwr.rx); + + if (bnge_is_agg_reqd(bd)) + hwr.rx <<= 1; + hwr.grp = bd->rx_nr_rings; + hwr.rss_ctx = bnge_rss_ctxs_in_use(bd, &hwr); + hwr.stat = bnge_func_stat_ctxs_demand(bd); + old_rx_rings = bd->hw_resc.resv_rx_rings; + + rc = bnge_hwrm_reserve_rings(bd, &hwr); + if (rc) + return rc; + + bnge_copy_reserved_rings(bd, &hwr); + + rx_rings = hwr.rx; + if (bnge_is_agg_reqd(bd)) { + if (hwr.rx >= 2) + rx_rings = hwr.rx >> 1; + else + return -ENOMEM; + } + + rx_rings = min_t(u16, rx_rings, hwr.grp); + hwr.nq = min_t(u16, hwr.nq, bd->nq_nr_rings); + if (hwr.stat > bnge_aux_get_stat_ctxs(bd)) + hwr.stat -= bnge_aux_get_stat_ctxs(bd); + hwr.nq = min_t(u16, hwr.nq, hwr.stat); + + /* Adjust the rings */ + rc = bnge_adjust_rings(bd, &rx_rings, &hwr.tx, hwr.nq, sh); + if (bnge_is_agg_reqd(bd)) + hwr.rx = rx_rings << 1; + tx_cp = hwr.tx; + hwr.nq = sh ? max_t(u16, tx_cp, rx_rings) : tx_cp + rx_rings; + bd->tx_nr_rings = hwr.tx; + + if (rx_rings != bd->rx_nr_rings) + dev_warn(bd->dev, "RX rings resv reduced to %d than earlier %d requested\n", + rx_rings, bd->rx_nr_rings); + + bd->rx_nr_rings = rx_rings; + bd->nq_nr_rings = hwr.nq; + + if (!bnge_rings_ok(&hwr)) + return -ENOMEM; + + if (old_rx_rings != bd->hw_resc.resv_rx_rings) + bnge_set_dflt_rss_indir_tbl(bd); + + if (!bnge_aux_registered(bd)) { + u16 resv_msix, resv_ctx, aux_ctxs; + struct bnge_hw_resc *hw_resc; + + hw_resc = &bd->hw_resc; + resv_msix = hw_resc->resv_irqs - bd->nq_nr_rings; + aux_msix = min_t(u16, resv_msix, aux_msix); + bnge_aux_set_msix_num(bd, aux_msix); + resv_ctx = hw_resc->resv_stat_ctxs - bd->nq_nr_rings; + aux_ctxs = min(resv_ctx, bnge_aux_get_stat_ctxs(bd)); + bnge_aux_set_stat_ctxs(bd, aux_ctxs); + } + + return rc; +} + +int bnge_alloc_irqs(struct bnge_dev *bd) +{ + u16 aux_msix, tx_cp, num_entries; + int i, irqs_demand, rc; + u16 max, min = 1; + + irqs_demand = bnge_nqs_demand(bd); + max = bnge_get_max_func_irqs(bd); + if (irqs_demand > max) + irqs_demand = max; + + if (!(bd->flags & BNGE_EN_SHARED_CHNL)) + min = 2; + + irqs_demand = pci_alloc_irq_vectors(bd->pdev, min, irqs_demand, + PCI_IRQ_MSIX); + aux_msix = bnge_aux_get_msix(bd); + if (irqs_demand < 0 || irqs_demand < aux_msix) { + rc = -ENODEV; + goto err_free_irqs; + } + + num_entries = irqs_demand; + if (pci_msix_can_alloc_dyn(bd->pdev)) + num_entries = max; + bd->irq_tbl = kcalloc(num_entries, sizeof(*bd->irq_tbl), GFP_KERNEL); + if (!bd->irq_tbl) { + rc = -ENOMEM; + goto err_free_irqs; + } + + for (i = 0; i < irqs_demand; i++) + bd->irq_tbl[i].vector = pci_irq_vector(bd->pdev, i); + + bd->irqs_acquired = irqs_demand; + /* Reduce rings based upon num of vectors allocated. + * We dont need to consider NQs as they have been calculated + * and must be more than irqs_demand. + */ + rc = bnge_adjust_rings(bd, &bd->rx_nr_rings, + &bd->tx_nr_rings, + irqs_demand - aux_msix, min == 1); + if (rc) + goto err_free_irqs; + + tx_cp = bnge_num_tx_to_cp(bd, bd->tx_nr_rings); + bd->nq_nr_rings = (min == 1) ? 
+ max_t(u16, tx_cp, bd->rx_nr_rings) : + tx_cp + bd->rx_nr_rings; + + /* Readjust tx_nr_rings_per_tc */ + if (!bd->num_tc) + bd->tx_nr_rings_per_tc = bd->tx_nr_rings; + + return 0; + +err_free_irqs: + dev_err(bd->dev, "Failed to allocate IRQs err = %d\n", rc); + bnge_free_irqs(bd); + return rc; +} + +void bnge_free_irqs(struct bnge_dev *bd) +{ + pci_free_irq_vectors(bd->pdev); + kfree(bd->irq_tbl); + bd->irq_tbl = NULL; +} + +static void _bnge_get_max_rings(struct bnge_dev *bd, u16 *max_rx, + u16 *max_tx, u16 *max_nq) +{ + struct bnge_hw_resc *hw_resc = &bd->hw_resc; + u16 max_ring_grps = 0, max_cp; + int rc; + + *max_tx = hw_resc->max_tx_rings; + *max_rx = hw_resc->max_rx_rings; + *max_nq = min_t(int, bnge_get_max_func_irqs(bd), + hw_resc->max_stat_ctxs); + max_ring_grps = hw_resc->max_hw_ring_grps; + if (bnge_is_agg_reqd(bd)) + *max_rx >>= 1; + + max_cp = bnge_get_max_func_cp_rings(bd); + + /* Fix RX and TX rings according to number of CPs available */ + rc = bnge_fix_rings_count(max_rx, max_tx, max_cp, false); + if (rc) { + *max_rx = 0; + *max_tx = 0; + } + + *max_rx = min_t(int, *max_rx, max_ring_grps); +} + +static int bnge_get_max_rings(struct bnge_dev *bd, u16 *max_rx, + u16 *max_tx, bool shared) +{ + u16 rx, tx, nq; + + _bnge_get_max_rings(bd, &rx, &tx, &nq); + *max_rx = rx; + *max_tx = tx; + if (!rx || !tx || !nq) + return -ENOMEM; + + return bnge_fix_rings_count(max_rx, max_tx, nq, shared); +} + +static int bnge_get_dflt_rings(struct bnge_dev *bd, u16 *max_rx, u16 *max_tx, + bool shared) +{ + int rc; + + rc = bnge_get_max_rings(bd, max_rx, max_tx, shared); + if (rc) { + dev_info(bd->dev, "Not enough rings available\n"); + return rc; + } + + if (bnge_is_roce_en(bd)) { + int max_cp, max_stat, max_irq; + + /* Reserve minimum resources for RoCE */ + max_cp = bnge_get_max_func_cp_rings(bd); + max_stat = bnge_get_max_func_stat_ctxs(bd); + max_irq = bnge_get_max_func_irqs(bd); + if (max_cp <= BNGE_MIN_ROCE_CP_RINGS || + max_irq <= BNGE_MIN_ROCE_CP_RINGS || + max_stat <= BNGE_MIN_ROCE_STAT_CTXS) + return 0; + + max_cp -= BNGE_MIN_ROCE_CP_RINGS; + max_irq -= BNGE_MIN_ROCE_CP_RINGS; + max_stat -= BNGE_MIN_ROCE_STAT_CTXS; + max_cp = min_t(u16, max_cp, max_irq); + max_cp = min_t(u16, max_cp, max_stat); + rc = bnge_adjust_rings(bd, max_rx, max_tx, max_cp, shared); + if (rc) + rc = 0; + } + + return rc; +} + +/* In initial default shared ring setting, each shared ring must have a + * RX/TX ring pair. 
+ */ +static void bnge_trim_dflt_sh_rings(struct bnge_dev *bd) +{ + bd->nq_nr_rings = min_t(u16, bd->tx_nr_rings_per_tc, bd->rx_nr_rings); + bd->rx_nr_rings = bd->nq_nr_rings; + bd->tx_nr_rings_per_tc = bd->nq_nr_rings; + bd->tx_nr_rings = bd->tx_nr_rings_per_tc; +} + +static int bnge_net_init_dflt_rings(struct bnge_dev *bd, bool sh) +{ + u16 dflt_rings, max_rx_rings, max_tx_rings; + int rc; + + if (sh) + bd->flags |= BNGE_EN_SHARED_CHNL; + + dflt_rings = netif_get_num_default_rss_queues(); + + rc = bnge_get_dflt_rings(bd, &max_rx_rings, &max_tx_rings, sh); + if (rc) + return rc; + bd->rx_nr_rings = min_t(u16, dflt_rings, max_rx_rings); + bd->tx_nr_rings_per_tc = min_t(u16, dflt_rings, max_tx_rings); + if (sh) + bnge_trim_dflt_sh_rings(bd); + else + bd->nq_nr_rings = bd->tx_nr_rings_per_tc + bd->rx_nr_rings; + bd->tx_nr_rings = bd->tx_nr_rings_per_tc; + + rc = bnge_reserve_rings(bd); + if (rc && rc != -ENODEV) + dev_warn(bd->dev, "Unable to reserve tx rings\n"); + bd->tx_nr_rings_per_tc = bd->tx_nr_rings; + if (sh) + bnge_trim_dflt_sh_rings(bd); + + /* Rings may have been reduced, re-reserve them again */ + if (bnge_need_reserve_rings(bd)) { + rc = bnge_reserve_rings(bd); + if (rc && rc != -ENODEV) + dev_warn(bd->dev, "Fewer rings reservation failed\n"); + bd->tx_nr_rings_per_tc = bd->tx_nr_rings; + } + if (rc) { + bd->tx_nr_rings = 0; + bd->rx_nr_rings = 0; + } + + return rc; +} + +static int bnge_alloc_rss_indir_tbl(struct bnge_dev *bd) +{ + u16 entries; + + entries = BNGE_MAX_RSS_TABLE_ENTRIES; + + bd->rss_indir_tbl_entries = entries; + bd->rss_indir_tbl = + kmalloc_array(entries, sizeof(*bd->rss_indir_tbl), GFP_KERNEL); + if (!bd->rss_indir_tbl) + return -ENOMEM; + + return 0; +} + +int bnge_net_init_dflt_config(struct bnge_dev *bd) +{ + struct bnge_hw_resc *hw_resc; + int rc; + + rc = bnge_alloc_rss_indir_tbl(bd); + if (rc) + return rc; + + rc = bnge_net_init_dflt_rings(bd, true); + if (rc) + goto err_free_tbl; + + hw_resc = &bd->hw_resc; + bd->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows + + BNGE_L2_FLTR_MAX_FLTR; + + return 0; + +err_free_tbl: + kfree(bd->rss_indir_tbl); + bd->rss_indir_tbl = NULL; + return rc; +} + +void bnge_net_uninit_dflt_config(struct bnge_dev *bd) +{ + kfree(bd->rss_indir_tbl); + bd->rss_indir_tbl = NULL; +} + +void bnge_aux_init_dflt_config(struct bnge_dev *bd) +{ + bd->aux_num_msix = bnge_aux_get_dflt_msix(bd); + bd->aux_num_stat_ctxs = bnge_get_dflt_aux_stat_ctxs(bd); +} diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_resc.h b/drivers/net/ethernet/broadcom/bnge/bnge_resc.h new file mode 100644 index 000000000000..54ef1c7d8822 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnge/bnge_resc.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2025 Broadcom */ + +#ifndef _BNGE_RESC_H_ +#define _BNGE_RESC_H_ + +#include "bnge_netdev.h" +#include "bnge_rmem.h" + +struct bnge_hw_resc { + u16 min_rsscos_ctxs; + u16 max_rsscos_ctxs; + u16 resv_rsscos_ctxs; + u16 min_cp_rings; + u16 max_cp_rings; + u16 resv_cp_rings; + u16 min_tx_rings; + u16 max_tx_rings; + u16 resv_tx_rings; + u16 max_tx_sch_inputs; + u16 min_rx_rings; + u16 max_rx_rings; + u16 resv_rx_rings; + u16 min_hw_ring_grps; + u16 max_hw_ring_grps; + u16 resv_hw_ring_grps; + u16 min_l2_ctxs; + u16 max_l2_ctxs; + u16 min_vnics; + u16 max_vnics; + u16 resv_vnics; + u16 min_stat_ctxs; + u16 max_stat_ctxs; + u16 resv_stat_ctxs; + u16 max_nqs; + u16 max_irqs; + u16 resv_irqs; + u32 max_encap_records; + u32 max_decap_records; + u32 max_tx_em_flows; + u32 
max_tx_wm_flows; + u32 max_rx_em_flows; + u32 max_rx_wm_flows; +}; + +struct bnge_hw_rings { + u16 tx; + u16 rx; + u16 grp; + u16 nq; + u16 cmpl; + u16 stat; + u16 vnic; + u16 rss_ctx; +}; + +/* "TXRX", 2 hypens, plus maximum integer */ +#define BNGE_IRQ_NAME_EXTRA 17 +struct bnge_irq { + irq_handler_t handler; + unsigned int vector; + u8 requested:1; + u8 have_cpumask:1; + char name[IFNAMSIZ + BNGE_IRQ_NAME_EXTRA]; + cpumask_var_t cpu_mask; +}; + +int bnge_reserve_rings(struct bnge_dev *bd); +int bnge_fix_rings_count(u16 *rx, u16 *tx, u16 max, bool shared); +int bnge_alloc_irqs(struct bnge_dev *bd); +void bnge_free_irqs(struct bnge_dev *bd); +int bnge_net_init_dflt_config(struct bnge_dev *bd); +void bnge_net_uninit_dflt_config(struct bnge_dev *bd); +void bnge_aux_init_dflt_config(struct bnge_dev *bd); + +static inline u32 +bnge_adjust_pow_two(u32 total_ent, u16 ent_per_blk) +{ + u32 blks = total_ent / ent_per_blk; + + if (blks == 0 || blks == 1) + return ++blks; + + if (!is_power_of_2(blks)) + blks = roundup_pow_of_two(blks); + + return blks; +} + +#define BNGE_MAX_ROCE_MSIX 64 +#define BNGE_MIN_ROCE_CP_RINGS 2 +#define BNGE_MIN_ROCE_STAT_CTXS 1 + +#endif /* _BNGE_RESC_H_ */ diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c new file mode 100644 index 000000000000..52ada65943a0 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.c @@ -0,0 +1,438 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2025 Broadcom. + +#include <linux/etherdevice.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/mm.h> +#include <linux/dma-mapping.h> +#include <linux/vmalloc.h> +#include <linux/crash_dump.h> +#include <linux/bnxt/hsi.h> + +#include "bnge.h" +#include "bnge_hwrm_lib.h" +#include "bnge_rmem.h" + +static void bnge_init_ctx_mem(struct bnge_ctx_mem_type *ctxm, + void *p, int len) +{ + u8 init_val = ctxm->init_value; + u16 offset = ctxm->init_offset; + u8 *p2 = p; + int i; + + if (!init_val) + return; + if (offset == BNGE_CTX_INIT_INVALID_OFFSET) { + memset(p, init_val, len); + return; + } + for (i = 0; i < len; i += ctxm->entry_size) + *(p2 + i + offset) = init_val; +} + +void bnge_free_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem) +{ + struct pci_dev *pdev = bd->pdev; + int i; + + if (!rmem->pg_arr) + goto skip_pages; + + for (i = 0; i < rmem->nr_pages; i++) { + if (!rmem->pg_arr[i]) + continue; + + dma_free_coherent(&pdev->dev, rmem->page_size, + rmem->pg_arr[i], rmem->dma_arr[i]); + + rmem->pg_arr[i] = NULL; + } +skip_pages: + if (rmem->pg_tbl) { + size_t pg_tbl_size = rmem->nr_pages * 8; + + if (rmem->flags & BNGE_RMEM_USE_FULL_PAGE_FLAG) + pg_tbl_size = rmem->page_size; + dma_free_coherent(&pdev->dev, pg_tbl_size, + rmem->pg_tbl, rmem->dma_pg_tbl); + rmem->pg_tbl = NULL; + } + if (rmem->vmem_size && *rmem->vmem) { + vfree(*rmem->vmem); + *rmem->vmem = NULL; + } +} + +int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem) +{ + struct pci_dev *pdev = bd->pdev; + u64 valid_bit = 0; + int i; + + if (rmem->flags & (BNGE_RMEM_VALID_PTE_FLAG | BNGE_RMEM_RING_PTE_FLAG)) + valid_bit = PTU_PTE_VALID; + + if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { + size_t pg_tbl_size = rmem->nr_pages * 8; + + if (rmem->flags & BNGE_RMEM_USE_FULL_PAGE_FLAG) + pg_tbl_size = rmem->page_size; + rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size, + &rmem->dma_pg_tbl, + GFP_KERNEL); + if (!rmem->pg_tbl) + return -ENOMEM; + } + + for (i = 0; i < 
rmem->nr_pages; i++) { + u64 extra_bits = valid_bit; + + rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev, + rmem->page_size, + &rmem->dma_arr[i], + GFP_KERNEL); + if (!rmem->pg_arr[i]) + return -ENOMEM; + + if (rmem->ctx_mem) + bnge_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i], + rmem->page_size); + + if (rmem->nr_pages > 1 || rmem->depth > 0) { + if (i == rmem->nr_pages - 2 && + (rmem->flags & BNGE_RMEM_RING_PTE_FLAG)) + extra_bits |= PTU_PTE_NEXT_TO_LAST; + else if (i == rmem->nr_pages - 1 && + (rmem->flags & BNGE_RMEM_RING_PTE_FLAG)) + extra_bits |= PTU_PTE_LAST; + rmem->pg_tbl[i] = + cpu_to_le64(rmem->dma_arr[i] | extra_bits); + } + } + + if (rmem->vmem_size) { + *rmem->vmem = vzalloc(rmem->vmem_size); + if (!(*rmem->vmem)) + return -ENOMEM; + } + + return 0; +} + +static int bnge_alloc_ctx_one_lvl(struct bnge_dev *bd, + struct bnge_ctx_pg_info *ctx_pg) +{ + struct bnge_ring_mem_info *rmem = &ctx_pg->ring_mem; + + rmem->page_size = BNGE_PAGE_SIZE; + rmem->pg_arr = ctx_pg->ctx_pg_arr; + rmem->dma_arr = ctx_pg->ctx_dma_arr; + rmem->flags = BNGE_RMEM_VALID_PTE_FLAG; + if (rmem->depth >= 1) + rmem->flags |= BNGE_RMEM_USE_FULL_PAGE_FLAG; + return bnge_alloc_ring(bd, rmem); +} + +static int bnge_alloc_ctx_pg_tbls(struct bnge_dev *bd, + struct bnge_ctx_pg_info *ctx_pg, u32 mem_size, + u8 depth, struct bnge_ctx_mem_type *ctxm) +{ + struct bnge_ring_mem_info *rmem = &ctx_pg->ring_mem; + int rc; + + if (!mem_size) + return -EINVAL; + + ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNGE_PAGE_SIZE); + if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { + ctx_pg->nr_pages = 0; + return -EINVAL; + } + if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { + int nr_tbls, i; + + rmem->depth = 2; + ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), + GFP_KERNEL); + if (!ctx_pg->ctx_pg_tbl) + return -ENOMEM; + nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); + rmem->nr_pages = nr_tbls; + rc = bnge_alloc_ctx_one_lvl(bd, ctx_pg); + if (rc) + return rc; + for (i = 0; i < nr_tbls; i++) { + struct bnge_ctx_pg_info *pg_tbl; + + pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL); + if (!pg_tbl) + return -ENOMEM; + ctx_pg->ctx_pg_tbl[i] = pg_tbl; + rmem = &pg_tbl->ring_mem; + rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; + rmem->dma_pg_tbl = ctx_pg->ctx_dma_arr[i]; + rmem->depth = 1; + rmem->nr_pages = MAX_CTX_PAGES; + rmem->ctx_mem = ctxm; + if (i == (nr_tbls - 1)) { + int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; + + if (rem) + rmem->nr_pages = rem; + } + rc = bnge_alloc_ctx_one_lvl(bd, pg_tbl); + if (rc) + break; + } + } else { + rmem->nr_pages = DIV_ROUND_UP(mem_size, BNGE_PAGE_SIZE); + if (rmem->nr_pages > 1 || depth) + rmem->depth = 1; + rmem->ctx_mem = ctxm; + rc = bnge_alloc_ctx_one_lvl(bd, ctx_pg); + } + + return rc; +} + +static void bnge_free_ctx_pg_tbls(struct bnge_dev *bd, + struct bnge_ctx_pg_info *ctx_pg) +{ + struct bnge_ring_mem_info *rmem = &ctx_pg->ring_mem; + + if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || + ctx_pg->ctx_pg_tbl) { + int i, nr_tbls = rmem->nr_pages; + + for (i = 0; i < nr_tbls; i++) { + struct bnge_ctx_pg_info *pg_tbl; + struct bnge_ring_mem_info *rmem2; + + pg_tbl = ctx_pg->ctx_pg_tbl[i]; + if (!pg_tbl) + continue; + rmem2 = &pg_tbl->ring_mem; + bnge_free_ring(bd, rmem2); + ctx_pg->ctx_pg_arr[i] = NULL; + kfree(pg_tbl); + ctx_pg->ctx_pg_tbl[i] = NULL; + } + kfree(ctx_pg->ctx_pg_tbl); + ctx_pg->ctx_pg_tbl = NULL; + } + bnge_free_ring(bd, rmem); + ctx_pg->nr_pages = 0; +} + +static int bnge_setup_ctxm_pg_tbls(struct bnge_dev *bd, + struct bnge_ctx_mem_type *ctxm, u32 entries, 
+ u8 pg_lvl) +{ + struct bnge_ctx_pg_info *ctx_pg = ctxm->pg_info; + int i, rc = 0, n = 1; + u32 mem_size; + + if (!ctxm->entry_size || !ctx_pg) + return -EINVAL; + if (ctxm->instance_bmap) + n = hweight32(ctxm->instance_bmap); + if (ctxm->entry_multiple) + entries = roundup(entries, ctxm->entry_multiple); + entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries); + mem_size = entries * ctxm->entry_size; + for (i = 0; i < n && !rc; i++) { + ctx_pg[i].entries = entries; + rc = bnge_alloc_ctx_pg_tbls(bd, &ctx_pg[i], mem_size, pg_lvl, + ctxm->init_value ? ctxm : NULL); + } + + return rc; +} + +static int bnge_backing_store_cfg(struct bnge_dev *bd, u32 ena) +{ + struct bnge_ctx_mem_info *ctx = bd->ctx; + struct bnge_ctx_mem_type *ctxm; + u16 last_type; + int rc = 0; + u16 type; + + if (!ena) + return 0; + else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) + last_type = BNGE_CTX_MAX - 1; + else + last_type = BNGE_CTX_L2_MAX - 1; + ctx->ctx_arr[last_type].last = 1; + + for (type = 0 ; type < BNGE_CTX_V2_MAX; type++) { + ctxm = &ctx->ctx_arr[type]; + + rc = bnge_hwrm_func_backing_store(bd, ctxm, ctxm->last); + if (rc) + return rc; + } + + return 0; +} + +void bnge_free_ctx_mem(struct bnge_dev *bd) +{ + struct bnge_ctx_mem_info *ctx = bd->ctx; + u16 type; + + if (!ctx) + return; + + for (type = 0; type < BNGE_CTX_V2_MAX; type++) { + struct bnge_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; + struct bnge_ctx_pg_info *ctx_pg = ctxm->pg_info; + int i, n = 1; + + if (!ctx_pg) + continue; + if (ctxm->instance_bmap) + n = hweight32(ctxm->instance_bmap); + for (i = 0; i < n; i++) + bnge_free_ctx_pg_tbls(bd, &ctx_pg[i]); + + kfree(ctx_pg); + ctxm->pg_info = NULL; + } + + ctx->flags &= ~BNGE_CTX_FLAG_INITED; + kfree(ctx); + bd->ctx = NULL; +} + +#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \ + (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \ + FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \ + FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \ + FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \ + FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) + +int bnge_alloc_ctx_mem(struct bnge_dev *bd) +{ + struct bnge_ctx_mem_type *ctxm; + struct bnge_ctx_mem_info *ctx; + u32 l2_qps, qp1_qps, max_qps; + u32 ena, entries_sp, entries; + u32 srqs, max_srqs, min; + u32 num_mr, num_ah; + u32 extra_srqs = 0; + u32 extra_qps = 0; + u32 fast_qpmd_qps; + u8 pg_lvl = 1; + int i, rc; + + rc = bnge_hwrm_func_backing_store_qcaps(bd); + if (rc) { + dev_err(bd->dev, "Failed querying ctx mem caps, rc: %d\n", rc); + return rc; + } + + ctx = bd->ctx; + if (!ctx || (ctx->flags & BNGE_CTX_FLAG_INITED)) + return 0; + + ctxm = &ctx->ctx_arr[BNGE_CTX_QP]; + l2_qps = ctxm->qp_l2_entries; + qp1_qps = ctxm->qp_qp1_entries; + fast_qpmd_qps = ctxm->qp_fast_qpmd_entries; + max_qps = ctxm->max_entries; + ctxm = &ctx->ctx_arr[BNGE_CTX_SRQ]; + srqs = ctxm->srq_l2_entries; + max_srqs = ctxm->max_entries; + ena = 0; + if (bnge_is_roce_en(bd) && !is_kdump_kernel()) { + pg_lvl = 2; + extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps); + /* allocate extra qps if fast qp destroy feature enabled */ + extra_qps += fast_qpmd_qps; + extra_srqs = min_t(u32, 8192, max_srqs - srqs); + if (fast_qpmd_qps) + ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD; + } + + ctxm = &ctx->ctx_arr[BNGE_CTX_QP]; + rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, l2_qps + qp1_qps + extra_qps, + pg_lvl); + if (rc) + return rc; + + ctxm = &ctx->ctx_arr[BNGE_CTX_SRQ]; + rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, srqs + extra_srqs, pg_lvl); + if (rc) + return rc; + + ctxm = 
&ctx->ctx_arr[BNGE_CTX_CQ]; + rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, ctxm->cq_l2_entries + + extra_qps * 2, pg_lvl); + if (rc) + return rc; + + ctxm = &ctx->ctx_arr[BNGE_CTX_VNIC]; + rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, ctxm->max_entries, 1); + if (rc) + return rc; + + ctxm = &ctx->ctx_arr[BNGE_CTX_STAT]; + rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, ctxm->max_entries, 1); + if (rc) + return rc; + + if (!bnge_is_roce_en(bd)) + goto skip_rdma; + + ctxm = &ctx->ctx_arr[BNGE_CTX_MRAV]; + /* 128K extra is needed to accommodate static AH context + * allocation by f/w. + */ + num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256); + num_ah = min_t(u32, num_mr, 1024 * 128); + ctxm->split_entry_cnt = BNGE_CTX_MRAV_AV_SPLIT_ENTRY + 1; + if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah) + ctxm->mrav_av_entries = num_ah; + + rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, num_mr + num_ah, 2); + if (rc) + return rc; + ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; + + ctxm = &ctx->ctx_arr[BNGE_CTX_TIM]; + rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, l2_qps + qp1_qps + extra_qps, 1); + if (rc) + return rc; + ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; + +skip_rdma: + ctxm = &ctx->ctx_arr[BNGE_CTX_STQM]; + min = ctxm->min_entries; + entries_sp = ctx->ctx_arr[BNGE_CTX_VNIC].vnic_entries + l2_qps + + 2 * (extra_qps + qp1_qps) + min; + rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, entries_sp, 2); + if (rc) + return rc; + + ctxm = &ctx->ctx_arr[BNGE_CTX_FTQM]; + entries = l2_qps + 2 * (extra_qps + qp1_qps); + rc = bnge_setup_ctxm_pg_tbls(bd, ctxm, entries, 2); + if (rc) + return rc; + for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) + ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; + ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; + + rc = bnge_backing_store_cfg(bd, ena); + if (rc) { + dev_err(bd->dev, "Failed configuring ctx mem, rc: %d\n", rc); + return rc; + } + ctx->flags |= BNGE_CTX_FLAG_INITED; + + return 0; +} diff --git a/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h new file mode 100644 index 000000000000..300f1d8268ef --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnge/bnge_rmem.h @@ -0,0 +1,188 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2025 Broadcom */ + +#ifndef _BNGE_RMEM_H_ +#define _BNGE_RMEM_H_ + +struct bnge_ctx_mem_type; +struct bnge_dev; + +#define PTU_PTE_VALID 0x1UL +#define PTU_PTE_LAST 0x2UL +#define PTU_PTE_NEXT_TO_LAST 0x4UL + +struct bnge_ring_mem_info { + /* Number of pages to next level */ + int nr_pages; + int page_size; + u16 flags; +#define BNGE_RMEM_VALID_PTE_FLAG 1 +#define BNGE_RMEM_RING_PTE_FLAG 2 +#define BNGE_RMEM_USE_FULL_PAGE_FLAG 4 + + u16 depth; + + void **pg_arr; + dma_addr_t *dma_arr; + + __le64 *pg_tbl; + dma_addr_t dma_pg_tbl; + + int vmem_size; + void **vmem; + + struct bnge_ctx_mem_type *ctx_mem; +}; + +/* The hardware supports certain page sizes. + * Use the supported page sizes to allocate the rings. 
+ */ +#if (PAGE_SHIFT < 12) +#define BNGE_PAGE_SHIFT 12 +#elif (PAGE_SHIFT <= 13) +#define BNGE_PAGE_SHIFT PAGE_SHIFT +#elif (PAGE_SHIFT < 16) +#define BNGE_PAGE_SHIFT 13 +#else +#define BNGE_PAGE_SHIFT 16 +#endif +#define BNGE_PAGE_SIZE (1 << BNGE_PAGE_SHIFT) +/* The RXBD length is 16-bit so we can only support page sizes < 64K */ +#if (PAGE_SHIFT > 15) +#define BNGE_RX_PAGE_SHIFT 15 +#else +#define BNGE_RX_PAGE_SHIFT PAGE_SHIFT +#endif +#define MAX_CTX_PAGES (BNGE_PAGE_SIZE / 8) +#define MAX_CTX_TOTAL_PAGES (MAX_CTX_PAGES * MAX_CTX_PAGES) + +struct bnge_ctx_pg_info { + u32 entries; + u32 nr_pages; + void *ctx_pg_arr[MAX_CTX_PAGES]; + dma_addr_t ctx_dma_arr[MAX_CTX_PAGES]; + struct bnge_ring_mem_info ring_mem; + struct bnge_ctx_pg_info **ctx_pg_tbl; +}; + +#define BNGE_MAX_TQM_SP_RINGS 1 +#define BNGE_MAX_TQM_FP_RINGS 8 +#define BNGE_MAX_TQM_RINGS \ + (BNGE_MAX_TQM_SP_RINGS + BNGE_MAX_TQM_FP_RINGS) +#define BNGE_BACKING_STORE_CFG_LEGACY_LEN 256 +#define BNGE_SET_CTX_PAGE_ATTR(attr) \ +do { \ + if (BNGE_PAGE_SIZE == 0x2000) \ + attr = FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K; \ + else if (BNGE_PAGE_SIZE == 0x10000) \ + attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K; \ + else \ + attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K; \ +} while (0) + +#define BNGE_CTX_MRAV_AV_SPLIT_ENTRY 0 + +#define BNGE_CTX_QP \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP +#define BNGE_CTX_SRQ \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ +#define BNGE_CTX_CQ \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ +#define BNGE_CTX_VNIC \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC +#define BNGE_CTX_STAT \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT +#define BNGE_CTX_STQM \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING +#define BNGE_CTX_FTQM \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING +#define BNGE_CTX_MRAV \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV +#define BNGE_CTX_TIM \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM +#define BNGE_CTX_TCK \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK +#define BNGE_CTX_RCK \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK +#define BNGE_CTX_MTQM \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING +#define BNGE_CTX_SQDBS \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW +#define BNGE_CTX_RQDBS \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW +#define BNGE_CTX_SRQDBS \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW +#define BNGE_CTX_CQDBS \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW +#define BNGE_CTX_SRT_TRACE \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE +#define BNGE_CTX_SRT2_TRACE \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE +#define BNGE_CTX_CRT_TRACE \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE +#define BNGE_CTX_CRT2_TRACE \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE +#define BNGE_CTX_RIGP0_TRACE \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE +#define BNGE_CTX_L2_HWRM_TRACE \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE +#define BNGE_CTX_ROCE_HWRM_TRACE \ + FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE + +#define BNGE_CTX_MAX (BNGE_CTX_TIM + 1) +#define BNGE_CTX_L2_MAX (BNGE_CTX_FTQM + 1) +#define BNGE_CTX_INV ((u16)-1) + +#define BNGE_CTX_V2_MAX \ + (FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE + 1) + +#define BNGE_BS_CFG_ALL_DONE \ + FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE + +struct bnge_ctx_mem_type { + u16 type; + u16 entry_size; + u32 flags; +#define BNGE_CTX_MEM_TYPE_VALID \ + FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID + u32 instance_bmap; + u8 init_value; + u8 
entry_multiple;
+	u16	init_offset;
+#define	BNGE_CTX_INIT_INVALID_OFFSET	0xffff
+	u32	max_entries;
+	u32	min_entries;
+	u8	last:1;
+	u8	split_entry_cnt;
+#define BNGE_MAX_SPLIT_ENTRY	4
+	union {
+		struct {
+			u32	qp_l2_entries;
+			u32	qp_qp1_entries;
+			u32	qp_fast_qpmd_entries;
+		};
+		u32	srq_l2_entries;
+		u32	cq_l2_entries;
+		u32	vnic_entries;
+		struct {
+			u32	mrav_av_entries;
+			u32	mrav_num_entries_units;
+		};
+		u32	split[BNGE_MAX_SPLIT_ENTRY];
+	};
+	struct bnge_ctx_pg_info	*pg_info;
+};
+
+struct bnge_ctx_mem_info {
+	u8	tqm_fp_rings_count;
+	u32	flags;
+#define BNGE_CTX_FLAG_INITED	0x01
+	struct bnge_ctx_mem_type	ctx_arr[BNGE_CTX_V2_MAX];
+};
+
+int bnge_alloc_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem);
+void bnge_free_ring(struct bnge_dev *bd, struct bnge_ring_mem_info *rmem);
+int bnge_alloc_ctx_mem(struct bnge_dev *bd);
+void bnge_free_ctx_mem(struct bnge_dev *bd);
+
+#endif	/* _BNGE_RMEM_H_ */
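
As a quick sanity check of the ring geometry the patch derives, the following standalone user-space sketch (not part of the patch set) mirrors the arithmetic of bnge_adjust_pow_two() and bnge_set_ring_params() for the default 511-entry rings. PAGE_SZ, adjust_pow_two() and the 16-byte descriptor sizes are local stand-ins that assume a 4 KiB BNGE_PAGE_SIZE and the rx_bd/tx_bd/tx_cmp layouts from bnge_netdev.h.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SZ		4096u			/* assumed BNGE_PAGE_SIZE (4 KiB pages) */
#define RX_DESC_CNT	(PAGE_SZ / 16u)		/* sizeof(struct rx_bd)  == 16 */
#define TX_DESC_CNT	(PAGE_SZ / 16u)		/* sizeof(struct tx_bd)  == 16 */
#define CP_DESC_CNT	(PAGE_SZ / 16u)		/* sizeof(struct tx_cmp) == 16 */

/* Local copy of the rounding rule in bnge_adjust_pow_two(): descriptor
 * pages needed for total_ent entries, rounded up to a power of two.
 */
static uint32_t adjust_pow_two(uint32_t total_ent, uint16_t ent_per_blk)
{
	uint32_t blks = total_ent / ent_per_blk;
	uint32_t p = 1;

	if (blks == 0 || blks == 1)
		return blks + 1;

	while (p < blks)			/* roundup_pow_of_two() */
		p <<= 1;

	return p;
}

int main(void)
{
	uint32_t rx_ring_size = 511;		/* BNGE_DEFAULT_RX_RING_SIZE */
	uint32_t tx_ring_size = 511;		/* BNGE_DEFAULT_TX_RING_SIZE */

	uint32_t rx_nr_pages = adjust_pow_two(rx_ring_size, RX_DESC_CNT);
	uint32_t tx_nr_pages = adjust_pow_two(tx_ring_size, TX_DESC_CNT);
	uint32_t rx_ring_mask = rx_nr_pages * RX_DESC_CNT - 1;
	uint32_t tx_ring_mask = tx_nr_pages * TX_DESC_CNT - 1;

	/* Completion ring sizing as in bnge_set_ring_params() with TPA off
	 * and no aggregation ring: 2 * RX entries + TX entries.
	 */
	uint32_t cp_ring_size = 2 * rx_ring_size + tx_ring_size;
	uint32_t cp_nr_pages = adjust_pow_two(cp_ring_size, CP_DESC_CNT);

	printf("rx: %u pages, ring mask 0x%x\n", rx_nr_pages, rx_ring_mask);
	printf("tx: %u pages, ring mask 0x%x\n", tx_nr_pages, tx_ring_mask);
	printf("cp: %u entries, %u pages\n", cp_ring_size, cp_nr_pages);
	return 0;
}

Under those assumptions the sketch prints 2 pages and a 0x1ff ring mask for the 511-entry RX and TX rings, and 1533 completion entries spread over 8 pages, which matches the masks bnge_set_ring_params() computes before the MAX_CP_PAGES clamp is applied.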