Diffstat (limited to 'drivers/net/ethernet/qlogic')
49 files changed, 12768 insertions, 9810 deletions
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 344ea1143454..07dd3c3b1771 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -463,6 +463,7 @@ netxen_read_mac_addr(struct netxen_adapter *adapter) u64 mac_addr; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; + u8 addr[ETH_ALEN]; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0) @@ -474,7 +475,8 @@ netxen_read_mac_addr(struct netxen_adapter *adapter) p = (unsigned char *)&mac_addr; for (i = 0; i < 6; i++) - netdev->dev_addr[i] = *(p + 5 - i); + addr[i] = *(p + 5 - i); + eth_hw_addr_set(netdev, addr); memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len); @@ -500,7 +502,7 @@ static int netxen_nic_set_mac(struct net_device *netdev, void *p) } memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + eth_hw_addr_set(netdev, addr->sa_data); adapter->macaddr_set(adapter, addr->sa_data); if (netif_running(netdev)) { @@ -842,7 +844,7 @@ netxen_check_options(struct netxen_adapter *adapter) adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build); /* Get FW Mini Coredump template and store it */ - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { if (adapter->mdump.md_template == NULL || adapter->fw_version > prev_fw_version) { kfree(adapter->mdump.md_template); diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index d58e021614cd..d613095b78e0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -23,6 +23,8 @@ #include <linux/qed/qed_if.h> #include "qed_debug.h" #include "qed_hsi.h" +#include "qed_dbg_hsi.h" +#include "qed_mfw_hsi.h" extern const struct qed_common_ops qed_common_ops_pass; @@ -89,14 +91,14 @@ static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS) } #define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \ - ((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \ + ((sizeof(type_name) + (u32)(1 << ((p_hwfn)->cdev->cache_shift)) - 1) & \ ~((1 << (p_hwfn->cdev->cache_shift)) - 1)) -#define for_each_hwfn(cdev, i) for (i = 0; i < cdev->num_hwfns; i++) +#define for_each_hwfn(cdev, i) for (i = 0; i < (cdev)->num_hwfns; i++) #define D_TRINE(val, cond1, cond2, true1, true2, def) \ - (val == (cond1) ? true1 : \ - (val == (cond2) ? true2 : def)) + ((val) == (cond1) ? true1 : \ + ((val) == (cond2) ? true2 : def)) /* forward */ struct qed_ptt_pool; @@ -510,7 +512,7 @@ enum qed_hsi_def_type { struct qed_simd_fp_handler { void *token; - void (*func)(void *); + void (*func)(void *cookie); }; enum qed_slowpath_wq_flag { @@ -703,8 +705,6 @@ struct qed_dev { #define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && CHIP_REV_IS_B0(dev)) #define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH) #define QED_IS_K2(dev) QED_IS_AH(dev) -#define QED_IS_E4(dev) (QED_IS_BB(dev) || QED_IS_AH(dev)) -#define QED_IS_E5(dev) ((dev)->type == QED_DEV_TYPE_E5) u16 vendor_id; @@ -875,14 +875,14 @@ u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type); #define NUM_OF_BTB_BLOCKS(dev) \ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_BTB_BLOCKS) - /** - * @brief qed_concrete_to_sw_fid - get the sw function id from - * the concrete value. 
+ * qed_concrete_to_sw_fid(): Get the sw function id from + * the concrete value. * - * @param concrete_fid + * @cdev: Qed dev pointer. + * @concrete_fid: Concrete fid. * - * @return inline u8 + * Return: inline u8. */ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, u32 concrete_fid) @@ -902,7 +902,6 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, } #define PKT_LB_TC 9 -#define MAX_NUM_VOQS_E4 20 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate); void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, @@ -914,7 +913,7 @@ int qed_device_num_engines(struct qed_dev *cdev); void qed_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb, u8 *mac); -#define QED_LEADING_HWFN(dev) (&dev->hwfns[0]) +#define QED_LEADING_HWFN(dev) (&(dev)->hwfns[0]) #define QED_IS_CMT(dev) ((dev)->num_hwfns > 1) /* Macros for getting the engine-affinitized hwfn (FIR: fcoe,iscsi,roce) */ #define QED_FIR_AFFIN_HWFN(dev) (&(dev)->hwfns[dev->fir_affin]) @@ -935,7 +934,7 @@ void qed_set_fw_mac_addr(__le16 *fw_msb, #define PQ_FLAGS_LLT (BIT(7)) #define PQ_FLAGS_MTC (BIT(8)) -/* physical queue index for cm context intialization */ +/* physical queue index for cm context initialization */ u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags); u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc); u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf); @@ -947,12 +946,18 @@ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn); void qed_db_recovery_execute(struct qed_hwfn *p_hwfn); bool qed_edpm_enabled(struct qed_hwfn *p_hwfn); +#define GET_GTT_REG_ADDR(__base, __offset, __idx) \ + ((__base) + __offset ## _GTT_OFFSET((__idx))) + +#define GET_GTT_BDQ_REG_ADDR(__base, __offset, __idx, __bdq_idx) \ + ((__base) + __offset ## _GTT_OFFSET((__idx), (__bdq_idx))) + /* Other Linux specific common definitions */ #define DP_NAME(cdev) ((cdev)->name) -#define REG_ADDR(cdev, offset) (void __iomem *)((u8 __iomem *)\ - (cdev->regview) + \ - (offset)) +#define REG_ADDR(cdev, offset) ((void __iomem *)((u8 __iomem *)\ + ((cdev)->regview) + \ + (offset))) #define REG_RD(cdev, offset) readl(REG_ADDR(cdev, offset)) #define REG_WR(cdev, offset, val) writel((u32)val, REG_ADDR(cdev, offset)) @@ -960,7 +965,7 @@ bool qed_edpm_enabled(struct qed_hwfn *p_hwfn); #define DOORBELL(cdev, db_addr, val) \ writel((u32)val, (void __iomem *)((u8 __iomem *)\ - (cdev->doorbells) + (db_addr))) + ((cdev)->doorbells) + (db_addr))) #define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \ qed_device_num_ports((_p_hwfn)->cdev)) @@ -998,4 +1003,5 @@ int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port); void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port); void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 src_port); void qed_llh_clear_all_filters(struct qed_dev *cdev); +unsigned long qed_get_epoch_time(void); #endif /* _QED_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index cb0f2a3a1ac9..452494f8c298 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -54,22 +54,22 @@ /* connection context union */ union conn_context { - struct e4_core_conn_context core_ctx; - struct e4_eth_conn_context eth_ctx; - struct e4_iscsi_conn_context iscsi_ctx; - struct e4_fcoe_conn_context fcoe_ctx; - struct e4_roce_conn_context roce_ctx; + struct core_conn_context core_ctx; + struct eth_conn_context eth_ctx; + struct iscsi_conn_context iscsi_ctx; + 
struct fcoe_conn_context fcoe_ctx; + struct roce_conn_context roce_ctx; }; /* TYPE-0 task context - iSCSI, FCOE */ union type0_task_context { - struct e4_iscsi_task_context iscsi_ctx; - struct e4_fcoe_task_context fcoe_ctx; + struct iscsi_task_context iscsi_ctx; + struct fcoe_task_context fcoe_ctx; }; /* TYPE-1 task context - ROCE */ union type1_task_context { - struct e4_rdma_task_context roce_ctx; + struct rdma_task_context roce_ctx; }; struct src_ent { diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index 8adb7ed0c12d..168ce2c50385 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -28,24 +28,23 @@ struct qed_tid_mem { }; /** - * @brief qedo_cid_get_cxt_info - Returns the context info for a specific cid + * qed_cxt_get_cid_info(): Returns the context info for a specific cidi. * + * @p_hwfn: HW device data. + * @p_info: In/out. * - * @param p_hwfn - * @param p_info in/out - * - * @return int + * Return: Int. */ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info); /** - * @brief qed_cxt_get_tid_mem_info + * qed_cxt_get_tid_mem_info(): Returns the tid mem info. * - * @param p_hwfn - * @param p_info + * @p_hwfn: HW device data. + * @p_info: in/out. * - * @return int + * Return: int. */ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn, struct qed_tid_mem *p_info); @@ -64,142 +63,155 @@ u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, enum protocol_type type, u32 *vf_cid); /** - * @brief qed_cxt_set_pf_params - Set the PF params for cxt init + * qed_cxt_set_pf_params(): Set the PF params for cxt init. + * + * @p_hwfn: HW device data. + * @rdma_tasks: Requested maximum. * - * @param p_hwfn - * @param rdma_tasks - requested maximum - * @return int + * Return: int. */ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks); /** - * @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters + * qed_cxt_cfg_ilt_compute(): Compute ILT init parameters. * - * @param p_hwfn - * @param last_line + * @p_hwfn: HW device data. + * @last_line: Last_line. * - * @return int + * Return: Int */ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *last_line); /** - * @brief qed_cxt_cfg_ilt_compute_excess - how many lines can be decreased + * qed_cxt_cfg_ilt_compute_excess(): How many lines can be decreased. + * + * @p_hwfn: HW device data. + * @used_lines: Used lines. * - * @param p_hwfn - * @param used_lines + * Return: Int. */ u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines); /** - * @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct + * qed_cxt_mngr_alloc(): Allocate and init the context manager struct. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_mngr_free + * qed_cxt_mngr_free() - Context manager free. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map + * qed_cxt_tables_alloc(): Allocate ILT shadow, Searcher T2, acquired map. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_mngr_setup - Reset the acquired CIDs + * qed_cxt_mngr_setup(): Reset the acquired CIDs. * - * @param p_hwfn + * @p_hwfn: HW device data. 
*/ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_hw_init_common - Initailze ILT and DQ, common phase, per path. - * + * qed_cxt_hw_init_common(): Initailze ILT and DQ, common phase, per path. * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_hw_init_pf - Initailze ILT and DQ, PF phase, per path. + * qed_cxt_hw_init_pf(): Initailze ILT and DQ, PF phase, per path. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. */ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_qm_init_pf - Initailze the QM PF phase, per path + * qed_qm_init_pf(): Initailze the QM PF phase, per path. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @is_pf_loading: Is pf pending. * - * @param p_hwfn - * @param p_ptt - * @param is_pf_loading + * Return: Void. */ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool is_pf_loading); /** - * @brief Reconfigures QM pf on the fly + * qed_qm_reconf(): Reconfigures QM pf on the fly. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int + * Return: Int. */ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); #define QED_CXT_PF_CID (0xff) /** - * @brief qed_cxt_release - Release a cid + * qed_cxt_release_cid(): Release a cid. * - * @param p_hwfn - * @param cid + * @p_hwfn: HW device data. + * @cid: Cid. + * + * Return: Void. */ void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid); /** - * @brief qed_cxt_release - Release a cid belonging to a vf-queue + * _qed_cxt_release_cid(): Release a cid belonging to a vf-queue. + * + * @p_hwfn: HW device data. + * @cid: Cid. + * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF. * - * @param p_hwfn - * @param cid - * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF + * Return: Void. */ void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid); /** - * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type + * qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type. * - * @param p_hwfn - * @param type - * @param p_cid + * @p_hwfn: HW device data. + * @type: Type. + * @p_cid: Pointer cid. * - * @return int + * Return: Int. */ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, enum protocol_type type, u32 *p_cid); /** - * @brief _qed_cxt_acquire - Acquire a new cid of a specific protocol type - * for a vf-queue + * _qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type + * for a vf-queue. * - * @param p_hwfn - * @param type - * @param p_cid - * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF + * @p_hwfn: HW device data. + * @type: Type. + * @p_cid: Pointer cid. + * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF. * - * @return int + * Return: Int. 
*/ int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, enum protocol_type type, u32 *p_cid, u8 vfid); @@ -334,7 +346,10 @@ struct qed_cxt_mngr { /* Maximal number of L2 steering filters */ u32 arfs_count; - u8 task_type_id; + u16 iscsi_task_pages; + u16 fcoe_task_pages; + u16 roce_task_pages; + u16 eth_task_pages; u16 task_ctx_size; u16 conn_ctx_size; }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h new file mode 100644 index 000000000000..9d5a0c9e1ca0 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h @@ -0,0 +1,1491 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2019-2021 Marvell International Ltd. + */ +#ifndef _QED_DBG_HSI_H +#define _QED_DBG_HSI_H + +#include <linux/types.h> +#include <linux/io.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/slab.h> + +/****************************************/ +/* Debug Tools HSI constants and macros */ +/****************************************/ + +enum block_id { + BLOCK_GRC, + BLOCK_MISCS, + BLOCK_MISC, + BLOCK_DBU, + BLOCK_PGLUE_B, + BLOCK_CNIG, + BLOCK_CPMU, + BLOCK_NCSI, + BLOCK_OPTE, + BLOCK_BMB, + BLOCK_PCIE, + BLOCK_MCP, + BLOCK_MCP2, + BLOCK_PSWHST, + BLOCK_PSWHST2, + BLOCK_PSWRD, + BLOCK_PSWRD2, + BLOCK_PSWWR, + BLOCK_PSWWR2, + BLOCK_PSWRQ, + BLOCK_PSWRQ2, + BLOCK_PGLCS, + BLOCK_DMAE, + BLOCK_PTU, + BLOCK_TCM, + BLOCK_MCM, + BLOCK_UCM, + BLOCK_XCM, + BLOCK_YCM, + BLOCK_PCM, + BLOCK_QM, + BLOCK_TM, + BLOCK_DORQ, + BLOCK_BRB, + BLOCK_SRC, + BLOCK_PRS, + BLOCK_TSDM, + BLOCK_MSDM, + BLOCK_USDM, + BLOCK_XSDM, + BLOCK_YSDM, + BLOCK_PSDM, + BLOCK_TSEM, + BLOCK_MSEM, + BLOCK_USEM, + BLOCK_XSEM, + BLOCK_YSEM, + BLOCK_PSEM, + BLOCK_RSS, + BLOCK_TMLD, + BLOCK_MULD, + BLOCK_YULD, + BLOCK_XYLD, + BLOCK_PRM, + BLOCK_PBF_PB1, + BLOCK_PBF_PB2, + BLOCK_RPB, + BLOCK_BTB, + BLOCK_PBF, + BLOCK_RDIF, + BLOCK_TDIF, + BLOCK_CDU, + BLOCK_CCFC, + BLOCK_TCFC, + BLOCK_IGU, + BLOCK_CAU, + BLOCK_UMAC, + BLOCK_XMAC, + BLOCK_MSTAT, + BLOCK_DBG, + BLOCK_NIG, + BLOCK_WOL, + BLOCK_BMBN, + BLOCK_IPC, + BLOCK_NWM, + BLOCK_NWS, + BLOCK_MS, + BLOCK_PHY_PCIE, + BLOCK_LED, + BLOCK_AVS_WRAP, + BLOCK_PXPREQBUS, + BLOCK_BAR0_MAP, + BLOCK_MCP_FIO, + BLOCK_LAST_INIT, + BLOCK_PRS_FC, + BLOCK_PBF_FC, + BLOCK_NIG_LB_FC, + BLOCK_NIG_LB_FC_PLLH, + BLOCK_NIG_TX_FC_PLLH, + BLOCK_NIG_TX_FC, + BLOCK_NIG_RX_FC_PLLH, + BLOCK_NIG_RX_FC, + MAX_BLOCK_ID +}; + +/* binary debug buffer types */ +enum bin_dbg_buffer_type { + BIN_BUF_DBG_MODE_TREE, + BIN_BUF_DBG_DUMP_REG, + BIN_BUF_DBG_DUMP_MEM, + BIN_BUF_DBG_IDLE_CHK_REGS, + BIN_BUF_DBG_IDLE_CHK_IMMS, + BIN_BUF_DBG_IDLE_CHK_RULES, + BIN_BUF_DBG_IDLE_CHK_PARSING_DATA, + BIN_BUF_DBG_ATTN_BLOCKS, + BIN_BUF_DBG_ATTN_REGS, + BIN_BUF_DBG_ATTN_INDEXES, + BIN_BUF_DBG_ATTN_NAME_OFFSETS, + BIN_BUF_DBG_BLOCKS, + BIN_BUF_DBG_BLOCKS_CHIP_DATA, + BIN_BUF_DBG_BUS_LINES, + BIN_BUF_DBG_BLOCKS_USER_DATA, + BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA, + BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS, + BIN_BUF_DBG_RESET_REGS, + BIN_BUF_DBG_PARSING_STRINGS, + MAX_BIN_DBG_BUFFER_TYPE +}; + +/* Attention bit mapping */ +struct dbg_attn_bit_mapping { + u16 data; +#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF +#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0 +#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1 +#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15 +}; + +/* Attention block per-type data */ +struct dbg_attn_block_type_data { + u16 names_offset; + u16 reserved1; + u8 
num_regs; + u8 reserved2; + u16 regs_offset; + +}; + +/* Block attentions */ +struct dbg_attn_block { + struct dbg_attn_block_type_data per_type_data[2]; +}; + +/* Attention register result */ +struct dbg_attn_reg_result { + u32 data; +#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF +#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0 +#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF +#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24 + u16 block_attn_offset; + u16 reserved; + u32 sts_val; + u32 mask_val; +}; + +/* Attention block result */ +struct dbg_attn_block_result { + u8 block_id; + u8 data; +#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3 +#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0 +#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F +#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2 + u16 names_offset; + struct dbg_attn_reg_result reg_results[15]; +}; + +/* Mode header */ +struct dbg_mode_hdr { + u16 data; +#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1 +#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0 +#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF +#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1 +}; + +/* Attention register */ +struct dbg_attn_reg { + struct dbg_mode_hdr mode; + u16 block_attn_offset; + u32 data; +#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF +#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0 +#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF +#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24 + u32 sts_clr_address; + u32 mask_address; +}; + +/* Attention types */ +enum dbg_attn_type { + ATTN_TYPE_INTERRUPT, + ATTN_TYPE_PARITY, + MAX_DBG_ATTN_TYPE +}; + +/* Block debug data */ +struct dbg_block { + u8 name[15]; + u8 associated_storm_letter; +}; + +/* Chip-specific block debug data */ +struct dbg_block_chip { + u8 flags; +#define DBG_BLOCK_CHIP_IS_REMOVED_MASK 0x1 +#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT 0 +#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK 0x1 +#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT 1 +#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK 0x1 +#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2 +#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK 0x1 +#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT 3 +#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK 0x1 +#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT 4 +#define DBG_BLOCK_CHIP_RESERVED0_MASK 0x7 +#define DBG_BLOCK_CHIP_RESERVED0_SHIFT 5 + u8 dbg_client_id; + u8 reset_reg_id; + u8 reset_reg_bit_offset; + struct dbg_mode_hdr dbg_bus_mode; + u16 reserved1; + u8 reserved2; + u8 num_of_dbg_bus_lines; + u16 dbg_bus_lines_offset; + u32 dbg_select_reg_addr; + u32 dbg_dword_enable_reg_addr; + u32 dbg_shift_reg_addr; + u32 dbg_force_valid_reg_addr; + u32 dbg_force_frame_reg_addr; +}; + +/* Chip-specific block user debug data */ +struct dbg_block_chip_user { + u8 num_of_dbg_bus_lines; + u8 has_latency_events; + u16 names_offset; +}; + +/* Block user debug data */ +struct dbg_block_user { + u8 name[16]; +}; + +/* Block Debug line data */ +struct dbg_bus_line { + u8 data; +#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF +#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0 +#define DBG_BUS_LINE_IS_256B_MASK 0x1 +#define DBG_BUS_LINE_IS_256B_SHIFT 4 +#define DBG_BUS_LINE_RESERVED_MASK 0x7 +#define DBG_BUS_LINE_RESERVED_SHIFT 5 + u8 group_sizes; +}; + +/* Condition header for registers dump */ +struct dbg_dump_cond_hdr { + struct dbg_mode_hdr mode; /* Mode header */ + u8 block_id; /* block ID */ + u8 data_size; /* size in dwords of the data following this header */ +}; + +/* Memory data for registers dump */ +struct dbg_dump_mem { + u32 dword0; +#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF 
+#define DBG_DUMP_MEM_ADDRESS_SHIFT 0 +#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF +#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24 + u32 dword1; +#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF +#define DBG_DUMP_MEM_LENGTH_SHIFT 0 +#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1 +#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24 +#define DBG_DUMP_MEM_RESERVED_MASK 0x7F +#define DBG_DUMP_MEM_RESERVED_SHIFT 25 +}; + +/* Register data for registers dump */ +struct dbg_dump_reg { + u32 data; +#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF +#define DBG_DUMP_REG_ADDRESS_SHIFT 0 +#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 +#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23 +#define DBG_DUMP_REG_LENGTH_MASK 0xFF +#define DBG_DUMP_REG_LENGTH_SHIFT 24 +}; + +/* Split header for registers dump */ +struct dbg_dump_split_hdr { + u32 hdr; +#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF +#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0 +#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF +#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24 +}; + +/* Condition header for idle check */ +struct dbg_idle_chk_cond_hdr { + struct dbg_mode_hdr mode; /* Mode header */ + u16 data_size; /* size in dwords of the data following this header */ +}; + +/* Idle Check condition register */ +struct dbg_idle_chk_cond_reg { + u32 data; +#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF +#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0 +#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1 +#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23 +#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF +#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24 + u16 num_entries; + u8 entry_size; + u8 start_entry; +}; + +/* Idle Check info register */ +struct dbg_idle_chk_info_reg { + u32 data; +#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF +#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0 +#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1 +#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23 +#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF +#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24 + u16 size; /* register size in dwords */ + struct dbg_mode_hdr mode; /* Mode header */ +}; + +/* Idle Check register */ +union dbg_idle_chk_reg { + struct dbg_idle_chk_cond_reg cond_reg; /* condition register */ + struct dbg_idle_chk_info_reg info_reg; /* info register */ +}; + +/* Idle Check result header */ +struct dbg_idle_chk_result_hdr { + u16 rule_id; /* Failing rule index */ + u16 mem_entry_id; /* Failing memory entry index */ + u8 num_dumped_cond_regs; /* number of dumped condition registers */ + u8 num_dumped_info_regs; /* number of dumped condition registers */ + u8 severity; /* from dbg_idle_chk_severity_types enum */ + u8 reserved; +}; + +/* Idle Check result register header */ +struct dbg_idle_chk_result_reg_hdr { + u8 data; +#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1 +#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0 +#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F +#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1 + u8 start_entry; /* index of the first checked entry */ + u16 size; /* register size in dwords */ +}; + +/* Idle Check rule */ +struct dbg_idle_chk_rule { + u16 rule_id; /* Idle Check rule ID */ + u8 severity; /* value from dbg_idle_chk_severity_types enum */ + u8 cond_id; /* Condition ID */ + u8 num_cond_regs; /* number of condition registers */ + u8 num_info_regs; /* number of info registers */ + u8 num_imms; /* number of immediates in the condition */ + u8 reserved1; + u16 reg_offset; /* offset of this rules registers in the idle check + * register array (in dbg_idle_chk_reg units). 
+ */ + u16 imm_offset; /* offset of this rules immediate values in the + * immediate values array (in dwords). + */ +}; + +/* Idle Check rule parsing data */ +struct dbg_idle_chk_rule_parsing_data { + u32 data; +#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1 +#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0 +#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF +#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1 +}; + +/* Idle check severity types */ +enum dbg_idle_chk_severity_types { + /* idle check failure should cause an error */ + IDLE_CHK_SEVERITY_ERROR, + /* idle check failure should cause an error only if theres no traffic */ + IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC, + /* idle check failure should cause a warning */ + IDLE_CHK_SEVERITY_WARNING, + MAX_DBG_IDLE_CHK_SEVERITY_TYPES +}; + +/* Reset register */ +struct dbg_reset_reg { + u32 data; +#define DBG_RESET_REG_ADDR_MASK 0xFFFFFF +#define DBG_RESET_REG_ADDR_SHIFT 0 +#define DBG_RESET_REG_IS_REMOVED_MASK 0x1 +#define DBG_RESET_REG_IS_REMOVED_SHIFT 24 +#define DBG_RESET_REG_RESERVED_MASK 0x7F +#define DBG_RESET_REG_RESERVED_SHIFT 25 +}; + +/* Debug Bus block data */ +struct dbg_bus_block_data { + u8 enable_mask; + u8 right_shift; + u8 force_valid_mask; + u8 force_frame_mask; + u8 dword_mask; + u8 line_num; + u8 hw_id; + u8 flags; +#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK 0x1 +#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0 +#define DBG_BUS_BLOCK_DATA_RESERVED_MASK 0x7F +#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT 1 +}; + +enum dbg_bus_clients { + DBG_BUS_CLIENT_RBCN, + DBG_BUS_CLIENT_RBCP, + DBG_BUS_CLIENT_RBCR, + DBG_BUS_CLIENT_RBCT, + DBG_BUS_CLIENT_RBCU, + DBG_BUS_CLIENT_RBCF, + DBG_BUS_CLIENT_RBCX, + DBG_BUS_CLIENT_RBCS, + DBG_BUS_CLIENT_RBCH, + DBG_BUS_CLIENT_RBCZ, + DBG_BUS_CLIENT_OTHER_ENGINE, + DBG_BUS_CLIENT_TIMESTAMP, + DBG_BUS_CLIENT_CPU, + DBG_BUS_CLIENT_RBCY, + DBG_BUS_CLIENT_RBCQ, + DBG_BUS_CLIENT_RBCM, + DBG_BUS_CLIENT_RBCB, + DBG_BUS_CLIENT_RBCW, + DBG_BUS_CLIENT_RBCV, + MAX_DBG_BUS_CLIENTS +}; + +/* Debug Bus constraint operation types */ +enum dbg_bus_constraint_ops { + DBG_BUS_CONSTRAINT_OP_EQ, + DBG_BUS_CONSTRAINT_OP_NE, + DBG_BUS_CONSTRAINT_OP_LT, + DBG_BUS_CONSTRAINT_OP_LTC, + DBG_BUS_CONSTRAINT_OP_LE, + DBG_BUS_CONSTRAINT_OP_LEC, + DBG_BUS_CONSTRAINT_OP_GT, + DBG_BUS_CONSTRAINT_OP_GTC, + DBG_BUS_CONSTRAINT_OP_GE, + DBG_BUS_CONSTRAINT_OP_GEC, + MAX_DBG_BUS_CONSTRAINT_OPS +}; + +/* Debug Bus trigger state data */ +struct dbg_bus_trigger_state_data { + u8 msg_len; + u8 constraint_dword_mask; + u8 storm_id; + u8 reserved; +}; + +/* Debug Bus memory address */ +struct dbg_bus_mem_addr { + u32 lo; + u32 hi; +}; + +/* Debug Bus PCI buffer data */ +struct dbg_bus_pci_buf_data { + struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */ + struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */ + u32 size; /* PCI buffer size in bytes */ +}; + +/* Debug Bus Storm EID range filter params */ +struct dbg_bus_storm_eid_range_params { + u8 min; /* Minimal event ID to filter on */ + u8 max; /* Maximal event ID to filter on */ +}; + +/* Debug Bus Storm EID mask filter params */ +struct dbg_bus_storm_eid_mask_params { + u8 val; /* Event ID value */ + u8 mask; /* Event ID mask. 1s in the mask = dont care bits. 
*/ +}; + +/* Debug Bus Storm EID filter params */ +union dbg_bus_storm_eid_params { + struct dbg_bus_storm_eid_range_params range; + struct dbg_bus_storm_eid_mask_params mask; +}; + +/* Debug Bus Storm data */ +struct dbg_bus_storm_data { + u8 enabled; + u8 mode; + u8 hw_id; + u8 eid_filter_en; + u8 eid_range_not_mask; + u8 cid_filter_en; + union dbg_bus_storm_eid_params eid_filter_params; + u32 cid; +}; + +/* Debug Bus data */ +struct dbg_bus_data { + u32 app_version; + u8 state; + u8 mode_256b_en; + u8 num_enabled_blocks; + u8 num_enabled_storms; + u8 target; + u8 one_shot_en; + u8 grc_input_en; + u8 timestamp_input_en; + u8 filter_en; + u8 adding_filter; + u8 filter_pre_trigger; + u8 filter_post_trigger; + u8 trigger_en; + u8 filter_constraint_dword_mask; + u8 next_trigger_state; + u8 next_constraint_id; + struct dbg_bus_trigger_state_data trigger_states[3]; + u8 filter_msg_len; + u8 rcv_from_other_engine; + u8 blocks_dword_mask; + u8 blocks_dword_overlap; + u32 hw_id_mask; + struct dbg_bus_pci_buf_data pci_buf; + struct dbg_bus_block_data blocks[132]; + struct dbg_bus_storm_data storms[6]; +}; + +/* Debug bus states */ +enum dbg_bus_states { + DBG_BUS_STATE_IDLE, + DBG_BUS_STATE_READY, + DBG_BUS_STATE_RECORDING, + DBG_BUS_STATE_STOPPED, + MAX_DBG_BUS_STATES +}; + +/* Debug Bus Storm modes */ +enum dbg_bus_storm_modes { + DBG_BUS_STORM_MODE_PRINTF, + DBG_BUS_STORM_MODE_PRAM_ADDR, + DBG_BUS_STORM_MODE_DRA_RW, + DBG_BUS_STORM_MODE_DRA_W, + DBG_BUS_STORM_MODE_LD_ST_ADDR, + DBG_BUS_STORM_MODE_DRA_FSM, + DBG_BUS_STORM_MODE_FAST_DBGMUX, + DBG_BUS_STORM_MODE_RH, + DBG_BUS_STORM_MODE_RH_WITH_STORE, + DBG_BUS_STORM_MODE_FOC, + DBG_BUS_STORM_MODE_EXT_STORE, + MAX_DBG_BUS_STORM_MODES +}; + +/* Debug bus target IDs */ +enum dbg_bus_targets { + DBG_BUS_TARGET_ID_INT_BUF, + DBG_BUS_TARGET_ID_NIG, + DBG_BUS_TARGET_ID_PCI, + MAX_DBG_BUS_TARGETS +}; + +/* GRC Dump data */ +struct dbg_grc_data { + u8 params_initialized; + u8 reserved1; + u16 reserved2; + u32 param_val[48]; +}; + +/* Debug GRC params */ +enum dbg_grc_params { + DBG_GRC_PARAM_DUMP_TSTORM, + DBG_GRC_PARAM_DUMP_MSTORM, + DBG_GRC_PARAM_DUMP_USTORM, + DBG_GRC_PARAM_DUMP_XSTORM, + DBG_GRC_PARAM_DUMP_YSTORM, + DBG_GRC_PARAM_DUMP_PSTORM, + DBG_GRC_PARAM_DUMP_REGS, + DBG_GRC_PARAM_DUMP_RAM, + DBG_GRC_PARAM_DUMP_PBUF, + DBG_GRC_PARAM_DUMP_IOR, + DBG_GRC_PARAM_DUMP_VFC, + DBG_GRC_PARAM_DUMP_CM_CTX, + DBG_GRC_PARAM_DUMP_PXP, + DBG_GRC_PARAM_DUMP_RSS, + DBG_GRC_PARAM_DUMP_CAU, + DBG_GRC_PARAM_DUMP_QM, + DBG_GRC_PARAM_DUMP_MCP, + DBG_GRC_PARAM_DUMP_DORQ, + DBG_GRC_PARAM_DUMP_CFC, + DBG_GRC_PARAM_DUMP_IGU, + DBG_GRC_PARAM_DUMP_BRB, + DBG_GRC_PARAM_DUMP_BTB, + DBG_GRC_PARAM_DUMP_BMB, + DBG_GRC_PARAM_RESERVD1, + DBG_GRC_PARAM_DUMP_MULD, + DBG_GRC_PARAM_DUMP_PRS, + DBG_GRC_PARAM_DUMP_DMAE, + DBG_GRC_PARAM_DUMP_TM, + DBG_GRC_PARAM_DUMP_SDM, + DBG_GRC_PARAM_DUMP_DIF, + DBG_GRC_PARAM_DUMP_STATIC, + DBG_GRC_PARAM_UNSTALL, + DBG_GRC_PARAM_RESERVED2, + DBG_GRC_PARAM_MCP_TRACE_META_SIZE, + DBG_GRC_PARAM_EXCLUDE_ALL, + DBG_GRC_PARAM_CRASH, + DBG_GRC_PARAM_PARITY_SAFE, + DBG_GRC_PARAM_DUMP_CM, + DBG_GRC_PARAM_DUMP_PHY, + DBG_GRC_PARAM_NO_MCP, + DBG_GRC_PARAM_NO_FW_VER, + DBG_GRC_PARAM_RESERVED3, + DBG_GRC_PARAM_DUMP_MCP_HW_DUMP, + DBG_GRC_PARAM_DUMP_ILT_CDUC, + DBG_GRC_PARAM_DUMP_ILT_CDUT, + DBG_GRC_PARAM_DUMP_CAU_EXT, + MAX_DBG_GRC_PARAMS +}; + +/* Debug status codes */ +enum dbg_status { + DBG_STATUS_OK, + DBG_STATUS_APP_VERSION_NOT_SET, + DBG_STATUS_UNSUPPORTED_APP_VERSION, + DBG_STATUS_DBG_BLOCK_NOT_RESET, + DBG_STATUS_INVALID_ARGS, + 
DBG_STATUS_OUTPUT_ALREADY_SET, + DBG_STATUS_INVALID_PCI_BUF_SIZE, + DBG_STATUS_PCI_BUF_ALLOC_FAILED, + DBG_STATUS_PCI_BUF_NOT_ALLOCATED, + DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS, + DBG_STATUS_NO_MATCHING_FRAMING_MODE, + DBG_STATUS_VFC_READ_ERROR, + DBG_STATUS_STORM_ALREADY_ENABLED, + DBG_STATUS_STORM_NOT_ENABLED, + DBG_STATUS_BLOCK_ALREADY_ENABLED, + DBG_STATUS_BLOCK_NOT_ENABLED, + DBG_STATUS_NO_INPUT_ENABLED, + DBG_STATUS_NO_FILTER_TRIGGER_256B, + DBG_STATUS_FILTER_ALREADY_ENABLED, + DBG_STATUS_TRIGGER_ALREADY_ENABLED, + DBG_STATUS_TRIGGER_NOT_ENABLED, + DBG_STATUS_CANT_ADD_CONSTRAINT, + DBG_STATUS_TOO_MANY_TRIGGER_STATES, + DBG_STATUS_TOO_MANY_CONSTRAINTS, + DBG_STATUS_RECORDING_NOT_STARTED, + DBG_STATUS_DATA_DIDNT_TRIGGER, + DBG_STATUS_NO_DATA_RECORDED, + DBG_STATUS_DUMP_BUF_TOO_SMALL, + DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED, + DBG_STATUS_UNKNOWN_CHIP, + DBG_STATUS_VIRT_MEM_ALLOC_FAILED, + DBG_STATUS_BLOCK_IN_RESET, + DBG_STATUS_INVALID_TRACE_SIGNATURE, + DBG_STATUS_INVALID_NVRAM_BUNDLE, + DBG_STATUS_NVRAM_GET_IMAGE_FAILED, + DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE, + DBG_STATUS_NVRAM_READ_FAILED, + DBG_STATUS_IDLE_CHK_PARSE_FAILED, + DBG_STATUS_MCP_TRACE_BAD_DATA, + DBG_STATUS_MCP_TRACE_NO_META, + DBG_STATUS_MCP_COULD_NOT_HALT, + DBG_STATUS_MCP_COULD_NOT_RESUME, + DBG_STATUS_RESERVED0, + DBG_STATUS_SEMI_FIFO_NOT_EMPTY, + DBG_STATUS_IGU_FIFO_BAD_DATA, + DBG_STATUS_MCP_COULD_NOT_MASK_PRTY, + DBG_STATUS_FW_ASSERTS_PARSE_FAILED, + DBG_STATUS_REG_FIFO_BAD_DATA, + DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA, + DBG_STATUS_DBG_ARRAY_NOT_SET, + DBG_STATUS_RESERVED1, + DBG_STATUS_NON_MATCHING_LINES, + DBG_STATUS_INSUFFICIENT_HW_IDS, + DBG_STATUS_DBG_BUS_IN_USE, + DBG_STATUS_INVALID_STORM_DBG_MODE, + DBG_STATUS_OTHER_ENGINE_BB_ONLY, + DBG_STATUS_FILTER_SINGLE_HW_ID, + DBG_STATUS_TRIGGER_SINGLE_HW_ID, + DBG_STATUS_MISSING_TRIGGER_STATE_STORM, + MAX_DBG_STATUS +}; + +/* Debug Storms IDs */ +enum dbg_storms { + DBG_TSTORM_ID, + DBG_MSTORM_ID, + DBG_USTORM_ID, + DBG_XSTORM_ID, + DBG_YSTORM_ID, + DBG_PSTORM_ID, + MAX_DBG_STORMS +}; + +/* Idle Check data */ +struct idle_chk_data { + u32 buf_size; + u8 buf_size_set; + u8 reserved1; + u16 reserved2; +}; + +struct pretend_params { + u8 split_type; + u8 reserved; + u16 split_id; +}; + +/* Debug Tools data (per HW function) + */ +struct dbg_tools_data { + struct dbg_grc_data grc; + struct dbg_bus_data bus; + struct idle_chk_data idle_chk; + u8 mode_enable[40]; + u8 block_in_reset[132]; + u8 chip_id; + u8 hw_type; + u8 num_ports; + u8 num_pfs_per_port; + u8 num_vfs; + u8 initialized; + u8 use_dmae; + u8 reserved; + struct pretend_params pretend; + u32 num_regs_read; +}; + +/* ILT Clients */ +enum ilt_clients { + ILT_CLI_CDUC, + ILT_CLI_CDUT, + ILT_CLI_QM, + ILT_CLI_TM, + ILT_CLI_SRC, + ILT_CLI_TSDM, + ILT_CLI_RGFS, + ILT_CLI_TGFS, + MAX_ILT_CLIENTS +}; + +/***************************** Public Functions *******************************/ + +/** + * qed_dbg_set_bin_ptr(): Sets a pointer to the binary data with debug + * arrays. + * + * @p_hwfn: HW device data. + * @bin_ptr: A pointer to the binary data with debug arrays. + * + * Return: enum dbg status. + */ +enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn, + const u8 * const bin_ptr); + +/** + * qed_read_regs(): Reads registers into a buffer (using GRC). + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf: Destination buffer. + * @addr: Source GRC address in dwords. + * @len: Number of registers to read. + * + * Return: Void. 
+ */ +void qed_read_regs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len); + +/** + * qed_read_fw_info(): Reads FW info from the chip. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @fw_info: (Out) a pointer to write the FW info into. + * + * Return: True if the FW info was read successfully from one of the Storms, + * or false if all Storms are in reset. + * + * The FW info contains FW-related information, such as the FW version, + * FW image (main/L2B/kuku), FW timestamp, etc. + * The FW info is read from the internal RAM of the first Storm that is not in + * reset. + */ +bool qed_read_fw_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct fw_info *fw_info); +/** + * qed_dbg_grc_config(): Sets the value of a GRC parameter. + * + * @p_hwfn: HW device data. + * @grc_param: GRC parameter. + * @val: Value to set. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - Grc_param is invalid. + * - Val is outside the allowed boundaries. + */ +enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn, + enum dbg_grc_params grc_param, u32 val); + +/** + * qed_dbg_grc_set_params_default(): Reverts all GRC parameters to their + * default value. + * + * @p_hwfn: HW device data. + * + * Return: Void. + */ +void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn); +/** + * qed_dbg_grc_get_dump_buf_size(): Returns the required buffer size for + * GRC Dump. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) required buffer size (in dwords) for the GRC Dump + * data. + * + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); + +/** + * qed_dbg_grc_dump(): Dumps GRC data into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the collected GRC data into. + * @buf_size_in_dwords:Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified dump buffer is too small. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_idle_chk_get_dump_buf_size(): Returns the required buffer size + * for idle check results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) required buffer size (in dwords) for the idle check + * data. + * + * return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); + +/** + * qed_dbg_idle_chk_dump: Performs idle check and writes the results + * into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the idle check data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. 
+ * - The specified buffer is too small. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_mcp_trace_get_dump_buf_size(): Returns the required buffer size + * for mcp trace results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for mcp trace data. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The trace data in MCP scratchpad contain an invalid signature. + * - The bundle ID in NVRAM is invalid. + * - The trace meta data cannot be found (in NVRAM or image file). + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); + +/** + * qed_dbg_mcp_trace_dump(): Performs mcp trace and writes the results + * into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the mcp trace data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * - The trace data in MCP scratchpad contain an invalid signature. + * - The bundle ID in NVRAM is invalid. + * - The trace meta data cannot be found (in NVRAM or image file). + * - The trace meta data cannot be read (from NVRAM or image file). + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_reg_fifo_get_dump_buf_size(): Returns the required buffer size + * for grc trace fifo results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for reg fifo data. + * + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); + +/** + * qed_dbg_reg_fifo_dump(): Reads the reg fifo and writes the results into + * the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the reg fifo data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * - DMAE transaction failed. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_igu_fifo_get_dump_buf_size(): Returns the required buffer size + * for the IGU fifo results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for the IGU fifo + * data. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. 
+ */ +enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); + +/** + * qed_dbg_igu_fifo_dump(): Reads the IGU fifo and writes the results into + * the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the IGU fifo data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set + * - The specified buffer is too small + * - DMAE transaction failed + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_protection_override_get_dump_buf_size(): Returns the required + * buffer size for protection override window results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for protection + * override data. + * + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. + */ +enum dbg_status +qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); +/** + * qed_dbg_protection_override_dump(): Reads protection override window + * entries and writes the results into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the protection override data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * @return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * - DMAE transaction failed. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); +/** + * qed_dbg_fw_asserts_get_dump_buf_size(): Returns the required buffer + * size for FW Asserts results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for FW Asserts data. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); +/** + * qed_dbg_fw_asserts_dump(): Reads the FW Asserts and writes the results + * into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the FW Asserts data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_read_attn(): Reads the attention registers of the specified + * block and type, and writes the results into the specified buffer. 
+ * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @block: Block ID. + * @attn_type: Attention type. + * @clear_status: Indicates if the attention status should be cleared. + * @results: (OUT) Pointer to write the read results into. + * + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum block_id block, + enum dbg_attn_type attn_type, + bool clear_status, + struct dbg_attn_block_result *results); + +/** + * qed_dbg_print_attn(): Prints attention registers values in the + * specified results struct. + * + * @p_hwfn: HW device data. + * @results: Pointer to the attention read results + * + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, + struct dbg_attn_block_result *results); + +/******************************* Data Types **********************************/ + +struct mcp_trace_format { + u32 data; +#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff +#define MCP_TRACE_FORMAT_MODULE_OFFSET 0 +#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000 +#define MCP_TRACE_FORMAT_LEVEL_OFFSET 16 +#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000 +#define MCP_TRACE_FORMAT_P1_SIZE_OFFSET 18 +#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000 +#define MCP_TRACE_FORMAT_P2_SIZE_OFFSET 20 +#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000 +#define MCP_TRACE_FORMAT_P3_SIZE_OFFSET 22 +#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000 +#define MCP_TRACE_FORMAT_LEN_OFFSET 24 + + char *format_str; +}; + +/* MCP Trace Meta data structure */ +struct mcp_trace_meta { + u32 modules_num; + char **modules; + u32 formats_num; + struct mcp_trace_format *formats; + bool is_allocated; +}; + +/* Debug Tools user data */ +struct dbg_tools_user_data { + struct mcp_trace_meta mcp_trace_meta; + const u32 *mcp_trace_user_meta_buf; +}; + +/******************************** Constants **********************************/ + +#define MAX_NAME_LEN 16 + +/***************************** Public Functions *******************************/ + +/** + * qed_dbg_user_set_bin_ptr(): Sets a pointer to the binary data with + * debug arrays. + * + * @p_hwfn: HW device data. + * @bin_ptr: a pointer to the binary data with debug arrays. + * + * Return: dbg_status. + */ +enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn, + const u8 * const bin_ptr); + +/** + * qed_dbg_alloc_user_data(): Allocates user debug data. + * + * @p_hwfn: HW device data. + * @user_data_ptr: (OUT) a pointer to the allocated memory. + * + * Return: dbg_status. + */ +enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn, + void **user_data_ptr); + +/** + * qed_dbg_get_status_str(): Returns a string for the specified status. + * + * @status: A debug status code. + * + * Return: A string for the specified status. + */ +const char *qed_dbg_get_status_str(enum dbg_status status); + +/** + * qed_get_idle_chk_results_buf_size(): Returns the required buffer size + * for idle check results (in bytes). + * + * @p_hwfn: HW device data. + * @dump_buf: idle check dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Error if the parsing fails, ok otherwise. 
+ */ +enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); +/** + * qed_print_idle_chk_results(): Prints idle check results + * + * @p_hwfn: HW device data. + * @dump_buf: idle check dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf: buffer for printing the idle check results. + * @num_errors: (OUT) number of errors found in idle check. + * @num_warnings: (OUT) number of warnings found in idle check. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf, + u32 *num_errors, + u32 *num_warnings); + +/** + * qed_dbg_mcp_trace_set_meta_data(): Sets the MCP Trace meta data. + * + * @p_hwfn: HW device data. + * @meta_buf: Meta buffer. + * + * Return: Void. + * + * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to + * no NVRAM access). + */ +void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn, + const u32 *meta_buf); + +/** + * qed_get_mcp_trace_results_buf_size(): Returns the required buffer size + * for MCP Trace results (in bytes). + * + * @p_hwfn: HW device data. + * @dump_buf: MCP Trace dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Rrror if the parsing fails, ok otherwise. + */ +enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); + +/** + * qed_print_mcp_trace_results(): Prints MCP Trace results + * + * @p_hwfn: HW device data. + * @dump_buf: MCP trace dump buffer, starting from the header. + * @num_dumped_dwords: Member of dwords that were dumped. + * @results_buf: Buffer for printing the mcp trace results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf); + +/** + * qed_print_mcp_trace_results_cont(): Prints MCP Trace results, and + * keeps the MCP trace meta data allocated, to support continuous MCP Trace + * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should + * be called to free the meta data. + * + * @p_hwfn: HW device data. + * @dump_buf: MVP trace dump buffer, starting from the header. + * @results_buf: Buffer for printing the mcp trace results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + char *results_buf); + +/** + * qed_print_mcp_trace_line(): Prints MCP Trace results for a single line + * + * @p_hwfn: HW device data. + * @dump_buf: MCP trace dump buffer, starting from the header. + * @num_dumped_bytes: Number of bytes that were dumped. + * @results_buf: Buffer for printing the mcp trace results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, + u8 *dump_buf, + u32 num_dumped_bytes, + char *results_buf); + +/** + * qed_mcp_trace_free_meta_data(): Frees the MCP Trace meta data. + * Should be called after continuous MCP Trace parsing. + * + * @p_hwfn: HW device data. + * + * Return: Void. 
+ */ +void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn); + +/** + * qed_get_reg_fifo_results_buf_size(): Returns the required buffer size + * for reg_fifo results (in bytes). + * + * @p_hwfn: HW device data. + * @dump_buf: Reg fifo dump buffer. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); + +/** + * qed_print_reg_fifo_results(): Prints reg fifo results. + * + * @p_hwfn: HW device data. + * @dump_buf: Reg fifo dump buffer, starting from the header. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf: Buffer for printing the reg fifo results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf); + +/** + * qed_get_igu_fifo_results_buf_size(): Returns the required buffer size + * for igu_fifo results (in bytes). + * + * @p_hwfn: HW device data. + * @dump_buf: IGU fifo dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); + +/** + * qed_print_igu_fifo_results(): Prints IGU fifo results + * + * @p_hwfn: HW device data. + * @dump_buf: IGU fifo dump buffer, starting from the header. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf: Buffer for printing the IGU fifo results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf); + +/** + * qed_get_protection_override_results_buf_size(): Returns the required + * buffer size for protection override results (in bytes). + * + * @p_hwfn: HW device data. + * @dump_buf: Protection override dump buffer. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status +qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); + +/** + * qed_print_protection_override_results(): Prints protection override + * results. + * + * @p_hwfn: HW device data. + * @dump_buf: Protection override dump buffer, starting from the header. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf: Buffer for printing the reg fifo results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf); + +/** + * qed_get_fw_asserts_results_buf_size(): Returns the required buffer size + * for FW Asserts results (in bytes). + * + * @p_hwfn: HW device data. + * @dump_buf: FW Asserts dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. 
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); + +/** + * qed_print_fw_asserts_results(): Prints FW Asserts results. + * + * @p_hwfn: HW device data. + * @dump_buf: FW Asserts dump buffer, starting from the header. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf: buffer for printing the FW Asserts results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf); + +/** + * qed_dbg_parse_attn(): Parses and prints attention registers values in + * the specified results struct. + * + * @p_hwfn: HW device data. + * @results: Pointer to the attention read results + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, + struct dbg_attn_block_result *results); +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h index e1798925b444..ea839e605577 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h @@ -84,16 +84,17 @@ struct qed_dcbx_mib_meta_data { extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass; #ifdef CONFIG_DCB -int qed_dcbx_get_config_params(struct qed_hwfn *, struct qed_dcbx_set *); +int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn, + struct qed_dcbx_set *params); -int qed_dcbx_config_params(struct qed_hwfn *, - struct qed_ptt *, struct qed_dcbx_set *, bool); +int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_dcbx_set *params, bool hw_commit); #endif /* QED local interface routines */ int -qed_dcbx_mib_update_event(struct qed_hwfn *, - struct qed_ptt *, enum qed_mib_read_type); +qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + enum qed_mib_read_type type); int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn); void qed_dcbx_info_free(struct qed_hwfn *p_hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 6ab3e60d4928..e3edca187ddf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2019-2021 Marvell International Ltd. 
*/ #include <linux/module.h> @@ -10,6 +10,7 @@ #include "qed.h" #include "qed_cxt.h" #include "qed_hsi.h" +#include "qed_dbg_hsi.h" #include "qed_hw.h" #include "qed_mcp.h" #include "qed_reg_addr.h" @@ -121,6 +122,11 @@ static u32 cond0(const u32 *r, const u32 *imm) return (r[0] & ~r[1]) != imm[0]; } +static u32 cond14(const u32 *r, const u32 *imm) +{ + return (r[0] | imm[0]) != imm[1]; +} + static u32 cond1(const u32 *r, const u32 *imm) { return r[0] != imm[0]; @@ -172,6 +178,7 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = { cond11, cond12, cond13, + cond14, }; #define NUM_PHYS_BLOCKS 84 @@ -208,10 +215,61 @@ enum dbg_bus_frame_modes { DBG_BUS_NUM_FRAME_MODES }; +/* Debug bus SEMI frame modes */ +enum dbg_bus_semi_frame_modes { + DBG_BUS_SEMI_FRAME_MODE_4FAST = 0, /* 4 fast dw */ + DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW = 1, /* 2 fast dw, 2 slow dw */ + DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW = 2, /* 1 fast dw,3 slow dw */ + DBG_BUS_SEMI_FRAME_MODE_4SLOW = 3, /* 4 slow dw */ + DBG_BUS_SEMI_NUM_FRAME_MODES +}; + +/* Debug bus filter types */ +enum dbg_bus_filter_types { + DBG_BUS_FILTER_TYPE_OFF, /* Filter always off */ + DBG_BUS_FILTER_TYPE_PRE, /* Filter before trigger only */ + DBG_BUS_FILTER_TYPE_POST, /* Filter after trigger only */ + DBG_BUS_FILTER_TYPE_ON /* Filter always on */ +}; + +/* Debug bus pre-trigger recording types */ +enum dbg_bus_pre_trigger_types { + DBG_BUS_PRE_TRIGGER_FROM_ZERO, /* Record from time 0 */ + DBG_BUS_PRE_TRIGGER_NUM_CHUNKS, /* Record some chunks before trigger */ + DBG_BUS_PRE_TRIGGER_DROP /* Drop data before trigger */ +}; + +/* Debug bus post-trigger recording types */ +enum dbg_bus_post_trigger_types { + DBG_BUS_POST_TRIGGER_RECORD, /* Start recording after trigger */ + DBG_BUS_POST_TRIGGER_DROP /* Drop data after trigger */ +}; + +/* Debug bus other engine mode */ +enum dbg_bus_other_engine_modes { + DBG_BUS_OTHER_ENGINE_MODE_NONE, + DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX, + DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX, + DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX, + DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX +}; + +/* DBG block Framing mode definitions */ +struct framing_mode_defs { + u8 id; + u8 blocks_dword_mask; + u8 storms_dword_mask; + u8 semi_framing_mode_id; + u8 full_buf_thr; +}; + /* Chip constant definitions */ struct chip_defs { const char *name; + u8 dwords_per_cycle; + u8 num_framing_modes; u32 num_ilt_pages; + struct framing_mode_defs *framing_modes; }; /* HW type constant definitions */ @@ -334,7 +392,7 @@ struct split_type_defs { #define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET #define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE #define FIELD_DWORD_OFFSET(type, field) \ - (int)(FIELD_BIT_OFFSET(type, field) / 32) + ((int)(FIELD_BIT_OFFSET(type, field) / 32)) #define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32) #define FIELD_BIT_MASK(type, field) \ (((1 << FIELD_BIT_SIZE(type, field)) - 1) << \ @@ -431,11 +489,13 @@ struct split_type_defs { #define STATIC_DEBUG_LINE_DWORDS 9 -#define NUM_COMMON_GLOBAL_PARAMS 9 +#define NUM_COMMON_GLOBAL_PARAMS 11 #define MAX_RECURSION_DEPTH 10 +#define FW_IMG_KUKU 0 #define FW_IMG_MAIN 1 +#define FW_IMG_L2B 2 #define REG_FIFO_ELEMENT_DWORDS 2 #define REG_FIFO_DEPTH_ELEMENTS 32 @@ -464,10 +524,25 @@ struct split_type_defs { /***************************** Constant Arrays *******************************/ +/* DBG block framing mode definitions, in descending preference order */ +static struct framing_mode_defs s_framing_mode_defs[4] = { + 
{DBG_BUS_FRAME_MODE_4ST, 0x0, 0xf, + DBG_BUS_SEMI_FRAME_MODE_4FAST, + 10}, + {DBG_BUS_FRAME_MODE_4HW, 0xf, 0x0, DBG_BUS_SEMI_FRAME_MODE_4SLOW, + 10}, + {DBG_BUS_FRAME_MODE_2ST_2HW, 0x3, 0xc, + DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW, 10}, + {DBG_BUS_FRAME_MODE_1ST_3HW, 0x7, 0x8, + DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW, 10} +}; + /* Chip constant definitions array */ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = { - {"bb", PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2}, - {"ah", PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2} + {"bb", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2, + s_framing_mode_defs}, + {"ah", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2, + s_framing_mode_defs} }; /* Storm constant definitions array */ @@ -477,8 +552,8 @@ static struct storm_defs s_storm_defs[] = { {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true, TSEM_REG_FAST_MEMORY, - TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2, - TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2, + TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE, + TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG, TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT, TCM_REG_CTX_RBC_ACCS, {TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX, @@ -491,10 +566,10 @@ static struct storm_defs s_storm_defs[] = { {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false, MSEM_REG_FAST_MEMORY, - MSEM_REG_DBG_FRAME_MODE_BB_K2, - MSEM_REG_SLOW_DBG_ACTIVE_BB_K2, - MSEM_REG_SLOW_DBG_MODE_BB_K2, - MSEM_REG_DBG_MODE1_CFG_BB_K2, + MSEM_REG_DBG_FRAME_MODE, + MSEM_REG_SLOW_DBG_ACTIVE, + MSEM_REG_SLOW_DBG_MODE, + MSEM_REG_DBG_MODE1_CFG, MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_DBG_GPRE_VECT, MCM_REG_CTX_RBC_ACCS, @@ -508,10 +583,10 @@ static struct storm_defs s_storm_defs[] = { {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false, USEM_REG_FAST_MEMORY, - USEM_REG_DBG_FRAME_MODE_BB_K2, - USEM_REG_SLOW_DBG_ACTIVE_BB_K2, - USEM_REG_SLOW_DBG_MODE_BB_K2, - USEM_REG_DBG_MODE1_CFG_BB_K2, + USEM_REG_DBG_FRAME_MODE, + USEM_REG_SLOW_DBG_ACTIVE, + USEM_REG_SLOW_DBG_MODE, + USEM_REG_DBG_MODE1_CFG, USEM_REG_SYNC_DBG_EMPTY, USEM_REG_DBG_GPRE_VECT, UCM_REG_CTX_RBC_ACCS, @@ -525,10 +600,10 @@ static struct storm_defs s_storm_defs[] = { {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false, XSEM_REG_FAST_MEMORY, - XSEM_REG_DBG_FRAME_MODE_BB_K2, - XSEM_REG_SLOW_DBG_ACTIVE_BB_K2, - XSEM_REG_SLOW_DBG_MODE_BB_K2, - XSEM_REG_DBG_MODE1_CFG_BB_K2, + XSEM_REG_DBG_FRAME_MODE, + XSEM_REG_SLOW_DBG_ACTIVE, + XSEM_REG_SLOW_DBG_MODE, + XSEM_REG_DBG_MODE1_CFG, XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_DBG_GPRE_VECT, XCM_REG_CTX_RBC_ACCS, @@ -541,10 +616,10 @@ static struct storm_defs s_storm_defs[] = { {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false, YSEM_REG_FAST_MEMORY, - YSEM_REG_DBG_FRAME_MODE_BB_K2, - YSEM_REG_SLOW_DBG_ACTIVE_BB_K2, - YSEM_REG_SLOW_DBG_MODE_BB_K2, - YSEM_REG_DBG_MODE1_CFG_BB_K2, + YSEM_REG_DBG_FRAME_MODE, + YSEM_REG_SLOW_DBG_ACTIVE, + YSEM_REG_SLOW_DBG_MODE, + YSEM_REG_DBG_MODE1_CFG, YSEM_REG_SYNC_DBG_EMPTY, YSEM_REG_DBG_GPRE_VECT, YCM_REG_CTX_RBC_ACCS, @@ -558,10 +633,10 @@ static struct storm_defs s_storm_defs[] = { {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true, PSEM_REG_FAST_MEMORY, - PSEM_REG_DBG_FRAME_MODE_BB_K2, - PSEM_REG_SLOW_DBG_ACTIVE_BB_K2, - PSEM_REG_SLOW_DBG_MODE_BB_K2, - PSEM_REG_DBG_MODE1_CFG_BB_K2, + PSEM_REG_DBG_FRAME_MODE, + PSEM_REG_SLOW_DBG_ACTIVE, + PSEM_REG_SLOW_DBG_MODE, + PSEM_REG_DBG_MODE1_CFG, PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_DBG_GPRE_VECT, PCM_REG_CTX_RBC_ACCS, @@ -575,7 +650,8 @@ static struct hw_type_defs s_hw_type_defs[] = { 
{"asic", 1, 256, 32768}, {"reserved", 0, 0, 0}, {"reserved2", 0, 0, 0}, - {"reserved3", 0, 0, 0} + {"reserved3", 0, 0, 0}, + {"reserved4", 0, 0, 0} }; static struct grc_param_defs s_grc_param_defs[] = { @@ -772,25 +848,25 @@ static struct rbc_reset_defs s_rbc_reset_defs[] = { static struct phy_defs s_phy_defs[] = { {"nw_phy", NWS_REG_NWS_CMU_K2, - PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5, - PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5, - PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5, - PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5}, - {"sgmii_phy", MS_REG_MS_CMU_K2_E5, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5}, - {"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5}, - {"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5}, + PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2, + PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2, + PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2, + PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2}, + {"sgmii_phy", MS_REG_MS_CMU_K2, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2}, + {"pcie_phy0", PHY_PCIE_REG_PHY0_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2}, + {"pcie_phy1", PHY_PCIE_REG_PHY1_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2}, }; static struct split_type_defs s_split_type_defs[] = { @@ -810,8 +886,17 @@ static struct split_type_defs s_split_type_defs[] = { {"vf"} }; +/******************************** Variables **********************************/ + +/* The version of the calling app */ +static u32 s_app_ver; + /**************************** Private Functions ******************************/ +static void qed_static_asserts(void) +{ +} + /* Reads and returns a single dword from the specified unaligned buffer */ static u32 qed_read_unaligned_dword(u8 *buf) { @@ -870,6 +955,9 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn) if (dev_data->initialized) return DBG_STATUS_OK; + if (!s_app_ver) + return DBG_STATUS_APP_VERSION_NOT_SET; + /* Set chip */ if (QED_IS_K2(p_hwfn->cdev)) { dev_data->chip_id = CHIP_K2; @@ -990,11 +1078,6 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn, for (i = 0; i < size; i++, addr += BYTES_IN_DWORD) dest[i] = qed_rd(p_hwfn, p_ptt, addr); - /* qed_rq() fetches data in CPU byteorder. Swap it back to - * the device's to get right structure layout. - */ - cpu_to_le32_array(dest, size); - /* Read FW version info from Storm RAM */ size = le32_to_cpu(fw_info_location.size); if (!size || size > sizeof(*fw_info)) @@ -1006,8 +1089,6 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn, for (i = 0; i < size; i++, addr += BYTES_IN_DWORD) dest[i] = qed_rd(p_hwfn, p_ptt, addr); - - cpu_to_le32_array(dest, size); } /* Dumps the specified string to the specified buffer. 
@@ -1117,9 +1198,15 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "Unexpected debug error: invalid FW version string\n"); switch (fw_info.ver.image_id) { + case FW_IMG_KUKU: + strcpy(fw_img_str, "kuku"); + break; case FW_IMG_MAIN: strcpy(fw_img_str, "main"); break; + case FW_IMG_L2B: + strcpy(fw_img_str, "l2b"); + break; default: strcpy(fw_img_str, "unknown"); break; @@ -1255,6 +1342,8 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn, s_hw_type_defs[dev_data->hw_type].name); offset += qed_dump_num_param(dump_buf + offset, dump, "pci-func", p_hwfn->abs_pf_id); + offset += qed_dump_num_param(dump_buf + offset, + dump, "epoch", qed_get_epoch_time()); if (dev_data->chip_id == CHIP_BB) offset += qed_dump_num_param(dump_buf + offset, dump, "path", QED_PATH_ID(p_hwfn)); @@ -1590,7 +1679,7 @@ static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn, continue; reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr + - SEM_FAST_REG_STALL_0_BB_K2; + SEM_FAST_REG_STALL_0; qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0); } @@ -1703,8 +1792,8 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn, { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; const struct dbg_attn_reg *attn_reg_arr; + u32 block_id, sts_clr_address; u8 reg_idx, num_attn_regs; - u32 block_id; for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) { if (dev_data->block_in_reset[block_id]) @@ -1728,16 +1817,103 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn, GET_FIELD(reg_data->mode.data, DBG_MODE_HDR_MODES_BUF_OFFSET); + sts_clr_address = reg_data->sts_clr_address; /* If Mode match: clear parity status */ if (!eval_mode || qed_is_mode_match(p_hwfn, &modes_buf_offset)) qed_rd(p_hwfn, p_ptt, - DWORDS_TO_BYTES(reg_data-> - sts_clr_address)); + DWORDS_TO_BYTES(sts_clr_address)); } } } +/* Finds the meta data image in NVRAM */ +static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 image_type, + u32 *nvram_offset_bytes, + u32 *nvram_size_bytes) +{ + u32 ret_mcp_resp, ret_mcp_param, ret_txn_size; + struct mcp_file_att file_att; + int nvm_result; + + /* Call NVRAM get file command */ + nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn, + p_ptt, + DRV_MSG_CODE_NVM_GET_FILE_ATT, + image_type, + &ret_mcp_resp, + &ret_mcp_param, + &ret_txn_size, + (u32 *)&file_att, false); + + /* Check response */ + if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) != + FW_MSG_CODE_NVM_OK) + return DBG_STATUS_NVRAM_GET_IMAGE_FAILED; + + /* Update return values */ + *nvram_offset_bytes = file_att.nvm_start_addr; + *nvram_size_bytes = file_att.len; + + DP_VERBOSE(p_hwfn, + QED_MSG_DEBUG, + "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n", + image_type, *nvram_offset_bytes, *nvram_size_bytes); + + /* Check alignment */ + if (*nvram_size_bytes & 0x3) + return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE; + + return DBG_STATUS_OK; +} + +/* Reads data from NVRAM */ +static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 nvram_offset_bytes, + u32 nvram_size_bytes, u32 *ret_buf) +{ + u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy; + s32 bytes_left = nvram_size_bytes; + u32 read_offset = 0, param = 0; + + DP_VERBOSE(p_hwfn, + QED_MSG_DEBUG, + "nvram_read: reading image of size %d bytes from NVRAM\n", + nvram_size_bytes); + + do { + bytes_to_copy = + (bytes_left > + MCP_DRV_NVM_BUF_LEN) ? 
MCP_DRV_NVM_BUF_LEN : bytes_left; + + /* Call NVRAM read command */ + SET_MFW_FIELD(param, + DRV_MB_PARAM_NVM_OFFSET, + nvram_offset_bytes + read_offset); + SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy); + if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_NVM_READ_NVRAM, param, + &ret_mcp_resp, + &ret_mcp_param, &ret_read_size, + (u32 *)((u8 *)ret_buf + read_offset), + false)) + return DBG_STATUS_NVRAM_READ_FAILED; + + /* Check response */ + if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) + return DBG_STATUS_NVRAM_READ_FAILED; + + /* Update read offset */ + read_offset += ret_read_size; + bytes_left -= ret_read_size; + } while (bytes_left > 0); + + return DBG_STATUS_OK; +} + /* Dumps GRC registers section header. Returns the dumped size in dwords. * the following parameters are dumped: * - count: no. of dumped entries @@ -3189,17 +3365,6 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn, return offset; } -static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 image_type, - u32 *nvram_offset_bytes, - u32 *nvram_size_bytes); - -static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 nvram_offset_bytes, - u32 nvram_size_bytes, u32 *ret_buf); - /* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. */ static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3283,10 +3448,6 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, has_dbg_bus = GET_FIELD(block_per_chip->flags, DBG_BLOCK_CHIP_HAS_DBG_BUS); - /* read+clear for NWS parity is not working, skip NWS block */ - if (block_id == BLOCK_NWS) - continue; - if (!is_removed && has_dbg_bus && GET_FIELD(block_per_chip->dbg_bus_mode.data, DBG_MODE_HDR_EVAL_MODE) > 0) { @@ -3375,8 +3536,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, bool dump, u32 *num_dumped_dwords) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - u32 dwords_read, offset = 0; bool parities_masked = false; + u32 dwords_read, offset = 0; u8 i; *num_dumped_dwords = 0; @@ -3545,8 +3706,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, */ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 * - dump_buf, + u32 *dump_buf, bool dump, u16 rule_id, const struct dbg_idle_chk_rule *rule, @@ -3894,91 +4054,6 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn, return offset; } -/* Finds the meta data image in NVRAM */ -static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 image_type, - u32 *nvram_offset_bytes, - u32 *nvram_size_bytes) -{ - u32 ret_mcp_resp, ret_mcp_param, ret_txn_size; - struct mcp_file_att file_att; - int nvm_result; - - /* Call NVRAM get file command */ - nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn, - p_ptt, - DRV_MSG_CODE_NVM_GET_FILE_ATT, - image_type, - &ret_mcp_resp, - &ret_mcp_param, - &ret_txn_size, (u32 *)&file_att); - - /* Check response */ - if (nvm_result || - (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) - return DBG_STATUS_NVRAM_GET_IMAGE_FAILED; - - /* Update return values */ - *nvram_offset_bytes = file_att.nvm_start_addr; - *nvram_size_bytes = file_att.len; - - DP_VERBOSE(p_hwfn, - QED_MSG_DEBUG, - "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n", - image_type, *nvram_offset_bytes, *nvram_size_bytes); - - /* Check alignment */ - if (*nvram_size_bytes & 0x3) - return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE; - - 
return DBG_STATUS_OK; -} - -/* Reads data from NVRAM */ -static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 nvram_offset_bytes, - u32 nvram_size_bytes, u32 *ret_buf) -{ - u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy; - s32 bytes_left = nvram_size_bytes; - u32 read_offset = 0, param = 0; - - DP_VERBOSE(p_hwfn, - QED_MSG_DEBUG, - "nvram_read: reading image of size %d bytes from NVRAM\n", - nvram_size_bytes); - - do { - bytes_to_copy = - (bytes_left > - MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left; - - /* Call NVRAM read command */ - SET_MFW_FIELD(param, - DRV_MB_PARAM_NVM_OFFSET, - nvram_offset_bytes + read_offset); - SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy); - if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, - DRV_MSG_CODE_NVM_READ_NVRAM, param, - &ret_mcp_resp, - &ret_mcp_param, &ret_read_size, - (u32 *)((u8 *)ret_buf + read_offset))) - return DBG_STATUS_NVRAM_READ_FAILED; - - /* Check response */ - if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) - return DBG_STATUS_NVRAM_READ_FAILED; - - /* Update read offset */ - read_offset += ret_read_size; - bytes_left -= ret_read_size; - } while (bytes_left > 0); - - return DBG_STATUS_OK; -} - /* Get info on the MCP Trace data in the scratchpad: * - trace_data_grc_addr (OUT): trace data GRC address in bytes * - trace_data_size (OUT): trace data size in bytes (without the header) @@ -4480,14 +4555,18 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, /* Dumps the specified ILT pages to the specified buffer. * Returns the dumped size in dwords. */ -static u32 qed_ilt_dump_pages_range(u32 *dump_buf, - bool dump, - u32 start_page_id, +static u32 qed_ilt_dump_pages_range(u32 *dump_buf, u32 *given_offset, + bool *dump, u32 start_page_id, u32 num_pages, struct phys_mem_desc *ilt_pages, - bool dump_page_ids) + bool dump_page_ids, u32 buf_size_in_dwords, + u32 *given_actual_dump_size_in_dwords) { - u32 page_id, end_page_id, offset = 0; + u32 actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords; + u32 page_id, end_page_id, offset = *given_offset; + struct phys_mem_desc *mem_desc = NULL; + bool continue_dump = *dump; + u32 partial_page_size = 0; if (num_pages == 0) return offset; @@ -4495,31 +4574,51 @@ static u32 qed_ilt_dump_pages_range(u32 *dump_buf, end_page_id = start_page_id + num_pages - 1; for (page_id = start_page_id; page_id <= end_page_id; page_id++) { - struct phys_mem_desc *mem_desc = &ilt_pages[page_id]; - - /** - * - * if (page_id >= ->p_cxt_mngr->ilt_shadow_size) - * break; - */ - + mem_desc = &ilt_pages[page_id]; if (!ilt_pages[page_id].virt_addr) continue; if (dump_page_ids) { - /* Copy page ID to dump buffer */ - if (dump) + /* Copy page ID to dump buffer + * (if dump is needed and buffer is not full) + */ + if ((continue_dump) && + (offset + 1 > buf_size_in_dwords)) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + if (continue_dump) *(dump_buf + offset) = page_id; offset++; } else { /* Copy page memory to dump buffer */ - if (dump) + if ((continue_dump) && + (offset + BYTES_TO_DWORDS(mem_desc->size) > + buf_size_in_dwords)) { + if (offset + BYTES_TO_DWORDS(mem_desc->size) > + buf_size_in_dwords) { + partial_page_size = + buf_size_in_dwords - offset; + memcpy(dump_buf + offset, + mem_desc->virt_addr, + partial_page_size); + continue_dump = false; + actual_dump_size_in_dwords = + offset + partial_page_size; + } + } + + if (continue_dump) memcpy(dump_buf + offset, mem_desc->virt_addr, mem_desc->size); offset += 
BYTES_TO_DWORDS(mem_desc->size); } } + *dump = continue_dump; + *given_offset = offset; + *given_actual_dump_size_in_dwords = actual_dump_size_in_dwords; + return offset; } @@ -4528,21 +4627,30 @@ static u32 qed_ilt_dump_pages_range(u32 *dump_buf, */ static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn, u32 *dump_buf, - bool dump, + u32 *given_offset, + bool *dump, u32 valid_conn_pf_pages, u32 valid_conn_vf_pages, struct phys_mem_desc *ilt_pages, - bool dump_page_ids) + bool dump_page_ids, + u32 buf_size_in_dwords, + u32 *given_actual_dump_size_in_dwords) { struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients; - u32 pf_start_line, start_page_id, offset = 0; + u32 pf_start_line, start_page_id, offset = *given_offset; u32 cdut_pf_init_pages, cdut_vf_init_pages; u32 cdut_pf_work_pages, cdut_vf_work_pages; u32 base_data_offset, size_param_offset; + u32 src_pages; + u32 section_header_and_param_size; u32 cdut_pf_pages, cdut_vf_pages; + u32 actual_dump_size_in_dwords; + bool continue_dump = *dump; + bool update_size = *dump; const char *section_name; - u8 i; + u32 i; + actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords; section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem"; cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn); cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn); @@ -4551,13 +4659,26 @@ static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn, cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages; cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages; pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line; + section_header_and_param_size = qed_dump_section_hdr(NULL, + false, + section_name, + 1) + + qed_dump_num_param(NULL, false, "size", 0); + + if ((continue_dump) && + (offset + section_header_and_param_size > buf_size_in_dwords)) { + continue_dump = false; + update_size = false; + actual_dump_size_in_dwords = offset; + } - offset += - qed_dump_section_hdr(dump_buf + offset, dump, section_name, 1); + offset += qed_dump_section_hdr(dump_buf + offset, + continue_dump, section_name, 1); /* Dump size parameter (0 for now, overwritten with real size later) */ size_param_offset = offset; - offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0); + offset += qed_dump_num_param(dump_buf + offset, + continue_dump, "size", 0); base_data_offset = offset; /* CDUC pages are ordered as follows: @@ -4570,22 +4691,22 @@ static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn, if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) { /* Dump connection PF pages */ start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line; - offset += qed_ilt_dump_pages_range(dump_buf + offset, - dump, - start_page_id, - valid_conn_pf_pages, - ilt_pages, dump_page_ids); + qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump, + start_page_id, valid_conn_pf_pages, + ilt_pages, dump_page_ids, + buf_size_in_dwords, + &actual_dump_size_in_dwords); /* Dump connection VF pages */ start_page_id += clients[ILT_CLI_CDUC].pf_total_lines; for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count; i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines) - offset += qed_ilt_dump_pages_range(dump_buf + offset, - dump, - start_page_id, - valid_conn_vf_pages, - ilt_pages, - dump_page_ids); + qed_ilt_dump_pages_range(dump_buf, &offset, + &continue_dump, start_page_id, + valid_conn_vf_pages, + ilt_pages, dump_page_ids, + buf_size_in_dwords, + &actual_dump_size_in_dwords); } /* CDUT pages are ordered as follows: @@ -4599,63 +4720,84 @@ static u32 
qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn, /* Dump task PF pages */ start_page_id = clients[ILT_CLI_CDUT].first.val + cdut_pf_init_pages - pf_start_line; - offset += qed_ilt_dump_pages_range(dump_buf + offset, - dump, - start_page_id, - cdut_pf_work_pages, - ilt_pages, dump_page_ids); + qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump, + start_page_id, cdut_pf_work_pages, + ilt_pages, dump_page_ids, + buf_size_in_dwords, + &actual_dump_size_in_dwords); /* Dump task VF pages */ start_page_id = clients[ILT_CLI_CDUT].first.val + cdut_pf_pages + cdut_vf_init_pages - pf_start_line; for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count; i++, start_page_id += cdut_vf_pages) - offset += qed_ilt_dump_pages_range(dump_buf + offset, - dump, - start_page_id, - cdut_vf_work_pages, - ilt_pages, - dump_page_ids); + qed_ilt_dump_pages_range(dump_buf, &offset, + &continue_dump, start_page_id, + cdut_vf_work_pages, ilt_pages, + dump_page_ids, + buf_size_in_dwords, + &actual_dump_size_in_dwords); + } + + /*Dump Searcher pages */ + if (clients[ILT_CLI_SRC].active) { + start_page_id = clients[ILT_CLI_SRC].first.val - pf_start_line; + src_pages = clients[ILT_CLI_SRC].last.val - + clients[ILT_CLI_SRC].first.val + 1; + qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump, + start_page_id, src_pages, ilt_pages, + dump_page_ids, buf_size_in_dwords, + &actual_dump_size_in_dwords); } /* Overwrite size param */ - if (dump) - qed_dump_num_param(dump_buf + size_param_offset, - dump, "size", offset - base_data_offset); + if (update_size) { + u32 section_size = (*dump == continue_dump) ? + offset - base_data_offset : + actual_dump_size_in_dwords - base_data_offset; + if (section_size > 0) + qed_dump_num_param(dump_buf + size_param_offset, + *dump, "size", section_size); + else if ((section_size == 0) && (*dump != continue_dump)) + actual_dump_size_in_dwords -= + section_header_and_param_size; + } + + *dump = continue_dump; + *given_offset = offset; + *given_actual_dump_size_in_dwords = actual_dump_size_in_dwords; return offset; } -/* Performs ILT Dump to the specified buffer. +/* Dumps a section containing the global parameters. + * Part of ilt dump process * Returns the dumped size in dwords. 
*/ -static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) +static u32 +qed_ilt_dump_dump_common_global_params(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, + u32 cduc_page_size, + u32 conn_ctx_size, + u32 cdut_page_size, + u32 *full_dump_size_param_offset, + u32 *actual_dump_size_param_offset) { struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients; - u32 valid_conn_vf_cids, valid_conn_vf_pages, offset = 0; - u32 valid_conn_pf_cids, valid_conn_pf_pages, num_pages; - u32 num_cids_per_page, conn_ctx_size; - u32 cduc_page_size, cdut_page_size; - struct phys_mem_desc *ilt_pages; - u8 conn_type; - - cduc_page_size = 1 << - (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN); - cdut_page_size = 1 << - (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN); - conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size; - num_cids_per_page = (int)(cduc_page_size / conn_ctx_size); - ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow; + u32 offset = 0; - /* Dump global params - 22 must match number of params below */ offset += qed_dump_common_global_params(p_hwfn, p_ptt, - dump_buf + offset, dump, 22); + dump_buf + offset, + dump, 30); offset += qed_dump_str_param(dump_buf + offset, - dump, "dump-type", "ilt-dump"); + dump, + "dump-type", "ilt-dump"); offset += qed_dump_num_param(dump_buf + offset, dump, - "cduc-page-size", cduc_page_size); + "cduc-page-size", + cduc_page_size); offset += qed_dump_num_param(dump_buf + offset, dump, "cduc-first-page-id", @@ -4667,20 +4809,19 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn, offset += qed_dump_num_param(dump_buf + offset, dump, "cduc-num-pf-pages", - clients - [ILT_CLI_CDUC].pf_total_lines); + clients[ILT_CLI_CDUC].pf_total_lines); offset += qed_dump_num_param(dump_buf + offset, dump, "cduc-num-vf-pages", - clients - [ILT_CLI_CDUC].vf_total_lines); + clients[ILT_CLI_CDUC].vf_total_lines); offset += qed_dump_num_param(dump_buf + offset, dump, "max-conn-ctx-size", conn_ctx_size); offset += qed_dump_num_param(dump_buf + offset, dump, - "cdut-page-size", cdut_page_size); + "cdut-page-size", + cdut_page_size); offset += qed_dump_num_param(dump_buf + offset, dump, "cdut-first-page-id", @@ -4711,19 +4852,16 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn, p_hwfn->p_cxt_mngr->task_ctx_size); offset += qed_dump_num_param(dump_buf + offset, dump, - "task-type-id", - p_hwfn->p_cxt_mngr->task_type_id); - offset += qed_dump_num_param(dump_buf + offset, - dump, "first-vf-id-in-pf", p_hwfn->p_cxt_mngr->first_vf_in_pf); - offset += /* 18 */ qed_dump_num_param(dump_buf + offset, - dump, - "num-vfs-in-pf", - p_hwfn->p_cxt_mngr->vf_count); offset += qed_dump_num_param(dump_buf + offset, dump, - "ptr-size-bytes", sizeof(void *)); + "num-vfs-in-pf", + p_hwfn->p_cxt_mngr->vf_count); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "ptr-size-bytes", + sizeof(void *)); offset += qed_dump_num_param(dump_buf + offset, dump, "pf-start-line", @@ -4736,58 +4874,281 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn, dump, "ilt-shadow-size", p_hwfn->p_cxt_mngr->ilt_shadow_size); + + *full_dump_size_param_offset = offset; + + offset += qed_dump_num_param(dump_buf + offset, + dump, "dump-size-full", 0); + + *actual_dump_size_param_offset = offset; + + offset += qed_dump_num_param(dump_buf + offset, + dump, + "dump-size-actual", 0); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "iscsi_task_pages", + p_hwfn->p_cxt_mngr->iscsi_task_pages); + offset += 
qed_dump_num_param(dump_buf + offset, + dump, + "fcoe_task_pages", + p_hwfn->p_cxt_mngr->fcoe_task_pages); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "roce_task_pages", + p_hwfn->p_cxt_mngr->roce_task_pages); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "eth_task_pages", + p_hwfn->p_cxt_mngr->eth_task_pages); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "src-first-page-id", + clients[ILT_CLI_SRC].first.val); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "src-last-page-id", + clients[ILT_CLI_SRC].last.val); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "src-is-active", + clients[ILT_CLI_SRC].active); + /* Additional/Less parameters require matching of number in call to * dump_common_global_params() */ - /* Dump section containing number of PF CIDs per connection type */ + return offset; +} + +/* Dump section containing number of PF CIDs per connection type. + * Part of ilt dump process. + * Returns the dumped size in dwords. + */ +static u32 qed_ilt_dump_dump_num_pf_cids(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + bool dump, u32 *valid_conn_pf_cids) +{ + u32 num_pf_cids = 0; + u32 offset = 0; + u8 conn_type; + offset += qed_dump_section_hdr(dump_buf + offset, dump, "num_pf_cids_per_conn_type", 1); offset += qed_dump_num_param(dump_buf + offset, - dump, "size", NUM_OF_CONNECTION_TYPES_E4); - for (conn_type = 0, valid_conn_pf_cids = 0; - conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) { - u32 num_pf_cids = - p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count; - + dump, "size", NUM_OF_CONNECTION_TYPES); + for (conn_type = 0, *valid_conn_pf_cids = 0; + conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) { + num_pf_cids = p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count; if (dump) *(dump_buf + offset) = num_pf_cids; - valid_conn_pf_cids += num_pf_cids; + *valid_conn_pf_cids += num_pf_cids; } - /* Dump section containing number of VF CIDs per connection type */ - offset += qed_dump_section_hdr(dump_buf + offset, - dump, "num_vf_cids_per_conn_type", 1); + return offset; +} + +/* Dump section containing number of VF CIDs per connection type + * Part of ilt dump process. + * Returns the dumped size in dwords. + */ +static u32 qed_ilt_dump_dump_num_vf_cids(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + bool dump, u32 *valid_conn_vf_cids) +{ + u32 num_vf_cids = 0; + u32 offset = 0; + u8 conn_type; + + offset += qed_dump_section_hdr(dump_buf + offset, dump, + "num_vf_cids_per_conn_type", 1); offset += qed_dump_num_param(dump_buf + offset, - dump, "size", NUM_OF_CONNECTION_TYPES_E4); - for (conn_type = 0, valid_conn_vf_cids = 0; - conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) { - u32 num_vf_cids = + dump, "size", NUM_OF_CONNECTION_TYPES); + for (conn_type = 0, *valid_conn_vf_cids = 0; + conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) { + num_vf_cids = p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf; - if (dump) *(dump_buf + offset) = num_vf_cids; - valid_conn_vf_cids += num_vf_cids; + *valid_conn_vf_cids += num_vf_cids; + } + + return offset; +} + +/* Performs ILT Dump to the specified buffer. + * buf_size_in_dwords - The dumped buffer size. + * Returns the dumped size in dwords. 
+ */ +static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, u32 buf_size_in_dwords, bool dump) +{ +#if ((!defined VMWARE) && (!defined UEFI)) + struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients; +#endif + u32 valid_conn_vf_cids = 0, + valid_conn_vf_pages, offset = 0, real_dumped_size = 0; + u32 valid_conn_pf_cids = 0, valid_conn_pf_pages, num_pages; + u32 num_cids_per_page, conn_ctx_size; + u32 cduc_page_size, cdut_page_size; + u32 actual_dump_size_in_dwords = 0; + struct phys_mem_desc *ilt_pages; + u32 actul_dump_off = 0; + u32 last_section_size; + u32 full_dump_off = 0; + u32 section_size = 0; + bool continue_dump; + u32 page_id; + + last_section_size = qed_dump_last_section(NULL, 0, false); + cduc_page_size = 1 << + (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN); + cdut_page_size = 1 << + (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN); + conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size; + num_cids_per_page = (int)(cduc_page_size / conn_ctx_size); + ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow; + continue_dump = dump; + + /* if need to dump then save memory for the last section + * (last section calculates CRC of dumped data) + */ + if (dump) { + if (buf_size_in_dwords >= last_section_size) { + buf_size_in_dwords -= last_section_size; + } else { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } } - /* Dump section containing physical memory descs for each ILT page */ + /* Dump global params */ + + /* if need to dump then first check that there is enough memory + * in dumped buffer for this section calculate the size of this + * section without dumping. if there is not enough memory - then + * stop the dumping. + */ + if (continue_dump) { + section_size = + qed_ilt_dump_dump_common_global_params(p_hwfn, + p_ptt, + NULL, + false, + cduc_page_size, + conn_ctx_size, + cdut_page_size, + &full_dump_off, + &actul_dump_off); + if (offset + section_size > buf_size_in_dwords) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + } + + offset += qed_ilt_dump_dump_common_global_params(p_hwfn, + p_ptt, + dump_buf + offset, + continue_dump, + cduc_page_size, + conn_ctx_size, + cdut_page_size, + &full_dump_off, + &actul_dump_off); + + /* Dump section containing number of PF CIDs per connection type + * If need to dump then first check that there is enough memory in + * dumped buffer for this section. + */ + if (continue_dump) { + section_size = + qed_ilt_dump_dump_num_pf_cids(p_hwfn, + NULL, + false, + &valid_conn_pf_cids); + if (offset + section_size > buf_size_in_dwords) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + } + + offset += qed_ilt_dump_dump_num_pf_cids(p_hwfn, + dump_buf + offset, + continue_dump, + &valid_conn_pf_cids); + + /* Dump section containing number of VF CIDs per connection type + * If need to dump then first check that there is enough memory in + * dumped buffer for this section. + */ + if (continue_dump) { + section_size = + qed_ilt_dump_dump_num_vf_cids(p_hwfn, + NULL, + false, + &valid_conn_vf_cids); + if (offset + section_size > buf_size_in_dwords) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + } + + offset += qed_ilt_dump_dump_num_vf_cids(p_hwfn, + dump_buf + offset, + continue_dump, + &valid_conn_vf_cids); + + /* Dump section containing physical memory descriptors for each + * ILT page. 
+ */ num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size; + + /* If need to dump then first check that there is enough memory + * in dumped buffer for the section header. + */ + if (continue_dump) { + section_size = qed_dump_section_hdr(NULL, + false, + "ilt_page_desc", + 1) + + qed_dump_num_param(NULL, + false, + "size", + num_pages * PAGE_MEM_DESC_SIZE_DWORDS); + if (offset + section_size > buf_size_in_dwords) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + } + offset += qed_dump_section_hdr(dump_buf + offset, - dump, "ilt_page_desc", 1); + continue_dump, "ilt_page_desc", 1); offset += qed_dump_num_param(dump_buf + offset, - dump, + continue_dump, "size", num_pages * PAGE_MEM_DESC_SIZE_DWORDS); - /* Copy memory descriptors to dump buffer */ - if (dump) { - u32 page_id; - + /* Copy memory descriptors to dump buffer + * If need to dump then dump till the dump buffer size + */ + if (continue_dump) { for (page_id = 0; page_id < num_pages; - page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS) - memcpy(dump_buf + offset, - &ilt_pages[page_id], - DWORDS_TO_BYTES(PAGE_MEM_DESC_SIZE_DWORDS)); + page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS) { + if (continue_dump && + (offset + PAGE_MEM_DESC_SIZE_DWORDS <= + buf_size_in_dwords)) { + memcpy(dump_buf + offset, + &ilt_pages[page_id], + DWORDS_TO_BYTES + (PAGE_MEM_DESC_SIZE_DWORDS)); + } else { + if (continue_dump) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + } + } } else { offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS; } @@ -4798,25 +5159,31 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn, num_cids_per_page); /* Dump ILT pages IDs */ - offset += qed_ilt_dump_pages_section(p_hwfn, - dump_buf + offset, - dump, - valid_conn_pf_pages, - valid_conn_vf_pages, - ilt_pages, true); + qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump, + valid_conn_pf_pages, valid_conn_vf_pages, + ilt_pages, true, buf_size_in_dwords, + &actual_dump_size_in_dwords); /* Dump ILT pages memory */ - offset += qed_ilt_dump_pages_section(p_hwfn, - dump_buf + offset, - dump, - valid_conn_pf_pages, - valid_conn_vf_pages, - ilt_pages, false); + qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump, + valid_conn_pf_pages, valid_conn_vf_pages, + ilt_pages, false, buf_size_in_dwords, + &actual_dump_size_in_dwords); + + real_dumped_size = + (continue_dump == dump) ? 
offset : actual_dump_size_in_dwords; + qed_dump_num_param(dump_buf + full_dump_off, dump, + "full-dump-size", offset + last_section_size); + qed_dump_num_param(dump_buf + actul_dump_off, + dump, + "actual-dump-size", + real_dumped_size + last_section_size); /* Dump last section */ - offset += qed_dump_last_section(dump_buf, offset, dump); + real_dumped_size += qed_dump_last_section(dump_buf, + real_dumped_size, dump); - return offset; + return real_dumped_size; } /***************************** Public Functions *******************************/ @@ -4837,6 +5204,16 @@ enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn, return DBG_STATUS_OK; } +static enum dbg_status qed_dbg_set_app_ver(u32 ver) +{ + if (ver < TOOLS_VERSION) + return DBG_STATUS_UNSUPPORTED_APP_VERSION; + + s_app_ver = ver; + + return DBG_STATUS_OK; +} + bool qed_read_fw_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct fw_info *fw_info) { @@ -4975,6 +5352,9 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, if (buf_size_in_dwords < needed_buf_size_in_dwords) return DBG_STATUS_DUMP_BUF_TOO_SMALL; + /* Doesn't do anything, needed for compile time asserts */ + qed_static_asserts(); + /* GRC Dump */ status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords); @@ -5296,7 +5676,7 @@ static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn, if (status != DBG_STATUS_OK) return status; - *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, false); + *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, 0, false); return DBG_STATUS_OK; } @@ -5307,21 +5687,9 @@ static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn, u32 buf_size_in_dwords, u32 *num_dumped_dwords) { - u32 needed_buf_size_in_dwords; - enum dbg_status status; - - *num_dumped_dwords = 0; - - status = qed_dbg_ilt_get_dump_buf_size(p_hwfn, - p_ptt, - &needed_buf_size_in_dwords); - if (status != DBG_STATUS_OK) - return status; - - if (buf_size_in_dwords < needed_buf_size_in_dwords) - return DBG_STATUS_DUMP_BUF_TOO_SMALL; - - *num_dumped_dwords = qed_ilt_dump(p_hwfn, p_ptt, dump_buf, true); + *num_dumped_dwords = qed_ilt_dump(p_hwfn, + p_ptt, + dump_buf, buf_size_in_dwords, true); /* Reveret GRC params to their default */ qed_dbg_grc_set_params_default(p_hwfn); @@ -5724,7 +6092,46 @@ static const char * const s_status_str[] = { "The configured filter mode requires that all the constraints of a single trigger state will be defined on a single Storm/block input", /* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */ - "When triggering on Storm data, the Storm to trigger on must be specified" + "When triggering on Storm data, the Storm to trigger on must be specified", + + /* DBG_STATUS_MDUMP2_FAILED_TO_REQUEST_OFFSIZE */ + "Failed to request MDUMP2 Offsize", + + /* DBG_STATUS_MDUMP2_FAILED_VALIDATION_OF_DATA_CRC */ + "Expected CRC (part of the MDUMP2 data) is different than the calculated CRC over that data", + + /* DBG_STATUS_MDUMP2_INVALID_SIGNATURE */ + "Invalid Signature found at start of MDUMP2", + + /* DBG_STATUS_MDUMP2_INVALID_LOG_SIZE */ + "Invalid Log Size of MDUMP2", + + /* DBG_STATUS_MDUMP2_INVALID_LOG_HDR */ + "Invalid Log Header of MDUMP2", + + /* DBG_STATUS_MDUMP2_INVALID_LOG_DATA */ + "Invalid Log Data of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_NUM_PORTS */ + "Could not extract number of ports from regval buf of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_MFW_STATUS */ + "Could not extract MFW (link) status from regval buf of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_DISPLAYING_LINKDUMP */ + "Could not 
display linkdump of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_READING_PHY_CFG */ + "Could not read PHY CFG of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_READING_PLL_MODE */ + "Could not read PLL Mode of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_READING_LANE_REGS */ + "Could not read TSCF/TSCE Lane Regs of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_ALLOCATING_BUF */ + "Could not allocate MDUMP2 reg-val internal buffer" }; /* Idle check severity names array */ @@ -5874,6 +6281,10 @@ static char s_temp_buf[MAX_MSG_LEN]; /**************************** Private Functions ******************************/ +static void qed_user_static_asserts(void) +{ +} + static u32 qed_cyclic_add(u32 a, u32 b, u32 size) { return (a + b) % size; @@ -6153,9 +6564,8 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn, /* Skip register names until the required reg_id is * reached. */ - for (; reg_id > curr_reg_id; - curr_reg_id++, - parsing_str += strlen(parsing_str) + 1); + for (; reg_id > curr_reg_id; curr_reg_id++) + parsing_str += strlen(parsing_str) + 1; results_offset += sprintf(qed_get_buf_ptr(results_buf, @@ -6208,9 +6618,9 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, u32 *num_errors, u32 *num_warnings) { + u32 num_section_params = 0, num_rules, num_rules_not_dumped; const char *section_name, *param_name, *param_str_val; u32 *dump_buf_end = dump_buf + num_dumped_dwords; - u32 num_section_params = 0, num_rules; /* Offset in results_buf in bytes */ u32 results_offset = 0; @@ -6234,15 +6644,31 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, num_section_params, results_buf, &results_offset); - /* Read idle_chk section */ + /* Read idle_chk section + * There may be 1 or 2 idle_chk section parameters: + * - 1st is "num_rules" + * - 2nd is "num_rules_not_dumped" (optional) + */ + dump_buf += qed_read_section_hdr(dump_buf, §ion_name, &num_section_params); - if (strcmp(section_name, "idle_chk") || num_section_params != 1) + if (strcmp(section_name, "idle_chk") || + (num_section_params != 2 && num_section_params != 1)) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; dump_buf += qed_read_param(dump_buf, ¶m_name, ¶m_str_val, &num_rules); if (strcmp(param_name, "num_rules")) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; + if (num_section_params > 1) { + dump_buf += qed_read_param(dump_buf, + ¶m_name, + ¶m_str_val, + &num_rules_not_dumped); + if (strcmp(param_name, "num_rules_not_dumped")) + return DBG_STATUS_IDLE_CHK_PARSE_FAILED; + } else { + num_rules_not_dumped = 0; + } if (num_rules) { u32 rules_print_size; @@ -6309,6 +6735,13 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, results_offset), "\nIdle Check completed successfully\n"); + if (num_rules_not_dumped) + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), + "\nIdle Check Partially dumped : num_rules_not_dumped = %d\n", + num_rules_not_dumped); + /* Add 1 for string NULL termination */ *parsed_results_bytes = results_offset + 1; @@ -7160,6 +7593,9 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, { u32 parsed_buf_size; + /* Doesn't do anything, needed for compile time asserts */ + qed_user_static_asserts(); + return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf, &parsed_buf_size, true); @@ -7336,7 +7772,7 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, reg_result->block_attn_offset; /* Go over attention status bits */ - for (j = 0; j < num_reg_attn; j++, bit_idx++) { + for (j = 0; j < num_reg_attn; j++) { u16 
attn_idx_val = GET_FIELD(bit_mapping[j].data, DBG_ATTN_BIT_MAPPING_VAL); const char *attn_name, *attn_type_str, *masked_str; @@ -7353,35 +7789,36 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, } /* Check current bit index */ - if (!(reg_result->sts_val & BIT(bit_idx))) - continue; + if (reg_result->sts_val & BIT(bit_idx)) { + /* An attention bit with value=1 was found + * Find attention name + */ + attn_name_offset = + block_attn_name_offsets[attn_idx_val]; + attn_name = attn_name_base + attn_name_offset; + attn_type_str = + (attn_type == + ATTN_TYPE_INTERRUPT ? "Interrupt" : + "Parity"); + masked_str = reg_result->mask_val & + BIT(bit_idx) ? + " [masked]" : ""; + sts_addr = + GET_FIELD(reg_result->data, + DBG_ATTN_REG_RESULT_STS_ADDRESS); + DP_NOTICE(p_hwfn, + "%s (%s) : %s [address 0x%08x, bit %d]%s\n", + block_name, attn_type_str, attn_name, + sts_addr * 4, bit_idx, masked_str); + } - /* An attention bit with value=1 was found - * Find attention name - */ - attn_name_offset = - block_attn_name_offsets[attn_idx_val]; - attn_name = attn_name_base + attn_name_offset; - attn_type_str = - (attn_type == - ATTN_TYPE_INTERRUPT ? "Interrupt" : - "Parity"); - masked_str = reg_result->mask_val & BIT(bit_idx) ? - " [masked]" : ""; - sts_addr = GET_FIELD(reg_result->data, - DBG_ATTN_REG_RESULT_STS_ADDRESS); - DP_NOTICE(p_hwfn, - "%s (%s) : %s [address 0x%08x, bit %d]%s\n", - block_name, attn_type_str, attn_name, - sts_addr * 4, bit_idx, masked_str); + bit_idx++; } } return DBG_STATUS_OK; } -static DEFINE_MUTEX(qed_dbg_lock); - /* Wrapper for unifying the idle_chk and mcp_trace api */ static enum dbg_status qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn, @@ -7396,9 +7833,26 @@ qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn, &num_warnnings); } +static DEFINE_MUTEX(qed_dbg_lock); + +#define MAX_PHY_RESULT_BUFFER 9000 + +/******************************** Feature Meta data section ******************/ + +#define GRC_NUM_STR_FUNCS 2 +#define IDLE_CHK_NUM_STR_FUNCS 1 +#define MCP_TRACE_NUM_STR_FUNCS 1 +#define REG_FIFO_NUM_STR_FUNCS 1 +#define IGU_FIFO_NUM_STR_FUNCS 1 +#define PROTECTION_OVERRIDE_NUM_STR_FUNCS 1 +#define FW_ASSERTS_NUM_STR_FUNCS 1 +#define ILT_NUM_STR_FUNCS 1 +#define PHY_NUM_STR_FUNCS 20 + /* Feature meta data lookup table */ static struct { char *name; + u32 num_funcs; enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *size); enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn, @@ -7411,40 +7865,46 @@ static struct { u32 *dump_buf, u32 num_dumped_dwords, u32 *results_buf_size); + const struct qed_func_lookup *hsi_func_lookup; } qed_features_lookup[] = { { - "grc", qed_dbg_grc_get_dump_buf_size, - qed_dbg_grc_dump, NULL, NULL}, { - "idle_chk", + "grc", GRC_NUM_STR_FUNCS, qed_dbg_grc_get_dump_buf_size, + qed_dbg_grc_dump, NULL, NULL, NULL}, { + "idle_chk", IDLE_CHK_NUM_STR_FUNCS, qed_dbg_idle_chk_get_dump_buf_size, qed_dbg_idle_chk_dump, qed_print_idle_chk_results_wrapper, - qed_get_idle_chk_results_buf_size}, { - "mcp_trace", + qed_get_idle_chk_results_buf_size, + NULL}, { + "mcp_trace", MCP_TRACE_NUM_STR_FUNCS, qed_dbg_mcp_trace_get_dump_buf_size, qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results, - qed_get_mcp_trace_results_buf_size}, { - "reg_fifo", + qed_get_mcp_trace_results_buf_size, + NULL}, { + "reg_fifo", REG_FIFO_NUM_STR_FUNCS, qed_dbg_reg_fifo_get_dump_buf_size, qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results, - qed_get_reg_fifo_results_buf_size}, { - "igu_fifo", + qed_get_reg_fifo_results_buf_size, + 
NULL}, { + "igu_fifo", IGU_FIFO_NUM_STR_FUNCS, qed_dbg_igu_fifo_get_dump_buf_size, qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results, - qed_get_igu_fifo_results_buf_size}, { - "protection_override", + qed_get_igu_fifo_results_buf_size, + NULL}, { + "protection_override", PROTECTION_OVERRIDE_NUM_STR_FUNCS, qed_dbg_protection_override_get_dump_buf_size, qed_dbg_protection_override_dump, qed_print_protection_override_results, - qed_get_protection_override_results_buf_size}, { - "fw_asserts", + qed_get_protection_override_results_buf_size, + NULL}, { + "fw_asserts", FW_ASSERTS_NUM_STR_FUNCS, qed_dbg_fw_asserts_get_dump_buf_size, qed_dbg_fw_asserts_dump, qed_print_fw_asserts_results, - qed_get_fw_asserts_results_buf_size}, { - "ilt", - qed_dbg_ilt_get_dump_buf_size, - qed_dbg_ilt_dump, NULL, NULL},}; + qed_get_fw_asserts_results_buf_size, + NULL}, { + "ilt", ILT_NUM_STR_FUNCS, qed_dbg_ilt_get_dump_buf_size, + qed_dbg_ilt_dump, NULL, NULL, NULL},}; static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size) { @@ -7466,7 +7926,8 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn, { struct qed_dbg_feature *feature = &p_hwfn->cdev->dbg_features[feature_idx]; - u32 text_size_bytes, null_char_pos, i; + u32 txt_size_bytes, null_char_pos, i; + u32 *dbuf, dwords; enum dbg_status rc; char *text_buf; @@ -7474,33 +7935,43 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn, if (!qed_features_lookup[feature_idx].results_buf_size) return DBG_STATUS_OK; + dbuf = (u32 *)feature->dump_buf; + dwords = feature->dumped_dwords; + /* Obtain size of formatted output */ - rc = qed_features_lookup[feature_idx]. - results_buf_size(p_hwfn, (u32 *)feature->dump_buf, - feature->dumped_dwords, &text_size_bytes); + rc = qed_features_lookup[feature_idx].results_buf_size(p_hwfn, + dbuf, + dwords, + &txt_size_bytes); if (rc != DBG_STATUS_OK) return rc; - /* Make sure that the allocated size is a multiple of dword (4 bytes) */ - null_char_pos = text_size_bytes - 1; - text_size_bytes = (text_size_bytes + 3) & ~0x3; + /* Make sure that the allocated size is a multiple of dword + * (4 bytes). + */ + null_char_pos = txt_size_bytes - 1; + txt_size_bytes = (txt_size_bytes + 3) & ~0x3; - if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) { + if (txt_size_bytes < QED_RESULTS_BUF_MIN_SIZE) { DP_NOTICE(p_hwfn->cdev, "formatted size of feature was too small %d. Aborting\n", - text_size_bytes); + txt_size_bytes); return DBG_STATUS_INVALID_ARGS; } - /* Allocate temp text buf */ - text_buf = vzalloc(text_size_bytes); - if (!text_buf) + /* allocate temp text buf */ + text_buf = vzalloc(txt_size_bytes); + if (!text_buf) { + DP_NOTICE(p_hwfn->cdev, + "failed to allocate text buffer. Aborting\n"); return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; + } /* Decode feature opcodes to string on temp buf */ - rc = qed_features_lookup[feature_idx]. - print_results(p_hwfn, (u32 *)feature->dump_buf, - feature->dumped_dwords, text_buf); + rc = qed_features_lookup[feature_idx].print_results(p_hwfn, + dbuf, + dwords, + text_buf); if (rc != DBG_STATUS_OK) { vfree(text_buf); return rc; @@ -7510,26 +7981,27 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn, * The bytes that were added as a result of the dword alignment are also * padded with '\n' characters. 
*/ - for (i = null_char_pos; i < text_size_bytes; i++) + for (i = null_char_pos; i < txt_size_bytes; i++) text_buf[i] = '\n'; /* Dump printable feature to log */ if (p_hwfn->cdev->print_dbg_data) - qed_dbg_print_feature(text_buf, text_size_bytes); + qed_dbg_print_feature(text_buf, txt_size_bytes); - /* Just return the original binary buffer if requested */ + /* Dump binary data as is to the output file */ if (p_hwfn->cdev->dbg_bin_dump) { vfree(text_buf); - return DBG_STATUS_OK; + return rc; } - /* Free the old dump_buf and point the dump_buf to the newly allocagted + /* Free the old dump_buf and point the dump_buf to the newly allocated * and formatted text buffer. */ vfree(feature->dump_buf); feature->dump_buf = text_buf; - feature->buf_size = text_size_bytes; - feature->dumped_dwords = text_size_bytes / 4; + feature->buf_size = txt_size_bytes; + feature->dumped_dwords = txt_size_bytes / 4; + return rc; } @@ -7542,7 +8014,7 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, { struct qed_dbg_feature *feature = &p_hwfn->cdev->dbg_features[feature_idx]; - u32 buf_size_dwords; + u32 buf_size_dwords, *dbuf, *dwords; enum dbg_status rc; DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n", @@ -7580,13 +8052,16 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, if (!feature->dump_buf) return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; - rc = qed_features_lookup[feature_idx]. - perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf, - feature->buf_size / sizeof(u32), - &feature->dumped_dwords); + dbuf = (u32 *)feature->dump_buf; + dwords = &feature->dumped_dwords; + rc = qed_features_lookup[feature_idx].perform_dump(p_hwfn, p_ptt, + dbuf, + feature->buf_size / + sizeof(u32), + dwords); /* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error. - * In this case the buffer holds valid binary data, but we wont able + * In this case the buffer holds valid binary data, but we won't able * to parse it (since parsing relies on data in NVRAM which is only * accessible when MFW is responsive). skip the formatting but return * success so that binary data is provided. 
@@ -7777,7 +8252,8 @@ enum debug_print_features { static u32 qed_calc_regdump_header(struct qed_dev *cdev, enum debug_print_features feature, - int engine, u32 feature_size, u8 omit_engine) + int engine, u32 feature_size, + u8 omit_engine, u8 dbg_bin_dump) { u32 res = 0; @@ -7788,7 +8264,7 @@ static u32 qed_calc_regdump_header(struct qed_dev *cdev, feature, feature_size); SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature); - SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, 1); + SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, dbg_bin_dump); SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine); SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine); @@ -7798,12 +8274,10 @@ static u32 qed_calc_regdump_header(struct qed_dev *cdev, int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) { u8 cur_engine, omit_engine = 0, org_engine; - struct qed_hwfn *p_hwfn = - &cdev->hwfns[cdev->engine_for_debug]; + struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - int grc_params[MAX_DBG_GRC_PARAMS], i; + int grc_params[MAX_DBG_GRC_PARAMS], rc, i; u32 offset = 0, feature_size; - int rc; for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) grc_params[i] = dev_data->grc.param_val[i]; @@ -7811,8 +8285,8 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) if (!QED_IS_CMT(cdev)) omit_engine = 1; + cdev->dbg_bin_dump = 1; mutex_lock(&qed_dbg_lock); - cdev->dbg_bin_dump = true; org_engine = qed_get_debug_engine(cdev); for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) { @@ -7826,8 +8300,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, IDLE_CHK, + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc); @@ -7838,8 +8315,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, IDLE_CHK, + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc); @@ -7850,8 +8330,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, REG_FIFO, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, REG_FIFO, + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc); @@ -7862,8 +8345,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, IGU_FIFO, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, IGU_FIFO, + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_igu_fifo failed. 
rc = %d", rc); @@ -7875,9 +8361,12 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) &feature_size); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, PROTECTION_OVERRIDE, + qed_calc_regdump_header(cdev, + PROTECTION_OVERRIDE, cur_engine, - feature_size, omit_engine); + feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, @@ -7891,8 +8380,10 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) if (!rc) { *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, FW_ASSERTS, - cur_engine, feature_size, - omit_engine); + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n", @@ -7900,8 +8391,8 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) } feature_size = qed_dbg_ilt_size(cdev); - if (!cdev->disable_ilt_dump && - feature_size < ILT_DUMP_MAX_SIZE) { + if (!cdev->disable_ilt_dump && feature_size < + ILT_DUMP_MAX_SIZE) { rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, &feature_size); if (!rc) { @@ -7909,15 +8400,16 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) qed_calc_regdump_header(cdev, ILT_DUMP, cur_engine, feature_size, - omit_engine); - offset += feature_size + REGDUMP_HEADER_SIZE; + omit_engine, + cdev->dbg_bin_dump); + offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n", rc); } } - /* GRC dump - must be last because when mcp stuck it will + /* Grc dump - must be last because when mcp stuck it will * clutter idle_chk, reg_fifo, ... */ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) @@ -7929,7 +8421,9 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, GRC_DUMP, cur_engine, - feature_size, omit_engine); + feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc); @@ -7944,16 +8438,13 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) if (!rc) { *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine, - feature_size, omit_engine); + feature_size, omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else { DP_ERR(cdev, "qed_dbg_mcp_trace failed. 
rc = %d\n", rc); } - /* Re-populate nvm attribute info */ - qed_mcp_nvm_info_free(p_hwfn); - qed_mcp_nvm_info_populate(p_hwfn); - /* nvm cfg1 */ rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset + @@ -7962,43 +8453,51 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) if (!rc) { *(u32 *)((u8 *)buffer + offset) = qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine, - feature_size, omit_engine); + feature_size, omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else if (rc != -ENOENT) { DP_ERR(cdev, "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", - QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc); + QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", + rc); } - /* nvm default */ + /* nvm default */ rc = qed_dbg_nvm_image(cdev, - (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, - &feature_size, QED_NVM_IMAGE_DEFAULT_CFG); + (u8 *)buffer + offset + + REGDUMP_HEADER_SIZE, &feature_size, + QED_NVM_IMAGE_DEFAULT_CFG); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, DEFAULT_CFG, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, DEFAULT_CFG, + cur_engine, feature_size, + omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else if (rc != -ENOENT) { DP_ERR(cdev, "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", - QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG", - rc); + QED_NVM_IMAGE_DEFAULT_CFG, + "QED_NVM_IMAGE_DEFAULT_CFG", rc); } /* nvm meta */ rc = qed_dbg_nvm_image(cdev, - (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, - &feature_size, QED_NVM_IMAGE_NVM_META); + (u8 *)buffer + offset + + REGDUMP_HEADER_SIZE, &feature_size, + QED_NVM_IMAGE_NVM_META); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, NVM_META, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, NVM_META, cur_engine, + feature_size, omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else if (rc != -ENOENT) { DP_ERR(cdev, "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", - QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc); + QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", + rc); } /* nvm mdump */ @@ -8007,8 +8506,9 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) QED_NVM_IMAGE_MDUMP); if (!rc) { *(u32 *)((u8 *)buffer + offset) = - qed_calc_regdump_header(cdev, MDUMP, cur_engine, - feature_size, omit_engine); + qed_calc_regdump_header(cdev, MDUMP, cur_engine, + feature_size, omit_engine, + cdev->dbg_bin_dump); offset += (feature_size + REGDUMP_HEADER_SIZE); } else if (rc != -ENOENT) { DP_ERR(cdev, @@ -8016,17 +8516,16 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc); } - cdev->dbg_bin_dump = false; mutex_unlock(&qed_dbg_lock); + cdev->dbg_bin_dump = 0; return 0; } int qed_dbg_all_data_size(struct qed_dev *cdev) { - struct qed_hwfn *p_hwfn = - &cdev->hwfns[cdev->engine_for_debug]; u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0; + struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; u8 cur_engine, org_engine; cdev->disable_ilt_dump = false; @@ -8037,14 +8536,13 @@ int qed_dbg_all_data_size(struct qed_dev *cdev) "calculating idle_chk and grcdump register length for current engine\n"); qed_set_debug_engine(cdev, cur_engine); regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) + - REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) + - REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) + - 
REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) + - REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) + - REGDUMP_HEADER_SIZE + - qed_dbg_protection_override_size(cdev) + - REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev); - + REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) + + REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) + + REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) + + REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) + + REGDUMP_HEADER_SIZE + + qed_dbg_protection_override_size(cdev) + + REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev); ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev); if (ilt_len < ILT_DUMP_MAX_SIZE) { total_ilt_len += ilt_len; @@ -8055,7 +8553,8 @@ int qed_dbg_all_data_size(struct qed_dev *cdev) qed_set_debug_engine(cdev, org_engine); /* Engine common */ - regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev); + regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev) + + REGDUMP_HEADER_SIZE + qed_dbg_phy_size(cdev); qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len); if (image_len) regs_len += REGDUMP_HEADER_SIZE + image_len; @@ -8083,10 +8582,8 @@ int qed_dbg_all_data_size(struct qed_dev *cdev) int qed_dbg_feature(struct qed_dev *cdev, void *buffer, enum qed_dbg_features feature, u32 *num_dumped_bytes) { - struct qed_hwfn *p_hwfn = - &cdev->hwfns[cdev->engine_for_debug]; - struct qed_dbg_feature *qed_feature = - &cdev->dbg_features[feature]; + struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature]; + struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; enum dbg_status dbg_rc; struct qed_ptt *p_ptt; int rc = 0; @@ -8119,9 +8616,8 @@ out: int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature) { - struct qed_hwfn *p_hwfn = - &cdev->hwfns[cdev->engine_for_debug]; struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature]; + struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); u32 buf_size_dwords; enum dbg_status rc; @@ -8143,6 +8639,14 @@ int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature) return qed_feature->buf_size; } +int qed_dbg_phy_size(struct qed_dev *cdev) +{ + /* return max size of phy info and + * phy mac_stat multiplied by the number of ports + */ + return MAX_PHY_RESULT_BUFFER * (1 + qed_device_num_ports(cdev)); +} + u8 qed_get_debug_engine(struct qed_dev *cdev) { return cdev->engine_for_debug; @@ -8160,6 +8664,9 @@ void qed_dbg_pf_init(struct qed_dev *cdev) const u8 *dbg_values = NULL; int i; + /* Sync ver with debugbus qed code */ + qed_dbg_set_app_ver(TOOLS_VERSION); + /* Debug values are after init values. * The offset is the first dword of the file. */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h index e71af82d3200..b0d4b937cf4a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.h +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h @@ -1,11 +1,11 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2019-2021 Marvell International Ltd. 
*/ -#ifndef _QED_DEBUGFS_H -#define _QED_DEBUGFS_H +#ifndef _QED_DEBUG_H +#define _QED_DEBUG_H enum qed_dbg_features { DBG_FEATURE_GRC, @@ -45,6 +45,7 @@ int qed_dbg_ilt_size(struct qed_dev *cdev); int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes); int qed_dbg_mcp_trace_size(struct qed_dev *cdev); +int qed_dbg_phy_size(struct qed_dev *cdev); int qed_dbg_all_data(struct qed_dev *cdev, void *buffer); int qed_dbg_all_data_size(struct qed_dev *cdev); u8 qed_get_debug_engine(struct qed_dev *cdev); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 0410c3604abd..cc4ec2bb36db 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -25,6 +25,7 @@ #include "qed_dev_api.h" #include "qed_fcoe.h" #include "qed_hsi.h" +#include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_int.h" @@ -951,7 +952,7 @@ qed_llh_remove_filter(struct qed_hwfn *p_hwfn, } int qed_llh_add_mac_filter(struct qed_dev *cdev, - u8 ppfid, u8 mac_addr[ETH_ALEN]) + u8 ppfid, const u8 mac_addr[ETH_ALEN]) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); @@ -1396,12 +1397,13 @@ void qed_resc_free(struct qed_dev *cdev) qed_rdma_info_free(p_hwfn); } + qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON); qed_iov_free(p_hwfn); qed_l2_free(p_hwfn); qed_dmae_info_free(p_hwfn); qed_dcbx_info_free(p_hwfn); qed_dbg_user_data_free(p_hwfn); - qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem); + qed_fw_overlay_mem_free(p_hwfn, &p_hwfn->fw_overlay_mem); /* Destroy doorbell recovery mechanism */ qed_db_recovery_teardown(p_hwfn); @@ -1483,8 +1485,8 @@ static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn) u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn); /* num RLs can't exceed resource amount of rls or vports */ - num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL), - RESC_NUM(p_hwfn, QED_VPORT)); + num_pf_rls = (u16)min_t(u32, RESC_NUM(p_hwfn, QED_RL), + RESC_NUM(p_hwfn, QED_VPORT)); /* Make sure after we reserve there's something left */ if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) @@ -1532,8 +1534,8 @@ static void qed_init_qm_params(struct qed_hwfn *p_hwfn) bool four_port; /* pq and vport bases for this PF */ - qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ); - qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT); + qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ); + qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT); /* rate limiting and weighted fair queueing are always enabled */ qm_info->vport_rl_en = true; @@ -1628,9 +1630,9 @@ static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn) */ /* flags for pq init */ -#define PQ_INIT_SHARE_VPORT (1 << 0) -#define PQ_INIT_PF_RL (1 << 1) -#define PQ_INIT_VF_RL (1 << 2) +#define PQ_INIT_SHARE_VPORT BIT(0) +#define PQ_INIT_PF_RL BIT(1) +#define PQ_INIT_VF_RL BIT(2) /* defines for pq init */ #define PQ_INIT_DEFAULT_WRR_GROUP 1 @@ -2290,7 +2292,7 @@ int qed_resc_alloc(struct qed_dev *cdev) goto alloc_no_mem; } - rc = qed_eq_alloc(p_hwfn, (u16) n_eqes); + rc = qed_eq_alloc(p_hwfn, (u16)n_eqes); if (rc) goto alloc_err; @@ -2375,6 +2377,49 @@ alloc_err: return rc; } +static int qed_fw_err_handler(struct qed_hwfn *p_hwfn, + u8 opcode, + u16 echo, + union event_ring_data *data, u8 fw_return_code) +{ + if (fw_return_code != COMMON_ERR_CODE_ERROR) + goto eqe_unexpected; + + if (data->err_data.recovery_scope == ERR_SCOPE_FUNC && + 
le16_to_cpu(data->err_data.entity_id) >= MAX_NUM_PFS) { + qed_sriov_vfpf_malicious(p_hwfn, &data->err_data); + return 0; + } + +eqe_unexpected: + DP_ERR(p_hwfn, + "Skipping unexpected eqe 0x%02x, FW return code 0x%x, echo 0x%x\n", + opcode, fw_return_code, echo); + return -EINVAL; +} + +static int qed_common_eqe_event(struct qed_hwfn *p_hwfn, + u8 opcode, + __le16 echo, + union event_ring_data *data, + u8 fw_return_code) +{ + switch (opcode) { + case COMMON_EVENT_VF_PF_CHANNEL: + case COMMON_EVENT_VF_FLR: + return qed_sriov_eqe_event(p_hwfn, opcode, echo, data, + fw_return_code); + case COMMON_EVENT_FW_ERROR: + return qed_fw_err_handler(p_hwfn, opcode, + le16_to_cpu(echo), data, + fw_return_code); + default: + DP_INFO(p_hwfn->cdev, "Unknown eqe event 0x%02x, echo 0x%x\n", + opcode, echo); + return -EINVAL; + } +} + void qed_resc_setup(struct qed_dev *cdev) { int i; @@ -2403,6 +2448,8 @@ void qed_resc_setup(struct qed_dev *cdev) qed_l2_setup(p_hwfn); qed_iov_setup(p_hwfn); + qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON, + qed_common_eqe_event); #ifdef CONFIG_QED_LL2 if (p_hwfn->using_ll2) qed_ll2_setup(p_hwfn); @@ -2430,9 +2477,8 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn, u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; int rc = -EBUSY; - addr = GTT_BAR0_MAP_REG_USDM_RAM + - USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); - + addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, + USTORM_FLR_FINAL_ACK, p_hwfn->rel_pf_id); if (is_vf) id += 0x10; @@ -2592,7 +2638,7 @@ static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn, cache_line_size); } - if (L1_CACHE_BYTES > wr_mbs) + if (wr_mbs < L1_CACHE_BYTES) DP_INFO(p_hwfn, "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n", L1_CACHE_BYTES, wr_mbs); @@ -2608,13 +2654,21 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int hw_mode) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; - struct qed_qm_common_rt_init_params params; + struct qed_qm_common_rt_init_params *params; struct qed_dev *cdev = p_hwfn->cdev; u8 vf_id, max_num_vfs; u16 num_pfs, pf_id; u32 concrete_fid; int rc = 0; + params = kzalloc(sizeof(*params), GFP_KERNEL); + if (!params) { + DP_NOTICE(p_hwfn->cdev, + "Failed to allocate common init params\n"); + + return -ENOMEM; + } + qed_init_cau_rt_data(cdev); /* Program GTT windows */ @@ -2627,16 +2681,15 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, qm_info->pf_wfq_en = true; } - memset(¶ms, 0, sizeof(params)); - params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; - params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; - params.pf_rl_en = qm_info->pf_rl_en; - params.pf_wfq_en = qm_info->pf_wfq_en; - params.global_rl_en = qm_info->vport_rl_en; - params.vport_wfq_en = qm_info->vport_wfq_en; - params.port_params = qm_info->qm_port_params; + params->max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; + params->max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; + params->pf_rl_en = qm_info->pf_rl_en; + params->pf_wfq_en = qm_info->pf_wfq_en; + params->global_rl_en = qm_info->vport_rl_en; + params->vport_wfq_en = qm_info->vport_wfq_en; + params->port_params = qm_info->qm_port_params; - qed_qm_common_rt_init(p_hwfn, ¶ms); + qed_qm_common_rt_init(p_hwfn, params); qed_cxt_hw_init_common(p_hwfn); @@ -2644,7 +2697,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); if (rc) - return rc; + goto out; qed_wr(p_hwfn, p_ptt, 
PSWRQ2_REG_L2P_VALIDATE_VFID, 0); qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); @@ -2663,7 +2716,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB; for (vf_id = 0; vf_id < max_num_vfs; vf_id++) { concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); - qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid); qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0); qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1); @@ -2672,6 +2725,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, /* pretend to original PF */ qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); +out: + kfree(params); + return rc; } @@ -2784,7 +2840,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) qed_rdma_dpm_bar(p_hwfn, p_ptt); } - p_hwfn->wid_count = (u16) n_cpus; + p_hwfn->wid_count = (u16)n_cpus; DP_INFO(p_hwfn, "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n", @@ -3503,8 +3559,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) static void get_function_id(struct qed_hwfn *p_hwfn) { /* ME Register */ - p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn, - PXP_PF_ME_OPAQUE_ADDR); + p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, + PXP_PF_ME_OPAQUE_ADDR); p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); @@ -3670,12 +3726,14 @@ u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type) return qed_hsi_def_val[type][chip_id]; } + static int qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 resc_max_val, mcp_resp; u8 res_id; int rc; + for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { switch (res_id) { case QED_LL2_RAM_QUEUE: @@ -3921,7 +3979,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) * resources allocation queries should be atomic. Since several PFs can * run in parallel - a resource lock is needed. * If either the resource lock or resource set value commands are not - * supported - skip the the max values setting, release the lock if + * supported - skip the max values setting, release the lock if * needed, and proceed to the queries. Other failures, including a * failure to acquire the lock, will cause this function to fail. 
*/ @@ -3934,7 +3992,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) } else if (rc == -EINVAL) { DP_INFO(p_hwfn, "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n"); - } else if (!rc && !resc_lock_params.b_granted) { + } else if (!resc_lock_params.b_granted) { DP_NOTICE(p_hwfn, "Failed to acquire the resource lock for the resource allocation commands\n"); return -EBUSY; @@ -4775,7 +4833,7 @@ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id) if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) { u16 min, max; - min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE); + min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE); DP_NOTICE(p_hwfn, "l2_queue id [%d] is not valid, available indices [%d - %d]\n", @@ -4909,7 +4967,7 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, goto out; address = BAR0_MAP_REG_USDM_RAM + - USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id); rc = qed_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, sizeof(struct ustorm_eth_queue_zone), timeset); @@ -4948,7 +5006,7 @@ int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, goto out; address = BAR0_MAP_REG_XSDM_RAM + - XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id); rc = qed_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, sizeof(struct xstorm_eth_queue_zone), timeset); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index d3c1f3879be8..f8682356d0cf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -15,44 +15,52 @@ #include "qed_int.h" /** - * @brief qed_init_dp - initialize the debug level + * qed_init_dp(): Initialize the debug level. * - * @param cdev - * @param dp_module - * @param dp_level + * @cdev: Qed dev pointer. + * @dp_module: Module debug parameter. + * @dp_level: Module debug level. + * + * Return: Void. */ void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level); /** - * @brief qed_init_struct - initialize the device structure to - * its defaults + * qed_init_struct(): Initialize the device structure to + * its defaults. + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_init_struct(struct qed_dev *cdev); /** - * @brief qed_resc_free - + * qed_resc_free: Free device resources. + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_resc_free(struct qed_dev *cdev); /** - * @brief qed_resc_alloc - + * qed_resc_alloc(): Alloc device resources. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_resc_alloc(struct qed_dev *cdev); /** - * @brief qed_resc_setup - + * qed_resc_setup(): Setup device resources. * - * @param cdev + * @cdev: Qed dev pointer. + * + * Return: Void. */ void qed_resc_setup(struct qed_dev *cdev); @@ -105,94 +113,96 @@ struct qed_hw_init_params { }; /** - * @brief qed_hw_init - + * qed_hw_init(): Init Qed hardware. * - * @param cdev - * @param p_params + * @cdev: Qed dev pointer. + * @p_params: Pointers to params. * - * @return int + * Return: Int. */ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params); /** - * @brief qed_hw_timers_stop_all - stop the timers HW block + * qed_hw_timers_stop_all(): Stop the timers HW block. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return void + * Return: void. 
*/ void qed_hw_timers_stop_all(struct qed_dev *cdev); /** - * @brief qed_hw_stop - + * qed_hw_stop(): Stop Qed hardware. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: int. */ int qed_hw_stop(struct qed_dev *cdev); /** - * @brief qed_hw_stop_fastpath -should be called incase - * slowpath is still required for the device, - * but fastpath is not. + * qed_hw_stop_fastpath(): Should be called incase + * slowpath is still required for the device, + * but fastpath is not. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_hw_stop_fastpath(struct qed_dev *cdev); /** - * @brief qed_hw_start_fastpath -restart fastpath traffic, - * only if hw_stop_fastpath was called + * qed_hw_start_fastpath(): Restart fastpath traffic, + * only if hw_stop_fastpath was called. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn); - /** - * @brief qed_hw_prepare - + * qed_hw_prepare(): Prepare Qed hardware. * - * @param cdev - * @param personality - personality to initialize + * @cdev: Qed dev pointer. + * @personality: Personality to initialize. * - * @return int + * Return: Int. */ int qed_hw_prepare(struct qed_dev *cdev, int personality); /** - * @brief qed_hw_remove - + * qed_hw_remove(): Remove Qed hardware. + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_hw_remove(struct qed_dev *cdev); /** - * @brief qed_ptt_acquire - Allocate a PTT window + * qed_ptt_acquire(): Allocate a PTT window. * - * Should be called at the entry point to the driver (at the beginning of an - * exported function) + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: struct qed_ptt. * - * @return struct qed_ptt + * Should be called at the entry point to the driver (at the beginning of an + * exported function). */ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_release - Release PTT Window + * qed_ptt_release(): Release PTT Window. * - * Should be called at the end of a flow - at the end of the function that - * acquired the PTT. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * + * Return: Void. * - * @param p_hwfn - * @param p_ptt + * Should be called at the end of a flow - at the end of the function that + * acquired the PTT. */ void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -205,15 +215,17 @@ enum qed_dmae_address_type_t { }; /** - * @brief qed_dmae_host2grc - copy data from source addr to - * dmae registers using the given ptt + * qed_dmae_host2grc(): Copy data from source addr to + * dmae registers using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @source_addr: Source address. + * @grc_addr: GRC address (dmae_data_offset). + * @size_in_dwords: Size. + * @p_params: (default parameters will be used in case of NULL). * - * @param p_hwfn - * @param p_ptt - * @param source_addr - * @param grc_addr (dmae_data_offset) - * @param size_in_dwords - * @param p_params (default parameters will be used in case of NULL) + * Return: Int. */ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn, @@ -224,29 +236,34 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn, struct qed_dmae_params *p_params); /** - * @brief qed_dmae_grc2host - Read data from dmae data offset - * to source address using the given ptt + * qed_dmae_grc2host(): Read data from dmae data offset + * to source address using the given ptt. + * + * @p_ptt: P_ptt. + * @grc_addr: GRC address (dmae_data_offset). 
+ * @dest_addr: Destination Address. + * @size_in_dwords: Size. + * @p_params: (default parameters will be used in case of NULL). * - * @param p_ptt - * @param grc_addr (dmae_data_offset) - * @param dest_addr - * @param size_in_dwords - * @param p_params (default parameters will be used in case of NULL) + * Return: Int. */ int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords, struct qed_dmae_params *p_params); /** - * @brief qed_dmae_host2host - copy data from to source address - * to a destination adress (for SRIOV) using the given ptt + * qed_dmae_host2host(): Copy data from to source address + * to a destination adrress (for SRIOV) using the given + * ptt. * - * @param p_hwfn - * @param p_ptt - * @param source_addr - * @param dest_addr - * @param size_in_dwords - * @param p_params (default parameters will be used in case of NULL) + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @source_addr: Source address. + * @dest_addr: Destination address. + * @size_in_dwords: size. + * @p_params: (default parameters will be used in case of NULL). + * + * Return: Int. */ int qed_dmae_host2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -259,51 +276,51 @@ int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain, void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain); /** - * @@brief qed_fw_l2_queue - Get absolute L2 queue ID + * qed_fw_l2_queue(): Get absolute L2 queue ID. * - * @param p_hwfn - * @param src_id - relative to p_hwfn - * @param dst_id - absolute per engine + * @p_hwfn: HW device data. + * @src_id: Relative to p_hwfn. + * @dst_id: Absolute per engine. * - * @return int + * Return: Int. */ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id); /** - * @@brief qed_fw_vport - Get absolute vport ID + * qed_fw_vport(): Get absolute vport ID. * - * @param p_hwfn - * @param src_id - relative to p_hwfn - * @param dst_id - absolute per engine + * @p_hwfn: HW device data. + * @src_id: Relative to p_hwfn. + * @dst_id: Absolute per engine. * - * @return int + * Return: Int. */ int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id); /** - * @@brief qed_fw_rss_eng - Get absolute RSS engine ID + * qed_fw_rss_eng(): Get absolute RSS engine ID. * - * @param p_hwfn - * @param src_id - relative to p_hwfn - * @param dst_id - absolute per engine + * @p_hwfn: HW device data. + * @src_id: Relative to p_hwfn. + * @dst_id: Absolute per engine. * - * @return int + * Return: Int. */ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id); /** - * @brief qed_llh_get_num_ppfid - Return the allocated number of LLH filter - * banks that are allocated to the PF. + * qed_llh_get_num_ppfid(): Return the allocated number of LLH filter + * banks that are allocated to the PF. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return u8 - Number of LLH filter banks + * Return: u8 Number of LLH filter banks. */ u8 qed_llh_get_num_ppfid(struct qed_dev *cdev); @@ -314,45 +331,50 @@ enum qed_eng { }; /** - * @brief qed_llh_set_ppfid_affinity - Set the engine affinity for the given - * LLH filter bank. + * qed_llh_set_ppfid_affinity(): Set the engine affinity for the given + * LLH filter bank. * - * @param cdev - * @param ppfid - relative within the allocated ppfids ('0' is the default one). - * @param eng + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). + * @eng: Engine. * - * @return int + * Return: Int. 
*/ int qed_llh_set_ppfid_affinity(struct qed_dev *cdev, u8 ppfid, enum qed_eng eng); /** - * @brief qed_llh_set_roce_affinity - Set the RoCE engine affinity + * qed_llh_set_roce_affinity(): Set the RoCE engine affinity. * - * @param cdev - * @param eng + * @cdev: Qed dev pointer. + * @eng: Engine. * - * @return int + * Return: Int. */ int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng); /** - * @brief qed_llh_add_mac_filter - Add a LLH MAC filter into the given filter - * bank. + * qed_llh_add_mac_filter(): Add a LLH MAC filter into the given filter + * bank. + * + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). + * @mac_addr: MAC to add. * - * @param cdev - * @param ppfid - relative within the allocated ppfids ('0' is the default one). - * @param mac_addr - MAC to add + * Return: Int. */ int qed_llh_add_mac_filter(struct qed_dev *cdev, - u8 ppfid, u8 mac_addr[ETH_ALEN]); + u8 ppfid, const u8 mac_addr[ETH_ALEN]); /** - * @brief qed_llh_remove_mac_filter - Remove a LLH MAC filter from the given - * filter bank. + * qed_llh_remove_mac_filter(): Remove a LLH MAC filter from the given + * filter bank. + * + * @cdev: Qed dev pointer. + * @ppfid: Ppfid. + * @mac_addr: MAC to remove * - * @param p_ptt - * @param p_filter - MAC to remove + * Return: Void. */ void qed_llh_remove_mac_filter(struct qed_dev *cdev, u8 ppfid, u8 mac_addr[ETH_ALEN]); @@ -368,15 +390,16 @@ enum qed_llh_prot_filter_type_t { }; /** - * @brief qed_llh_add_protocol_filter - Add a LLH protocol filter into the - * given filter bank. + * qed_llh_add_protocol_filter(): Add a LLH protocol filter into the + * given filter bank. * - * @param cdev - * @param ppfid - relative within the allocated ppfids ('0' is the default one). - * @param type - type of filters and comparing - * @param source_port_or_eth_type - source port or ethertype to add - * @param dest_port - destination port to add - * @param type - type of filters and comparing + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). + * @type: Type of filters and comparing. + * @source_port_or_eth_type: Source port or ethertype to add. + * @dest_port: Destination port to add. + * + * Return: Int. */ int qed_llh_add_protocol_filter(struct qed_dev *cdev, @@ -385,14 +408,14 @@ qed_llh_add_protocol_filter(struct qed_dev *cdev, u16 source_port_or_eth_type, u16 dest_port); /** - * @brief qed_llh_remove_protocol_filter - Remove a LLH protocol filter from - * the given filter bank. + * qed_llh_remove_protocol_filter(): Remove a LLH protocol filter from + * the given filter bank. * - * @param cdev - * @param ppfid - relative within the allocated ppfids ('0' is the default one). - * @param type - type of filters and comparing - * @param source_port_or_eth_type - source port or ethertype to add - * @param dest_port - destination port to add + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). + * @type: Type of filters and comparing. + * @source_port_or_eth_type: Source port or ethertype to add. + * @dest_port: Destination port to add. */ void qed_llh_remove_protocol_filter(struct qed_dev *cdev, @@ -401,31 +424,31 @@ qed_llh_remove_protocol_filter(struct qed_dev *cdev, u16 source_port_or_eth_type, u16 dest_port); /** - * *@brief Cleanup of previous driver remains prior to load + * qed_final_cleanup(): Cleanup of previous driver remains prior to load. 
* - * @param p_hwfn - * @param p_ptt - * @param id - For PF, engine-relative. For VF, PF-relative. - * @param is_vf - true iff cleanup is made for a VF. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @id: For PF, engine-relative. For VF, PF-relative. + * @is_vf: True iff cleanup is made for a VF. * - * @return int + * Return: Int. */ int qed_final_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 id, bool is_vf); /** - * @brief qed_get_queue_coalesce - Retrieve coalesce value for a given queue. + * qed_get_queue_coalesce(): Retrieve coalesce value for a given queue. * - * @param p_hwfn - * @param p_coal - store coalesce value read from the hardware. - * @param p_handle + * @p_hwfn: HW device data. + * @coal: Store coalesce value read from the hardware. + * @handle: P_handle. * - * @return int + * Return: Int. **/ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle); /** - * @brief qed_set_queue_coalesce - Configure coalesce parameters for Rx and + * qed_set_queue_coalesce(): Configure coalesce parameters for Rx and * Tx queue. The fact that we can configure coalescing to up to 511, but on * varying accuracy [the bigger the value the less accurate] up to a mistake * of 3usec for the highest values. @@ -433,37 +456,38 @@ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle); * should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff] * otherwise configuration would break. * + * @rx_coal: Rx Coalesce value in micro seconds. + * @tx_coal: TX Coalesce value in micro seconds. + * @p_handle: P_handle. * - * @param rx_coal - Rx Coalesce value in micro seconds. - * @param tx_coal - TX Coalesce value in micro seconds. - * @param p_handle - * - * @return int + * Return: Int. **/ int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle); /** - * @brief qed_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER + * qed_pglueb_set_pfid_enable(): Enable or disable PCI BUS MASTER. * - * @param p_hwfn - * @param p_ptt - * @param b_enable - true/false + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @b_enable: True/False. * - * @return int + * Return: Int. */ int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_enable); /** - * @brief db_recovery_add - add doorbell information to the doorbell - * recovery mechanism. + * qed_db_recovery_add(): add doorbell information to the doorbell + * recovery mechanism. + * + * @cdev: Qed dev pointer. + * @db_addr: Doorbell address. + * @db_data: Address of where db_data is stored. + * @db_width: Doorbell is 32b pr 64b. + * @db_space: Doorbell recovery addresses are user or kernel space. * - * @param cdev - * @param db_addr - doorbell address - * @param db_data - address of where db_data is stored - * @param db_width - doorbell is 32b pr 64b - * @param db_space - doorbell recovery addresses are user or kernel space + * Return: Int. */ int qed_db_recovery_add(struct qed_dev *cdev, void __iomem *db_addr, @@ -472,17 +496,18 @@ int qed_db_recovery_add(struct qed_dev *cdev, enum qed_db_rec_space db_space); /** - * @brief db_recovery_del - remove doorbell information from the doorbell + * qed_db_recovery_del() - remove doorbell information from the doorbell * recovery mechanism. db_data serves as key (db_addr is not unique). * - * @param cdev - * @param db_addr - doorbell address - * @param db_data - address where db_data is stored. Serves as key for the + * @cdev: Qed dev pointer. + * @db_addr: doorbell address. 
+ * @db_data: address where db_data is stored. Serves as key for the * entry to delete. + * + * Return: Int. */ int qed_db_recovery_del(struct qed_dev *cdev, void __iomem *db_addr, void *db_data); - const char *qed_hw_get_resc_name(enum qed_resources res_id); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c index 78070682f2df..6bb4e165b592 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c +++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c @@ -215,10 +215,6 @@ struct devlink *qed_devlink_register(struct qed_dev *cdev) qdevlink = devlink_priv(dl); qdevlink->cdev = cdev; - rc = devlink_register(dl); - if (rc) - goto err_free; - rc = devlink_params_register(dl, qed_devlink_params, ARRAY_SIZE(qed_devlink_params)); if (rc) @@ -229,17 +225,13 @@ struct devlink *qed_devlink_register(struct qed_dev *cdev) QED_DEVLINK_PARAM_ID_IWARP_CMT, value); - devlink_params_publish(dl); cdev->iwarp_cmt = false; qed_fw_reporters_create(dl); - + devlink_register(dl); return dl; err_unregister: - devlink_unregister(dl); - -err_free: devlink_free(dl); return ERR_PTR(rc); @@ -250,11 +242,11 @@ void qed_devlink_unregister(struct devlink *devlink) if (!devlink) return; + devlink_unregister(devlink); qed_fw_reporters_destroy(devlink); devlink_params_unregister(devlink, qed_devlink_params, ARRAY_SIZE(qed_devlink_params)); - devlink_unregister(devlink); devlink_free(devlink); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index b768f0698170..3764190b948e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -30,6 +30,7 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_int.h" +#include "qed_iro_hsi.h" #include "qed_ll2.h" #include "qed_mcp.h" #include "qed_reg_addr.h" @@ -89,7 +90,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, struct qed_fcoe_pf_params *fcoe_pf_params = NULL; struct fcoe_init_ramrod_params *p_ramrod = NULL; struct fcoe_init_func_ramrod_data *p_data; - struct e4_fcoe_conn_context *p_cxt = NULL; + struct fcoe_conn_context *p_cxt = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; struct qed_cxt_info cxt_info; @@ -144,7 +145,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, memset(p_cxt, 0, sizeof(*p_cxt)); SET_FIELD(p_cxt->tstorm_ag_context.flags3, - E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1); + TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1); fcoe_pf_params->dummy_icid = (u16)dummy_cid; @@ -506,10 +507,9 @@ static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn, { if (RESC_NUM(p_hwfn, QED_BDQ)) { return (u8 __iomem *)p_hwfn->regview + - GTT_BAR0_MAP_REG_MSDM_RAM + - MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn, - QED_BDQ), - bdq_id); + GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM, + MSTORM_SCSI_BDQ_EXT_PROD, + RESC_START(p_hwfn, QED_BDQ), bdq_id); } else { DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); return NULL; @@ -521,10 +521,9 @@ static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn, { if (RESC_NUM(p_hwfn, QED_BDQ)) { return (u8 __iomem *)p_hwfn->regview + - GTT_BAR0_MAP_REG_TSDM_RAM + - TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn, - QED_BDQ), - bdq_id); + GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM, + TSTORM_SCSI_BDQ_EXT_PROD, + RESC_START(p_hwfn, QED_BDQ), bdq_id); } else { DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); return NULL; @@ -549,7 +548,7 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn) void 
qed_fcoe_setup(struct qed_hwfn *p_hwfn) { - struct e4_fcoe_task_context *p_task_ctx = NULL; + struct fcoe_task_context *p_task_ctx = NULL; u32 i, lc; int rc; @@ -561,7 +560,7 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn) if (rc) continue; - memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context)); + memset(p_task_ctx, 0, sizeof(struct fcoe_task_context)); lc = 0; SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC0, 1); @@ -572,7 +571,7 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn) p_task_ctx->timer_context.logical_client_1 = cpu_to_le32(lc); SET_FIELD(p_task_ctx->tstorm_ag_context.flags0, - E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1); + TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index fb1baa2da2d0..f2cedbd9489c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2019-2021 Marvell International Ltd. */ #ifndef _QED_HSI_H @@ -38,7 +38,7 @@ enum common_event_opcode { COMMON_EVENT_VF_PF_CHANNEL, COMMON_EVENT_VF_FLR, COMMON_EVENT_PF_UPDATE, - COMMON_EVENT_MALICIOUS_VF, + COMMON_EVENT_FW_ERROR, COMMON_EVENT_RL_UPDATE, COMMON_EVENT_EMPTY, MAX_COMMON_EVENT_OPCODE @@ -84,6 +84,13 @@ enum core_l4_pseudo_checksum_mode { MAX_CORE_L4_PSEUDO_CHECKSUM_MODE }; +/* LL2 SP error code */ +enum core_ll2_error_code { + LL2_OK = 0, + LL2_ERROR, + MAX_CORE_LL2_ERROR_CODE +}; + /* Light-L2 RX Producers in Tstorm RAM */ struct core_ll2_port_stats { struct regpair gsi_invalid_hdr; @@ -123,6 +130,15 @@ struct core_ll2_ustorm_per_queue_stat { struct regpair rcv_bcast_pkts; }; +struct core_ll2_rx_per_queue_stat { + struct core_ll2_tstorm_per_queue_stat tstorm_stat; + struct core_ll2_ustorm_per_queue_stat ustorm_stat; +}; + +struct core_ll2_tx_per_queue_stat { + struct core_ll2_pstorm_per_queue_stat pstorm_stat; +}; + /* Structure for doorbell data, in PWM mode, for RX producers update. */ struct core_pwm_prod_update_data { __le16 icid; /* internal CID */ @@ -135,6 +151,15 @@ struct core_pwm_prod_update_data { struct core_ll2_rx_prod prod; /* Producers */ }; +/* Ramrod data for rx/tx queue statistics query ramrod */ +struct core_queue_stats_query_ramrod_data { + u8 rx_stat; + u8 tx_stat; + __le16 reserved[3]; + struct regpair rx_stat_addr; + struct regpair tx_stat_addr; +}; + /* Core Ramrod Command IDs (light L2) */ enum core_ramrod_cmd_id { CORE_RAMROD_UNUSED, @@ -210,7 +235,8 @@ struct core_rx_fast_path_cqe { __le16 vlan; struct core_rx_cqe_opaque_data opaque_data; struct parsing_err_flags err_flags; - __le16 reserved0; + u8 packet_source; + u8 reserved0; __le32 reserved1[3]; }; @@ -226,7 +252,8 @@ struct core_rx_gsi_offload_cqe { __le16 qp_id; __le32 src_qp; struct core_rx_cqe_opaque_data opaque_data; - __le32 reserved; + u8 packet_source; + u8 reserved[3]; }; /* Core RX CQE for Light L2 */ @@ -245,6 +272,15 @@ union core_rx_cqe_union { struct core_rx_slow_path_cqe rx_cqe_sp; }; +/* RX packet source. 
*/ +enum core_rx_pkt_source { + CORE_RX_PKT_SOURCE_NETWORK = 0, + CORE_RX_PKT_SOURCE_LB, + CORE_RX_PKT_SOURCE_TX, + CORE_RX_PKT_SOURCE_LL2_TX, + MAX_CORE_RX_PKT_SOURCE +}; + /* Ramrod data for rx queue start ramrod */ struct core_rx_start_ramrod_data { struct regpair bd_base; @@ -362,7 +398,7 @@ struct core_tx_update_ramrod_data { u8 update_qm_pq_id_flg; u8 reserved0; __le16 qm_pq_id; - __le32 reserved1; + __le32 reserved1[1]; }; /* Enum flag for what type of dcb data to update */ @@ -386,224 +422,222 @@ struct pstorm_core_conn_st_ctx { /* Core Slowpath Connection storm context of Xstorm */ struct xstorm_core_conn_st_ctx { - __le32 spq_base_lo; - __le32 spq_base_hi; - struct regpair consolid_base_addr; + struct regpair spq_base_addr; + __le32 reserved0[2]; __le16 spq_cons; - __le16 consolid_cons; - __le32 reserved0[55]; + __le16 reserved1[111]; }; -struct e4_xstorm_core_conn_ag_ctx { +struct xstorm_core_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 -#define 
E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 u8 flags2; -#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 -#define 
E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 u8 flags7; -#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define 
XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define 
XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7 u8 flags11; -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define 
XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 -#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 -#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 consolid_prod; @@ -657,89 +691,89 @@ struct e4_xstorm_core_conn_ag_ctx { __le16 word15; }; -struct e4_tstorm_core_conn_ag_ctx { +struct tstorm_core_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define 
E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 +#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 +#define 
TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define 
TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 reg2; @@ -761,63 +795,63 @@ struct e4_tstorm_core_conn_ag_ctx { __le32 reg10; }; -struct e4_ustorm_core_conn_ag_ctx { +struct ustorm_core_conn_ag_ctx { u8 reserved; u8 byte1; u8 flags0; -#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 -#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 -#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 -#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 -#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define 
USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -846,15 +880,15 @@ struct tstorm_core_conn_st_ctx { }; /* core connection context */ -struct e4_core_conn_context { +struct core_conn_context { struct ystorm_core_conn_st_ctx ystorm_st_context; struct regpair ystorm_st_padding[2]; struct pstorm_core_conn_st_ctx pstorm_st_context; struct regpair pstorm_st_padding[2]; struct xstorm_core_conn_st_ctx xstorm_st_context; - struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context; - struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context; - struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context; + struct xstorm_core_conn_ag_ctx xstorm_ag_context; + struct tstorm_core_conn_ag_ctx tstorm_ag_context; + struct ustorm_core_conn_ag_ctx ustorm_ag_context; struct mstorm_core_conn_st_ctx mstorm_st_context; struct ustorm_core_conn_st_ctx ustorm_st_context; struct regpair ustorm_st_padding[2]; @@ -930,12 +964,12 @@ struct eth_rx_rate_limit { /* Update RSS indirection table entry command */ struct eth_tstorm_rss_update_data { - u8 valid; u8 vport_id; u8 ind_table_index; - u8 reserved; __le16 ind_table_value; __le16 reserved1; + u8 reserved; + u8 valid; }; struct eth_ustorm_per_pf_stat { @@ -967,19 +1001,20 @@ struct vf_pf_channel_eqe_data { struct regpair msg_addr; }; -/* Event Ring malicious VF data */ -struct malicious_vf_eqe_data { - u8 vf_id; - u8 err_id; - __le16 reserved[3]; -}; - /* Event Ring initial cleanup data */ struct initial_cleanup_eqe_data { u8 vf_id; u8 reserved[7]; }; +/* FW error data */ +struct fw_err_data { + u8 recovery_scope; + u8 err_id; + __le16 entity_id; + u8 reserved[4]; +}; + /* Event Data Union */ union event_ring_data { u8 bytes[8]; @@ -987,8 +1022,8 @@ union event_ring_data { struct iscsi_eqe_data iscsi_info; 
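/*
 * Illustrative sketch, not part of the patch: the fw_err_data member added to
 * this union (see the + lines just below) replaces the removed
 * malicious_vf_eqe_data, and its err_id and recovery_scope fields presumably
 * map to the func_err_id and fw_err_recovery_scope enums added further down.
 * A hypothetical EQE consumer could decode it roughly as follows; the handler
 * name below is invented for illustration only:
 *
 *	static void example_handle_fw_err_eqe(union event_ring_data *data)
 *	{
 *		struct fw_err_data *err = &data->err_data;
 *
 *		pr_err("FW error %u (scope %u, entity 0x%04x)\n",
 *		       err->err_id, err->recovery_scope,
 *		       le16_to_cpu(err->entity_id));
 *	}
 *
 * Likewise, the MASK/SHIFT flag pairs in the renamed *storm_core_conn_ag_ctx
 * structures above keep the same layout after the E4_ prefix drop, so they
 * are still intended to be accessed through the driver's generic bit-field
 * helpers, e.g. (assuming the existing SET_FIELD() macro):
 *
 *	SET_FIELD(ag_ctx->flags10, XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
 */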
struct iscsi_connect_done_results iscsi_conn_done_info; union rdma_eqe_data rdma_data; - struct malicious_vf_eqe_data malicious_vf; struct initial_cleanup_eqe_data vf_init_cleanup; + struct fw_err_data err_data; }; /* Event Ring Entry */ @@ -1042,6 +1077,15 @@ struct hsi_fp_ver_struct { u8 major_ver_arr[2]; }; +/* Integration Phase */ +enum integ_phase { + INTEG_PHASE_BB_A0_LATEST = 3, + INTEG_PHASE_BB_B0_NO_MCP = 10, + INTEG_PHASE_BB_B0_WITH_MCP = 11, + MAX_INTEG_PHASE +}; + +/* Ports mode */ enum iwarp_ll2_tx_queues { IWARP_LL2_IN_ORDER_TX_QUEUE = 1, IWARP_LL2_ALIGNED_TX_QUEUE, @@ -1050,9 +1094,9 @@ enum iwarp_ll2_tx_queues { MAX_IWARP_LL2_TX_QUEUES }; -/* Malicious VF error ID */ -enum malicious_vf_error_id { - MALICIOUS_VF_NO_ERROR, +/* Function error ID */ +enum func_err_id { + FUNC_NO_ERROR, VF_PF_CHANNEL_NOT_READY, VF_ZONE_MSG_NOT_VALID, VF_ZONE_FUNC_NOT_ENABLED, @@ -1087,13 +1131,33 @@ enum malicious_vf_error_id { CORE_PACKET_SIZE_TOO_LARGE, CORE_ILLEGAL_BD_FLAGS, CORE_GSI_PACKET_VIOLATION, - MAX_MALICIOUS_VF_ERROR_ID, + MAX_FUNC_ERR_ID +}; + +/* FW error handling mode */ +enum fw_err_mode { + FW_ERR_FATAL_ASSERT, + FW_ERR_DRV_REPORT, + MAX_FW_ERR_MODE +}; + +/* FW error recovery scope */ +enum fw_err_recovery_scope { + ERR_SCOPE_INVALID, + ERR_SCOPE_TX_Q, + ERR_SCOPE_RX_Q, + ERR_SCOPE_QP, + ERR_SCOPE_VPORT, + ERR_SCOPE_FUNC, + ERR_SCOPE_PORT, + ERR_SCOPE_ENGINE, + MAX_FW_ERR_RECOVERY_SCOPE }; /* Mstorm non-triggering VF zone */ struct mstorm_non_trigger_vf_zone { struct eth_mstorm_per_queue_stat eth_queue_stat; - struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD]; + struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_RXQ_VF_QUAD]; }; /* Mstorm VF zone */ @@ -1148,7 +1212,7 @@ struct pf_start_tunnel_config { /* Ramrod data for PF start ramrod */ struct pf_start_ramrod_data { struct regpair event_ring_pbl_addr; - struct regpair consolid_q_pbl_addr; + struct regpair consolid_q_pbl_base_addr; struct pf_start_tunnel_config tunnel_config; __le16 event_ring_sb_id; u8 base_vf_id; @@ -1166,6 +1230,9 @@ struct pf_start_ramrod_data { u8 reserved0; struct hsi_fp_ver_struct hsi_fp_ver; struct outer_tag_config_struct outer_tag_config; + u8 pf_fp_err_mode; + u8 consolid_q_num_pages; + u8 reserved[6]; }; /* Data for port update ramrod */ @@ -1230,6 +1297,13 @@ enum ports_mode { MAX_PORTS_MODE }; +/* Protocol-common error code */ +enum protocol_common_error_code { + COMMON_ERR_CODE_OK = 0, + COMMON_ERR_CODE_ERROR, + MAX_PROTOCOL_COMMON_ERROR_CODE +}; + /* use to index in hsi_fp_[major|minor]_ver_arr per protocol */ enum protocol_version_array_key { ETH_VER_KEY = 0, @@ -1525,74 +1599,74 @@ enum dmae_cmd_src_enum { MAX_DMAE_CMD_SRC_ENUM }; -struct e4_mstorm_core_conn_ag_ctx { +struct mstorm_core_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define 
MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct e4_ystorm_core_conn_ag_ctx { +struct ystorm_core_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define 
E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -1704,6 +1778,7 @@ struct igu_msix_vector { #define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF #define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24 }; + /* per encapsulation type enabling flags */ struct prs_reg_encapsulation_type_en { u8 flags; @@ -1778,22 +1853,22 @@ struct qm_rf_opportunistic_mask { }; /* QM hardware structure of QM map memory */ -struct qm_rf_pq_map_e4 { +struct qm_rf_pq_map { __le32 reg; -#define QM_RF_PQ_MAP_E4_PQ_VALID_MASK 0x1 -#define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT 0 -#define QM_RF_PQ_MAP_E4_RL_ID_MASK 0xFF -#define QM_RF_PQ_MAP_E4_RL_ID_SHIFT 1 -#define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK 0x1FF -#define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT 9 -#define QM_RF_PQ_MAP_E4_VOQ_MASK 0x1F -#define QM_RF_PQ_MAP_E4_VOQ_SHIFT 18 -#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK 0x3 -#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT 23 -#define QM_RF_PQ_MAP_E4_RL_VALID_MASK 0x1 -#define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT 25 -#define QM_RF_PQ_MAP_E4_RESERVED_MASK 0x3F -#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT 26 +#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 +#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0 +#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF +#define QM_RF_PQ_MAP_RL_ID_SHIFT 1 +#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF +#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9 +#define QM_RF_PQ_MAP_VOQ_MASK 0x1F +#define QM_RF_PQ_MAP_VOQ_SHIFT 18 +#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 +#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23 +#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 +#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25 +#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F +#define QM_RF_PQ_MAP_RESERVED_SHIFT 26 }; /* Completion params for aggregated interrupt completion */ @@ -1831,769 +1906,6 @@ struct virt_mem_desc { u32 size; /* In bytes */ }; -/****************************************/ -/* Debug Tools HSI constants and macros */ -/****************************************/ - -enum block_id { - BLOCK_GRC, - BLOCK_MISCS, - BLOCK_MISC, - BLOCK_DBU, - BLOCK_PGLUE_B, - BLOCK_CNIG, - BLOCK_CPMU, - BLOCK_NCSI, - BLOCK_OPTE, - BLOCK_BMB, - BLOCK_PCIE, - BLOCK_MCP, - BLOCK_MCP2, - BLOCK_PSWHST, - BLOCK_PSWHST2, - BLOCK_PSWRD, - BLOCK_PSWRD2, - BLOCK_PSWWR, - BLOCK_PSWWR2, - BLOCK_PSWRQ, - BLOCK_PSWRQ2, - BLOCK_PGLCS, - BLOCK_DMAE, - BLOCK_PTU, - BLOCK_TCM, - BLOCK_MCM, - BLOCK_UCM, - BLOCK_XCM, - BLOCK_YCM, - BLOCK_PCM, - BLOCK_QM, - BLOCK_TM, - BLOCK_DORQ, - BLOCK_BRB, - BLOCK_SRC, - BLOCK_PRS, - BLOCK_TSDM, - BLOCK_MSDM, - BLOCK_USDM, - BLOCK_XSDM, - BLOCK_YSDM, - BLOCK_PSDM, - BLOCK_TSEM, - BLOCK_MSEM, - BLOCK_USEM, - 
BLOCK_XSEM, - BLOCK_YSEM, - BLOCK_PSEM, - BLOCK_RSS, - BLOCK_TMLD, - BLOCK_MULD, - BLOCK_YULD, - BLOCK_XYLD, - BLOCK_PRM, - BLOCK_PBF_PB1, - BLOCK_PBF_PB2, - BLOCK_RPB, - BLOCK_BTB, - BLOCK_PBF, - BLOCK_RDIF, - BLOCK_TDIF, - BLOCK_CDU, - BLOCK_CCFC, - BLOCK_TCFC, - BLOCK_IGU, - BLOCK_CAU, - BLOCK_UMAC, - BLOCK_XMAC, - BLOCK_MSTAT, - BLOCK_DBG, - BLOCK_NIG, - BLOCK_WOL, - BLOCK_BMBN, - BLOCK_IPC, - BLOCK_NWM, - BLOCK_NWS, - BLOCK_MS, - BLOCK_PHY_PCIE, - BLOCK_LED, - BLOCK_AVS_WRAP, - BLOCK_PXPREQBUS, - BLOCK_BAR0_MAP, - BLOCK_MCP_FIO, - BLOCK_LAST_INIT, - BLOCK_PRS_FC, - BLOCK_PBF_FC, - BLOCK_NIG_LB_FC, - BLOCK_NIG_LB_FC_PLLH, - BLOCK_NIG_TX_FC_PLLH, - BLOCK_NIG_TX_FC, - BLOCK_NIG_RX_FC_PLLH, - BLOCK_NIG_RX_FC, - MAX_BLOCK_ID -}; - -/* binary debug buffer types */ -enum bin_dbg_buffer_type { - BIN_BUF_DBG_MODE_TREE, - BIN_BUF_DBG_DUMP_REG, - BIN_BUF_DBG_DUMP_MEM, - BIN_BUF_DBG_IDLE_CHK_REGS, - BIN_BUF_DBG_IDLE_CHK_IMMS, - BIN_BUF_DBG_IDLE_CHK_RULES, - BIN_BUF_DBG_IDLE_CHK_PARSING_DATA, - BIN_BUF_DBG_ATTN_BLOCKS, - BIN_BUF_DBG_ATTN_REGS, - BIN_BUF_DBG_ATTN_INDEXES, - BIN_BUF_DBG_ATTN_NAME_OFFSETS, - BIN_BUF_DBG_BLOCKS, - BIN_BUF_DBG_BLOCKS_CHIP_DATA, - BIN_BUF_DBG_BUS_LINES, - BIN_BUF_DBG_BLOCKS_USER_DATA, - BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA, - BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS, - BIN_BUF_DBG_RESET_REGS, - BIN_BUF_DBG_PARSING_STRINGS, - MAX_BIN_DBG_BUFFER_TYPE -}; - - -/* Attention bit mapping */ -struct dbg_attn_bit_mapping { - u16 data; -#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF -#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0 -#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1 -#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15 -}; - -/* Attention block per-type data */ -struct dbg_attn_block_type_data { - u16 names_offset; - u16 reserved1; - u8 num_regs; - u8 reserved2; - u16 regs_offset; - -}; - -/* Block attentions */ -struct dbg_attn_block { - struct dbg_attn_block_type_data per_type_data[2]; -}; - -/* Attention register result */ -struct dbg_attn_reg_result { - u32 data; -#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF -#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0 -#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF -#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24 - u16 block_attn_offset; - u16 reserved; - u32 sts_val; - u32 mask_val; -}; - -/* Attention block result */ -struct dbg_attn_block_result { - u8 block_id; - u8 data; -#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3 -#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0 -#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F -#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2 - u16 names_offset; - struct dbg_attn_reg_result reg_results[15]; -}; - -/* Mode header */ -struct dbg_mode_hdr { - u16 data; -#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1 -#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0 -#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF -#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1 -}; - -/* Attention register */ -struct dbg_attn_reg { - struct dbg_mode_hdr mode; - u16 block_attn_offset; - u32 data; -#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF -#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0 -#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF -#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24 - u32 sts_clr_address; - u32 mask_address; -}; - -/* Attention types */ -enum dbg_attn_type { - ATTN_TYPE_INTERRUPT, - ATTN_TYPE_PARITY, - MAX_DBG_ATTN_TYPE -}; - -/* Block debug data */ -struct dbg_block { - u8 name[15]; - u8 associated_storm_letter; -}; - -/* Chip-specific block debug data */ -struct dbg_block_chip { - u8 flags; -#define 
DBG_BLOCK_CHIP_IS_REMOVED_MASK 0x1 -#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT 0 -#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK 0x1 -#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT 1 -#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK 0x1 -#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2 -#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK 0x1 -#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT 3 -#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK 0x1 -#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT 4 -#define DBG_BLOCK_CHIP_RESERVED0_MASK 0x7 -#define DBG_BLOCK_CHIP_RESERVED0_SHIFT 5 - u8 dbg_client_id; - u8 reset_reg_id; - u8 reset_reg_bit_offset; - struct dbg_mode_hdr dbg_bus_mode; - u16 reserved1; - u8 reserved2; - u8 num_of_dbg_bus_lines; - u16 dbg_bus_lines_offset; - u32 dbg_select_reg_addr; - u32 dbg_dword_enable_reg_addr; - u32 dbg_shift_reg_addr; - u32 dbg_force_valid_reg_addr; - u32 dbg_force_frame_reg_addr; -}; - -/* Chip-specific block user debug data */ -struct dbg_block_chip_user { - u8 num_of_dbg_bus_lines; - u8 has_latency_events; - u16 names_offset; -}; - -/* Block user debug data */ -struct dbg_block_user { - u8 name[16]; -}; - -/* Block Debug line data */ -struct dbg_bus_line { - u8 data; -#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF -#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0 -#define DBG_BUS_LINE_IS_256B_MASK 0x1 -#define DBG_BUS_LINE_IS_256B_SHIFT 4 -#define DBG_BUS_LINE_RESERVED_MASK 0x7 -#define DBG_BUS_LINE_RESERVED_SHIFT 5 - u8 group_sizes; -}; - -/* Condition header for registers dump */ -struct dbg_dump_cond_hdr { - struct dbg_mode_hdr mode; /* Mode header */ - u8 block_id; /* block ID */ - u8 data_size; /* size in dwords of the data following this header */ -}; - -/* Memory data for registers dump */ -struct dbg_dump_mem { - u32 dword0; -#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF -#define DBG_DUMP_MEM_ADDRESS_SHIFT 0 -#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF -#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24 - u32 dword1; -#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF -#define DBG_DUMP_MEM_LENGTH_SHIFT 0 -#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1 -#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24 -#define DBG_DUMP_MEM_RESERVED_MASK 0x7F -#define DBG_DUMP_MEM_RESERVED_SHIFT 25 -}; - -/* Register data for registers dump */ -struct dbg_dump_reg { - u32 data; -#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF -#define DBG_DUMP_REG_ADDRESS_SHIFT 0 -#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 -#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23 -#define DBG_DUMP_REG_LENGTH_MASK 0xFF -#define DBG_DUMP_REG_LENGTH_SHIFT 24 -}; - -/* Split header for registers dump */ -struct dbg_dump_split_hdr { - u32 hdr; -#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF -#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0 -#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF -#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24 -}; - -/* Condition header for idle check */ -struct dbg_idle_chk_cond_hdr { - struct dbg_mode_hdr mode; /* Mode header */ - u16 data_size; /* size in dwords of the data following this header */ -}; - -/* Idle Check condition register */ -struct dbg_idle_chk_cond_reg { - u32 data; -#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF -#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0 -#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1 -#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23 -#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF -#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24 - u16 num_entries; - u8 entry_size; - u8 start_entry; -}; - -/* Idle Check info register */ -struct dbg_idle_chk_info_reg { - u32 data; -#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 
0x7FFFFF -#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0 -#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1 -#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23 -#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF -#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24 - u16 size; /* register size in dwords */ - struct dbg_mode_hdr mode; /* Mode header */ -}; - -/* Idle Check register */ -union dbg_idle_chk_reg { - struct dbg_idle_chk_cond_reg cond_reg; /* condition register */ - struct dbg_idle_chk_info_reg info_reg; /* info register */ -}; - -/* Idle Check result header */ -struct dbg_idle_chk_result_hdr { - u16 rule_id; /* Failing rule index */ - u16 mem_entry_id; /* Failing memory entry index */ - u8 num_dumped_cond_regs; /* number of dumped condition registers */ - u8 num_dumped_info_regs; /* number of dumped condition registers */ - u8 severity; /* from dbg_idle_chk_severity_types enum */ - u8 reserved; -}; - -/* Idle Check result register header */ -struct dbg_idle_chk_result_reg_hdr { - u8 data; -#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1 -#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0 -#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F -#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1 - u8 start_entry; /* index of the first checked entry */ - u16 size; /* register size in dwords */ -}; - -/* Idle Check rule */ -struct dbg_idle_chk_rule { - u16 rule_id; /* Idle Check rule ID */ - u8 severity; /* value from dbg_idle_chk_severity_types enum */ - u8 cond_id; /* Condition ID */ - u8 num_cond_regs; /* number of condition registers */ - u8 num_info_regs; /* number of info registers */ - u8 num_imms; /* number of immediates in the condition */ - u8 reserved1; - u16 reg_offset; /* offset of this rules registers in the idle check - * register array (in dbg_idle_chk_reg units). - */ - u16 imm_offset; /* offset of this rules immediate values in the - * immediate values array (in dwords). 
- */ -}; - -/* Idle Check rule parsing data */ -struct dbg_idle_chk_rule_parsing_data { - u32 data; -#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1 -#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0 -#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF -#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1 -}; - -/* Idle check severity types */ -enum dbg_idle_chk_severity_types { - /* idle check failure should cause an error */ - IDLE_CHK_SEVERITY_ERROR, - /* idle check failure should cause an error only if theres no traffic */ - IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC, - /* idle check failure should cause a warning */ - IDLE_CHK_SEVERITY_WARNING, - MAX_DBG_IDLE_CHK_SEVERITY_TYPES -}; - -/* Reset register */ -struct dbg_reset_reg { - u32 data; -#define DBG_RESET_REG_ADDR_MASK 0xFFFFFF -#define DBG_RESET_REG_ADDR_SHIFT 0 -#define DBG_RESET_REG_IS_REMOVED_MASK 0x1 -#define DBG_RESET_REG_IS_REMOVED_SHIFT 24 -#define DBG_RESET_REG_RESERVED_MASK 0x7F -#define DBG_RESET_REG_RESERVED_SHIFT 25 -}; - -/* Debug Bus block data */ -struct dbg_bus_block_data { - u8 enable_mask; - u8 right_shift; - u8 force_valid_mask; - u8 force_frame_mask; - u8 dword_mask; - u8 line_num; - u8 hw_id; - u8 flags; -#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK 0x1 -#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0 -#define DBG_BUS_BLOCK_DATA_RESERVED_MASK 0x7F -#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT 1 -}; - -enum dbg_bus_clients { - DBG_BUS_CLIENT_RBCN, - DBG_BUS_CLIENT_RBCP, - DBG_BUS_CLIENT_RBCR, - DBG_BUS_CLIENT_RBCT, - DBG_BUS_CLIENT_RBCU, - DBG_BUS_CLIENT_RBCF, - DBG_BUS_CLIENT_RBCX, - DBG_BUS_CLIENT_RBCS, - DBG_BUS_CLIENT_RBCH, - DBG_BUS_CLIENT_RBCZ, - DBG_BUS_CLIENT_OTHER_ENGINE, - DBG_BUS_CLIENT_TIMESTAMP, - DBG_BUS_CLIENT_CPU, - DBG_BUS_CLIENT_RBCY, - DBG_BUS_CLIENT_RBCQ, - DBG_BUS_CLIENT_RBCM, - DBG_BUS_CLIENT_RBCB, - DBG_BUS_CLIENT_RBCW, - DBG_BUS_CLIENT_RBCV, - MAX_DBG_BUS_CLIENTS -}; - -/* Debug Bus constraint operation types */ -enum dbg_bus_constraint_ops { - DBG_BUS_CONSTRAINT_OP_EQ, - DBG_BUS_CONSTRAINT_OP_NE, - DBG_BUS_CONSTRAINT_OP_LT, - DBG_BUS_CONSTRAINT_OP_LTC, - DBG_BUS_CONSTRAINT_OP_LE, - DBG_BUS_CONSTRAINT_OP_LEC, - DBG_BUS_CONSTRAINT_OP_GT, - DBG_BUS_CONSTRAINT_OP_GTC, - DBG_BUS_CONSTRAINT_OP_GE, - DBG_BUS_CONSTRAINT_OP_GEC, - MAX_DBG_BUS_CONSTRAINT_OPS -}; - -/* Debug Bus trigger state data */ -struct dbg_bus_trigger_state_data { - u8 msg_len; - u8 constraint_dword_mask; - u8 storm_id; - u8 reserved; -}; - -/* Debug Bus memory address */ -struct dbg_bus_mem_addr { - u32 lo; - u32 hi; -}; - -/* Debug Bus PCI buffer data */ -struct dbg_bus_pci_buf_data { - struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */ - struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */ - u32 size; /* PCI buffer size in bytes */ -}; - -/* Debug Bus Storm EID range filter params */ -struct dbg_bus_storm_eid_range_params { - u8 min; /* Minimal event ID to filter on */ - u8 max; /* Maximal event ID to filter on */ -}; - -/* Debug Bus Storm EID mask filter params */ -struct dbg_bus_storm_eid_mask_params { - u8 val; /* Event ID value */ - u8 mask; /* Event ID mask. 1s in the mask = dont care bits. 
*/ -}; - -/* Debug Bus Storm EID filter params */ -union dbg_bus_storm_eid_params { - struct dbg_bus_storm_eid_range_params range; - struct dbg_bus_storm_eid_mask_params mask; -}; - -/* Debug Bus Storm data */ -struct dbg_bus_storm_data { - u8 enabled; - u8 mode; - u8 hw_id; - u8 eid_filter_en; - u8 eid_range_not_mask; - u8 cid_filter_en; - union dbg_bus_storm_eid_params eid_filter_params; - u32 cid; -}; - -/* Debug Bus data */ -struct dbg_bus_data { - u32 app_version; - u8 state; - u8 mode_256b_en; - u8 num_enabled_blocks; - u8 num_enabled_storms; - u8 target; - u8 one_shot_en; - u8 grc_input_en; - u8 timestamp_input_en; - u8 filter_en; - u8 adding_filter; - u8 filter_pre_trigger; - u8 filter_post_trigger; - u8 trigger_en; - u8 filter_constraint_dword_mask; - u8 next_trigger_state; - u8 next_constraint_id; - struct dbg_bus_trigger_state_data trigger_states[3]; - u8 filter_msg_len; - u8 rcv_from_other_engine; - u8 blocks_dword_mask; - u8 blocks_dword_overlap; - u32 hw_id_mask; - struct dbg_bus_pci_buf_data pci_buf; - struct dbg_bus_block_data blocks[132]; - struct dbg_bus_storm_data storms[6]; -}; - -/* Debug bus states */ -enum dbg_bus_states { - DBG_BUS_STATE_IDLE, - DBG_BUS_STATE_READY, - DBG_BUS_STATE_RECORDING, - DBG_BUS_STATE_STOPPED, - MAX_DBG_BUS_STATES -}; - -/* Debug Bus Storm modes */ -enum dbg_bus_storm_modes { - DBG_BUS_STORM_MODE_PRINTF, - DBG_BUS_STORM_MODE_PRAM_ADDR, - DBG_BUS_STORM_MODE_DRA_RW, - DBG_BUS_STORM_MODE_DRA_W, - DBG_BUS_STORM_MODE_LD_ST_ADDR, - DBG_BUS_STORM_MODE_DRA_FSM, - DBG_BUS_STORM_MODE_FAST_DBGMUX, - DBG_BUS_STORM_MODE_RH, - DBG_BUS_STORM_MODE_RH_WITH_STORE, - DBG_BUS_STORM_MODE_FOC, - DBG_BUS_STORM_MODE_EXT_STORE, - MAX_DBG_BUS_STORM_MODES -}; - -/* Debug bus target IDs */ -enum dbg_bus_targets { - DBG_BUS_TARGET_ID_INT_BUF, - DBG_BUS_TARGET_ID_NIG, - DBG_BUS_TARGET_ID_PCI, - MAX_DBG_BUS_TARGETS -}; - -/* GRC Dump data */ -struct dbg_grc_data { - u8 params_initialized; - u8 reserved1; - u16 reserved2; - u32 param_val[48]; -}; - -/* Debug GRC params */ -enum dbg_grc_params { - DBG_GRC_PARAM_DUMP_TSTORM, - DBG_GRC_PARAM_DUMP_MSTORM, - DBG_GRC_PARAM_DUMP_USTORM, - DBG_GRC_PARAM_DUMP_XSTORM, - DBG_GRC_PARAM_DUMP_YSTORM, - DBG_GRC_PARAM_DUMP_PSTORM, - DBG_GRC_PARAM_DUMP_REGS, - DBG_GRC_PARAM_DUMP_RAM, - DBG_GRC_PARAM_DUMP_PBUF, - DBG_GRC_PARAM_DUMP_IOR, - DBG_GRC_PARAM_DUMP_VFC, - DBG_GRC_PARAM_DUMP_CM_CTX, - DBG_GRC_PARAM_DUMP_PXP, - DBG_GRC_PARAM_DUMP_RSS, - DBG_GRC_PARAM_DUMP_CAU, - DBG_GRC_PARAM_DUMP_QM, - DBG_GRC_PARAM_DUMP_MCP, - DBG_GRC_PARAM_DUMP_DORQ, - DBG_GRC_PARAM_DUMP_CFC, - DBG_GRC_PARAM_DUMP_IGU, - DBG_GRC_PARAM_DUMP_BRB, - DBG_GRC_PARAM_DUMP_BTB, - DBG_GRC_PARAM_DUMP_BMB, - DBG_GRC_PARAM_RESERVD1, - DBG_GRC_PARAM_DUMP_MULD, - DBG_GRC_PARAM_DUMP_PRS, - DBG_GRC_PARAM_DUMP_DMAE, - DBG_GRC_PARAM_DUMP_TM, - DBG_GRC_PARAM_DUMP_SDM, - DBG_GRC_PARAM_DUMP_DIF, - DBG_GRC_PARAM_DUMP_STATIC, - DBG_GRC_PARAM_UNSTALL, - DBG_GRC_PARAM_RESERVED2, - DBG_GRC_PARAM_MCP_TRACE_META_SIZE, - DBG_GRC_PARAM_EXCLUDE_ALL, - DBG_GRC_PARAM_CRASH, - DBG_GRC_PARAM_PARITY_SAFE, - DBG_GRC_PARAM_DUMP_CM, - DBG_GRC_PARAM_DUMP_PHY, - DBG_GRC_PARAM_NO_MCP, - DBG_GRC_PARAM_NO_FW_VER, - DBG_GRC_PARAM_RESERVED3, - DBG_GRC_PARAM_DUMP_MCP_HW_DUMP, - DBG_GRC_PARAM_DUMP_ILT_CDUC, - DBG_GRC_PARAM_DUMP_ILT_CDUT, - DBG_GRC_PARAM_DUMP_CAU_EXT, - MAX_DBG_GRC_PARAMS -}; - -/* Debug status codes */ -enum dbg_status { - DBG_STATUS_OK, - DBG_STATUS_APP_VERSION_NOT_SET, - DBG_STATUS_UNSUPPORTED_APP_VERSION, - DBG_STATUS_DBG_BLOCK_NOT_RESET, - DBG_STATUS_INVALID_ARGS, - 
DBG_STATUS_OUTPUT_ALREADY_SET, - DBG_STATUS_INVALID_PCI_BUF_SIZE, - DBG_STATUS_PCI_BUF_ALLOC_FAILED, - DBG_STATUS_PCI_BUF_NOT_ALLOCATED, - DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS, - DBG_STATUS_NO_MATCHING_FRAMING_MODE, - DBG_STATUS_VFC_READ_ERROR, - DBG_STATUS_STORM_ALREADY_ENABLED, - DBG_STATUS_STORM_NOT_ENABLED, - DBG_STATUS_BLOCK_ALREADY_ENABLED, - DBG_STATUS_BLOCK_NOT_ENABLED, - DBG_STATUS_NO_INPUT_ENABLED, - DBG_STATUS_NO_FILTER_TRIGGER_256B, - DBG_STATUS_FILTER_ALREADY_ENABLED, - DBG_STATUS_TRIGGER_ALREADY_ENABLED, - DBG_STATUS_TRIGGER_NOT_ENABLED, - DBG_STATUS_CANT_ADD_CONSTRAINT, - DBG_STATUS_TOO_MANY_TRIGGER_STATES, - DBG_STATUS_TOO_MANY_CONSTRAINTS, - DBG_STATUS_RECORDING_NOT_STARTED, - DBG_STATUS_DATA_DIDNT_TRIGGER, - DBG_STATUS_NO_DATA_RECORDED, - DBG_STATUS_DUMP_BUF_TOO_SMALL, - DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED, - DBG_STATUS_UNKNOWN_CHIP, - DBG_STATUS_VIRT_MEM_ALLOC_FAILED, - DBG_STATUS_BLOCK_IN_RESET, - DBG_STATUS_INVALID_TRACE_SIGNATURE, - DBG_STATUS_INVALID_NVRAM_BUNDLE, - DBG_STATUS_NVRAM_GET_IMAGE_FAILED, - DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE, - DBG_STATUS_NVRAM_READ_FAILED, - DBG_STATUS_IDLE_CHK_PARSE_FAILED, - DBG_STATUS_MCP_TRACE_BAD_DATA, - DBG_STATUS_MCP_TRACE_NO_META, - DBG_STATUS_MCP_COULD_NOT_HALT, - DBG_STATUS_MCP_COULD_NOT_RESUME, - DBG_STATUS_RESERVED0, - DBG_STATUS_SEMI_FIFO_NOT_EMPTY, - DBG_STATUS_IGU_FIFO_BAD_DATA, - DBG_STATUS_MCP_COULD_NOT_MASK_PRTY, - DBG_STATUS_FW_ASSERTS_PARSE_FAILED, - DBG_STATUS_REG_FIFO_BAD_DATA, - DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA, - DBG_STATUS_DBG_ARRAY_NOT_SET, - DBG_STATUS_RESERVED1, - DBG_STATUS_NON_MATCHING_LINES, - DBG_STATUS_INSUFFICIENT_HW_IDS, - DBG_STATUS_DBG_BUS_IN_USE, - DBG_STATUS_INVALID_STORM_DBG_MODE, - DBG_STATUS_OTHER_ENGINE_BB_ONLY, - DBG_STATUS_FILTER_SINGLE_HW_ID, - DBG_STATUS_TRIGGER_SINGLE_HW_ID, - DBG_STATUS_MISSING_TRIGGER_STATE_STORM, - MAX_DBG_STATUS -}; - -/* Debug Storms IDs */ -enum dbg_storms { - DBG_TSTORM_ID, - DBG_MSTORM_ID, - DBG_USTORM_ID, - DBG_XSTORM_ID, - DBG_YSTORM_ID, - DBG_PSTORM_ID, - MAX_DBG_STORMS -}; - -/* Idle Check data */ -struct idle_chk_data { - u32 buf_size; - u8 buf_size_set; - u8 reserved1; - u16 reserved2; -}; - -struct pretend_params { - u8 split_type; - u8 reserved; - u16 split_id; -}; - -/* Debug Tools data (per HW function) - */ -struct dbg_tools_data { - struct dbg_grc_data grc; - struct dbg_bus_data bus; - struct idle_chk_data idle_chk; - u8 mode_enable[40]; - u8 block_in_reset[132]; - u8 chip_id; - u8 hw_type; - u8 num_ports; - u8 num_pfs_per_port; - u8 num_vfs; - u8 initialized; - u8 use_dmae; - u8 reserved; - struct pretend_params pretend; - u32 num_regs_read; -}; - -/* ILT Clients */ -enum ilt_clients { - ILT_CLI_CDUC, - ILT_CLI_CDUT, - ILT_CLI_QM, - ILT_CLI_TM, - ILT_CLI_SRC, - ILT_CLI_TSDM, - ILT_CLI_RGFS, - ILT_CLI_TGFS, - MAX_ILT_CLIENTS -}; - /********************************/ /* HSI Init Functions constants */ /********************************/ @@ -2644,6 +1956,9 @@ struct init_nig_pri_tc_map_req { /* QM per global RL init parameters */ struct init_qm_global_rl_params { + u8 type; + u8 reserved0; + u16 reserved1; u32 rate_limit; }; @@ -2658,18 +1973,33 @@ struct init_qm_port_params { /* QM per-PQ init parameters */ struct init_qm_pq_params { - u8 vport_id; + u16 vport_id; + u16 rl_id; + u8 rl_valid; u8 tc_id; u8 wrr_group; - u8 rl_valid; - u16 rl_id; u8 port_id; - u8 reserved; +}; + +/* QM per RL init parameters */ +struct init_qm_rl_params { + u32 vport_rl; + u8 vport_rl_type; + u8 reserved[3]; +}; + +/* QM Rate Limiter types */ +enum init_qm_rl_type { + 
QM_RL_TYPE_NORMAL, + QM_RL_TYPE_QCN, + MAX_INIT_QM_RL_TYPE }; /* QM per-vport init parameters */ struct init_qm_vport_params { u16 wfq; + u16 reserved; + u16 tc_wfq[NUM_OF_TCS]; u16 first_tx_pq_id[NUM_OF_TCS]; }; @@ -2728,14 +2058,14 @@ struct fw_info_location { }; enum init_modes { - MODE_RESERVED, + MODE_BB_A0_DEPRECATED, MODE_BB, MODE_K2, MODE_ASIC, - MODE_RESERVED2, - MODE_RESERVED3, - MODE_RESERVED4, - MODE_RESERVED5, + MODE_EMUL_REDUCED, + MODE_EMUL_FULL, + MODE_FPGA, + MODE_CHIPSIM, MODE_SF, MODE_MF_SD, MODE_MF_SI, @@ -2743,8 +2073,8 @@ enum init_modes { MODE_PORTS_PER_ENG_2, MODE_PORTS_PER_ENG_4, MODE_100G, - MODE_RESERVED6, - MODE_RESERVED7, + MODE_SKIP_PRAM_INIT, + MODE_EMUL_MAC, MAX_INIT_MODES }; @@ -3009,706 +2339,6 @@ struct iro { u16 size; }; -/***************************** Public Functions *******************************/ - -/** - * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug - * arrays. - * - * @param p_hwfn - HW device data - * @param bin_ptr - a pointer to the binary data with debug arrays. - */ -enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn, - const u8 * const bin_ptr); - -/** - * @brief qed_read_regs - Reads registers into a buffer (using GRC). - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf - Destination buffer. - * @param addr - Source GRC address in dwords. - * @param len - Number of registers to read. - */ -void qed_read_regs(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len); - -/** - * @brief qed_read_fw_info - Reads FW info from the chip. - * - * The FW info contains FW-related information, such as the FW version, - * FW image (main/L2B/kuku), FW timestamp, etc. - * The FW info is read from the internal RAM of the first Storm that is not in - * reset. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param fw_info - Out: a pointer to write the FW info into. - * - * @return true if the FW info was read successfully from one of the Storms, - * or false if all Storms are in reset. - */ -bool qed_read_fw_info(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, struct fw_info *fw_info); -/** - * @brief qed_dbg_grc_config - Sets the value of a GRC parameter. - * - * @param p_hwfn - HW device data - * @param grc_param - GRC parameter - * @param val - Value to set. - * - * @return error if one of the following holds: - * - the version wasn't set - * - grc_param is invalid - * - val is outside the allowed boundaries - */ -enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn, - enum dbg_grc_params grc_param, u32 val); - -/** - * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their - * default value. - * - * @param p_hwfn - HW device data - */ -void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn); -/** - * @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for - * GRC Dump. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for the GRC Dump - * data. - * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *buf_size); - -/** - * @brief qed_dbg_grc_dump - Dumps GRC data into the specified buffer. 
- * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the collected GRC data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified dump buffer is too small - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - u32 buf_size_in_dwords, - u32 *num_dumped_dwords); - -/** - * @brief qed_dbg_idle_chk_get_dump_buf_size - Returns the required buffer size - * for idle check results. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for the idle check - * data. - * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *buf_size); - -/** - * @brief qed_dbg_idle_chk_dump - Performs idle check and writes the results - * into the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the idle check data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - u32 buf_size_in_dwords, - u32 *num_dumped_dwords); - -/** - * @brief qed_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer size - * for mcp trace results. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for mcp trace data. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the trace data in MCP scratchpad contain an invalid signature - * - the bundle ID in NVRAM is invalid - * - the trace meta data cannot be found (in NVRAM or image file) - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *buf_size); - -/** - * @brief qed_dbg_mcp_trace_dump - Performs mcp trace and writes the results - * into the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the mcp trace data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * - the trace data in MCP scratchpad contain an invalid signature - * - the bundle ID in NVRAM is invalid - * - the trace meta data cannot be found (in NVRAM or image file) - * - the trace meta data cannot be read (from NVRAM or image file) - * Otherwise, returns ok. 
- */ -enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - u32 buf_size_in_dwords, - u32 *num_dumped_dwords); - -/** - * @brief qed_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer size - * for grc trace fifo results. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for reg fifo data. - * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *buf_size); - -/** - * @brief qed_dbg_reg_fifo_dump - Reads the reg fifo and writes the results into - * the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the reg fifo data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * - DMAE transaction failed - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - u32 buf_size_in_dwords, - u32 *num_dumped_dwords); - -/** - * @brief qed_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer size - * for the IGU fifo results. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for the IGU fifo - * data. - * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *buf_size); - -/** - * @brief qed_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results into - * the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the IGU fifo data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * - DMAE transaction failed - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - u32 buf_size_in_dwords, - u32 *num_dumped_dwords); - -/** - * @brief qed_dbg_protection_override_get_dump_buf_size - Returns the required - * buffer size for protection override window results. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for protection - * override data. - * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. - */ -enum dbg_status -qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *buf_size); -/** - * @brief qed_dbg_protection_override_dump - Reads protection override window - * entries and writes the results into the specified buffer. 
- * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the protection override data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * - DMAE transaction failed - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - u32 buf_size_in_dwords, - u32 *num_dumped_dwords); -/** - * @brief qed_dbg_fw_asserts_get_dump_buf_size - Returns the required buffer - * size for FW Asserts results. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for FW Asserts data. - * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *buf_size); -/** - * @brief qed_dbg_fw_asserts_dump - Reads the FW Asserts and writes the results - * into the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the FW Asserts data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *dump_buf, - u32 buf_size_in_dwords, - u32 *num_dumped_dwords); - -/** - * @brief qed_dbg_read_attn - Reads the attention registers of the specified - * block and type, and writes the results into the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param block - Block ID. - * @param attn_type - Attention type. - * @param clear_status - Indicates if the attention status should be cleared. - * @param results - OUT: Pointer to write the read results into - * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - enum block_id block, - enum dbg_attn_type attn_type, - bool clear_status, - struct dbg_attn_block_result *results); - -/** - * @brief qed_dbg_print_attn - Prints attention registers values in the - * specified results struct. - * - * @param p_hwfn - * @param results - Pointer to the attention read results - * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. 
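The dump entry points above all follow the same two-step contract: query the required size in dwords, allocate, then dump into the caller's buffer. A minimal sketch of that flow for the GRC dump (the wrapper name, the vzalloc() allocation and the DBG_STATUS_VIRT_MEM_ALLOC_FAILED failure code are illustrative assumptions, not part of this header):

#include <linux/vmalloc.h>
#include "qed.h"
#include "qed_hsi.h"

static enum dbg_status qed_example_grc_dump(struct qed_hwfn *p_hwfn,
					    struct qed_ptt *p_ptt)
{
	u32 buf_size_dwords, num_dumped_dwords;
	enum dbg_status rc;
	u32 *dump_buf;

	/* Step 1: ask how many dwords the GRC dump will need */
	rc = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &buf_size_dwords);
	if (rc != DBG_STATUS_OK)
		return rc;

	/* Step 2: allocate the buffer; the size above is in dwords */
	dump_buf = vzalloc(buf_size_dwords * sizeof(u32));
	if (!dump_buf)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; /* assumed status */

	/* Step 3: collect the GRC data and note how much was written */
	rc = qed_dbg_grc_dump(p_hwfn, p_ptt, dump_buf,
			      buf_size_dwords, &num_dumped_dwords);

	vfree(dump_buf);
	return rc;
}

The idle check, MCP trace, reg/IGU fifo, protection override and FW asserts dumps use the same size-then-dump pairing.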
- */ -enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, - struct dbg_attn_block_result *results); - -/******************************* Data Types **********************************/ - -struct mcp_trace_format { - u32 data; -#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff -#define MCP_TRACE_FORMAT_MODULE_OFFSET 0 -#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000 -#define MCP_TRACE_FORMAT_LEVEL_OFFSET 16 -#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000 -#define MCP_TRACE_FORMAT_P1_SIZE_OFFSET 18 -#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000 -#define MCP_TRACE_FORMAT_P2_SIZE_OFFSET 20 -#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000 -#define MCP_TRACE_FORMAT_P3_SIZE_OFFSET 22 -#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000 -#define MCP_TRACE_FORMAT_LEN_OFFSET 24 - - char *format_str; -}; - -/* MCP Trace Meta data structure */ -struct mcp_trace_meta { - u32 modules_num; - char **modules; - u32 formats_num; - struct mcp_trace_format *formats; - bool is_allocated; -}; - -/* Debug Tools user data */ -struct dbg_tools_user_data { - struct mcp_trace_meta mcp_trace_meta; - const u32 *mcp_trace_user_meta_buf; -}; - -/******************************** Constants **********************************/ - -#define MAX_NAME_LEN 16 - -/***************************** Public Functions *******************************/ - -/** - * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with - * debug arrays. - * - * @param p_hwfn - HW device data - * @param bin_ptr - a pointer to the binary data with debug arrays. - */ -enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn, - const u8 * const bin_ptr); - -/** - * @brief qed_dbg_alloc_user_data - Allocates user debug data. - * - * @param p_hwfn - HW device data - * @param user_data_ptr - OUT: a pointer to the allocated memory. - */ -enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn, - void **user_data_ptr); - -/** - * @brief qed_dbg_get_status_str - Returns a string for the specified status. - * - * @param status - a debug status code. - * - * @return a string for the specified status - */ -const char *qed_dbg_get_status_str(enum dbg_status status); - -/** - * @brief qed_get_idle_chk_results_buf_size - Returns the required buffer size - * for idle check results (in bytes). - * - * @param p_hwfn - HW device data - * @param dump_buf - idle check dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size); -/** - * @brief qed_print_idle_chk_results - Prints idle check results - * - * @param p_hwfn - HW device data - * @param dump_buf - idle check dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the idle check results. - * @param num_errors - OUT: number of errors found in idle check. - * @param num_warnings - OUT: number of warnings found in idle check. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf, - u32 *num_errors, - u32 *num_warnings); - -/** - * @brief qed_dbg_mcp_trace_set_meta_data - Sets the MCP Trace meta data. 
- * - * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to - * no NVRAM access). - * - * @param data - pointer to MCP Trace meta data - * @param size - size of MCP Trace meta data in dwords - */ -void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn, - const u32 *meta_buf); - -/** - * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size - * for MCP Trace results (in bytes). - * - * @param p_hwfn - HW device data - * @param dump_buf - MCP Trace dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size); - -/** - * @brief qed_print_mcp_trace_results - Prints MCP Trace results - * - * @param p_hwfn - HW device data - * @param dump_buf - mcp trace dump buffer, starting from the header. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the mcp trace results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf); - -/** - * @brief qed_print_mcp_trace_results_cont - Prints MCP Trace results, and - * keeps the MCP trace meta data allocated, to support continuous MCP Trace - * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should - * be called to free the meta data. - * - * @param p_hwfn - HW device data - * @param dump_buf - mcp trace dump buffer, starting from the header. - * @param results_buf - buffer for printing the mcp trace results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - char *results_buf); - -/** - * @brief print_mcp_trace_line - Prints MCP Trace results for a single line - * - * @param p_hwfn - HW device data - * @param dump_buf - mcp trace dump buffer, starting from the header. - * @param num_dumped_bytes - number of bytes that were dumped. - * @param results_buf - buffer for printing the mcp trace results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, - u8 *dump_buf, - u32 num_dumped_bytes, - char *results_buf); - -/** - * @brief mcp_trace_free_meta_data - Frees the MCP Trace meta data. - * Should be called after continuous MCP Trace parsing. - * - * @param p_hwfn - HW device data - */ -void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn); - -/** - * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size - * for reg_fifo results (in bytes). - * - * @param p_hwfn - HW device data - * @param dump_buf - reg fifo dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size); - -/** - * @brief qed_print_reg_fifo_results - Prints reg fifo results - * - * @param p_hwfn - HW device data - * @param dump_buf - reg fifo dump buffer, starting from the header. 
- * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the reg fifo results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf); - -/** - * @brief qed_get_igu_fifo_results_buf_size - Returns the required buffer size - * for igu_fifo results (in bytes). - * - * @param p_hwfn - HW device data - * @param dump_buf - IGU fifo dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size); - -/** - * @brief qed_print_igu_fifo_results - Prints IGU fifo results - * - * @param p_hwfn - HW device data - * @param dump_buf - IGU fifo dump buffer, starting from the header. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the IGU fifo results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf); - -/** - * @brief qed_get_protection_override_results_buf_size - Returns the required - * buffer size for protection override results (in bytes). - * - * @param p_hwfn - HW device data - * @param dump_buf - protection override dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status -qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size); - -/** - * @brief qed_print_protection_override_results - Prints protection override - * results. - * - * @param p_hwfn - HW device data - * @param dump_buf - protection override dump buffer, starting from the header. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the reg fifo results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf); - -/** - * @brief qed_get_fw_asserts_results_buf_size - Returns the required buffer size - * for FW Asserts results (in bytes). - * - * @param p_hwfn - HW device data - * @param dump_buf - FW Asserts dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. - * - * @return error if the parsing fails, ok otherwise. - */ -enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - u32 *results_buf_size); - -/** - * @brief qed_print_fw_asserts_results - Prints FW Asserts results - * - * @param p_hwfn - HW device data - * @param dump_buf - FW Asserts dump buffer, starting from the header. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the FW Asserts results. - * - * @return error if the parsing fails, ok otherwise. 
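The parsing helpers follow a matching pattern on the host side: size the text buffer from the raw dump, allocate it, then render. A sketch using the idle-check variant (the wrapper name, vzalloc(), the DP_NOTICE() log and the alloc-failure status are illustrative assumptions; only the qed_get_idle_chk_results_buf_size()/qed_print_idle_chk_results() calls come from this header):

static enum dbg_status qed_example_parse_idle_chk(struct qed_hwfn *p_hwfn,
						  u32 *dump_buf,
						  u32 num_dumped_dwords)
{
	u32 text_size, num_errors, num_warnings;
	enum dbg_status rc;
	char *text_buf;

	/* Size the textual results buffer (in bytes) from the raw dump */
	rc = qed_get_idle_chk_results_buf_size(p_hwfn, dump_buf,
					       num_dumped_dwords, &text_size);
	if (rc != DBG_STATUS_OK)
		return rc;

	text_buf = vzalloc(text_size);
	if (!text_buf)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; /* assumed status */

	/* Render the dump into text and report the error/warning counts */
	rc = qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
					text_buf, &num_errors, &num_warnings);
	if (rc == DBG_STATUS_OK)
		DP_NOTICE(p_hwfn, "idle check: %u errors, %u warnings\n",
			  num_errors, num_warnings);

	vfree(text_buf);
	return rc;
}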
- */ -enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, - char *results_buf); - -/** - * @brief qed_dbg_parse_attn - Parses and prints attention registers values in - * the specified results struct. - * - * @param p_hwfn - HW device data - * @param results - Pointer to the attention read results - * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. - */ -enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, - struct dbg_attn_block_result *results); - /* Win 2 */ #define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL @@ -3745,19 +2375,28 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, /* Win 13 */ #define GTT_BAR0_MAP_REG_PSDM_RAM 0x01a000UL +/* Returns the VOQ based on port and TC */ +#define VOQ(port, tc, max_phys_tcs_per_port) ((tc) == \ + PURE_LB_TC ? NUM_OF_PHYS_TCS *\ + MAX_NUM_PORTS_BB + \ + (port) : (port) * \ + (max_phys_tcs_per_port) + (tc)) + +struct init_qm_pq_params; + /** - * @brief qed_qm_pf_mem_size - prepare QM ILT sizes + * qed_qm_pf_mem_size(): Prepare QM ILT sizes. * - * Returns the required host memory size in 4KB units. - * Must be called before all QM init HSI functions. + * @num_pf_cids: Number of connections used by this PF. + * @num_vf_cids: Number of connections used by VFs of this PF. + * @num_tids: Number of tasks used by this PF. + * @num_pf_pqs: Number of PQs used by this PF. + * @num_vf_pqs: Number of PQs used by VFs of this PF. * - * @param num_pf_cids - number of connections used by this PF - * @param num_vf_cids - number of connections used by VFs of this PF - * @param num_tids - number of tasks used by this PF - * @param num_pf_pqs - number of PQs used by this PF - * @param num_vf_pqs - number of PQs used by VFs of this PF + * Return: The required host memory size in 4KB units. * - * @return The required host memory size in 4KB units. + * Returns the required host memory size in 4KB units. + * Must be called before all QM init HSI functions. */ u32 qed_qm_pf_mem_size(u32 num_pf_cids, u32 num_vf_cids, @@ -3771,8 +2410,19 @@ struct qed_qm_common_rt_init_params { bool global_rl_en; bool vport_wfq_en; struct init_qm_port_params *port_params; + struct init_qm_global_rl_params + global_rl_params[COMMON_MAX_QM_GLOBAL_RLS]; }; +/** + * qed_qm_common_rt_init(): Prepare QM runtime init values for the + * engine phase. + * + * @p_hwfn: HW device data. + * @p_params: Parameters. + * + * Return: 0 on success, -1 on error. + */ int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn, struct qed_qm_common_rt_init_params *p_params); @@ -3789,85 +2439,116 @@ struct qed_qm_pf_rt_init_params { u16 num_vf_pqs; u16 start_vport; u16 num_vports; + u16 start_rl; + u16 num_rls; u16 pf_wfq; u32 pf_rl; + u32 link_speed; struct init_qm_pq_params *pq_params; struct init_qm_vport_params *vport_params; + struct init_qm_rl_params *rl_params; }; +/** + * qed_qm_pf_rt_init(): Prepare QM runtime init values for the PF phase. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers + * @p_params: Parameters. + * + * Return: 0 on success, -1 on error. + */ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_qm_pf_rt_init_params *p_params); + struct qed_ptt *p_ptt, + struct qed_qm_pf_rt_init_params *p_params); /** - * @brief qed_init_pf_wfq - Initializes the WFQ weight of the specified PF + * qed_init_pf_wfq(): Initializes the WFQ weight of the specified PF. 
* - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers - * @param pf_id - PF ID - * @param pf_wfq - WFQ weight. Must be non-zero. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers + * @pf_id: PF ID + * @pf_wfq: WFQ weight. Must be non-zero. * - * @return 0 on success, -1 on error. + * Return: 0 on success, -1 on error. */ int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq); /** - * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF + * qed_init_pf_rl(): Initializes the rate limit of the specified PF * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers - * @param pf_id - PF ID - * @param pf_rl - rate limit in Mb/sec units + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @pf_id: PF ID. + * @pf_rl: rate limit in Mb/sec units * - * @return 0 on success, -1 on error. + * Return: 0 on success, -1 on error. */ int qed_init_pf_rl(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl); /** - * @brief qed_init_vport_wfq Initializes the WFQ weight of the specified VPORT + * qed_init_vport_wfq(): Initializes the WFQ weight of the specified VPORT * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers - * @param first_tx_pq_id- An array containing the first Tx PQ ID associated - * with the VPORT for each TC. This array is filled by - * qed_qm_pf_rt_init - * @param vport_wfq - WFQ weight. Must be non-zero. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers + * @first_tx_pq_id: An array containing the first Tx PQ ID associated + * with the VPORT for each TC. This array is filled by + * qed_qm_pf_rt_init + * @wfq: WFQ weight. Must be non-zero. * - * @return 0 on success, -1 on error. + * Return: 0 on success, -1 on error. */ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq); /** - * @brief qed_init_global_rl - Initializes the rate limit of the specified - * rate limiter + * qed_init_vport_tc_wfq(): Initializes the WFQ weight of the specified + * VPORT and TC. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers - * @param rl_id - RL ID - * @param rate_limit - rate limit in Mb/sec units + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @first_tx_pq_id: The first Tx PQ ID associated with the VPORT and TC. + * (filled by qed_qm_pf_rt_init). + * @weight: VPORT+TC WFQ weight. * - * @return 0 on success, -1 on error. + * Return: 0 on success, -1 on error. + */ +int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 first_tx_pq_id, u16 weight); + +/** + * qed_init_global_rl(): Initializes the rate limit of the specified + * rate limiter. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @rl_id: RL ID. + * @rate_limit: Rate limit in Mb/sec units + * @vport_rl_type: Vport RL type. + * + * Return: 0 on success, -1 on error. */ int qed_init_global_rl(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 rl_id, u32 rate_limit); + u16 rl_id, u32 rate_limit, + enum init_qm_rl_type vport_rl_type); /** - * @brief qed_send_qm_stop_cmd Sends a stop command to the QM + * qed_send_qm_stop_cmd(): Sends a stop command to the QM. * - * @param p_hwfn - * @param p_ptt - * @param is_release_cmd - true for release, false for stop. 
- * @param is_tx_pq - true for Tx PQs, false for Other PQs. - * @param start_pq - first PQ ID to stop - * @param num_pqs - Number of PQs to stop, starting from start_pq. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @is_release_cmd: true for release, false for stop. + * @is_tx_pq: true for Tx PQs, false for Other PQs. + * @start_pq: first PQ ID to stop + * @num_pqs: Number of PQs to stop, starting from start_pq. * - * @return bool, true if successful, false if timeout occurred while waiting for - * QM command done. + * Return: Bool, true if successful, false if timeout occurred while waiting + * for QM command done. */ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3875,53 +2556,64 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, bool is_tx_pq, u16 start_pq, u16 num_pqs); /** - * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port + * qed_set_vxlan_dest_port(): Initializes vxlan tunnel destination udp port. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param dest_port - vxlan destination udp port. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dest_port: vxlan destination udp port. + * + * Return: Void. */ void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 dest_port); /** - * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW + * qed_set_vxlan_enable(): Enable or disable VXLAN tunnel in HW. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @vxlan_enable: vxlan enable flag. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param vxlan_enable - vxlan enable flag. + * Return: Void. */ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool vxlan_enable); /** - * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW + * qed_set_gre_enable(): Enable or disable GRE tunnel in HW. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param eth_gre_enable - eth GRE enable enable flag. - * @param ip_gre_enable - IP GRE enable enable flag. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @eth_gre_enable: Eth GRE enable flag. + * @ip_gre_enable: IP GRE enable flag. + * + * Return: Void. */ void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool eth_gre_enable, bool ip_gre_enable); /** - * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port + * qed_set_geneve_dest_port(): Initializes geneve tunnel destination udp port. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dest_port: Geneve destination udp port. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param dest_port - geneve destination udp port. + * Return: Void. */ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 dest_port); /** - * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW + * qed_set_geneve_enable(): Enable or disable GENEVE tunnel in HW. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @eth_geneve_enable: Eth GENEVE enable flag. + * @ip_geneve_enable: IP GENEVE enable flag. * - * @param p_ptt - ptt window used for writing the registers. - * @param eth_geneve_enable - eth GENEVE enable enable flag. 
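Putting the tunnel helpers together, a caller typically programs the destination UDP port and then flips the enable bit for each tunnel type. A minimal sketch (the wrapper name and the IANA default ports 4789/6081 are illustrative, not mandated by this header):

static void qed_example_tunnel_setup(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	/* VXLAN: program the UDP destination port, then enable the tunnel */
	qed_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);
	qed_set_vxlan_enable(p_hwfn, p_ptt, true);

	/* GENEVE: same flow, enabling Eth-over-GENEVE and IP-over-GENEVE */
	qed_set_geneve_dest_port(p_hwfn, p_ptt, 6081);
	qed_set_geneve_enable(p_hwfn, p_ptt, true, true);

	/* GRE has no UDP port; only the enable flags are programmed */
	qed_set_gre_enable(p_hwfn, p_ptt, true, true);
}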
- * @param ip_geneve_enable - IP GENEVE enable enable flag. + * Return: Void. */ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3931,25 +2623,29 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool enable); /** - * @brief qed_gft_disable - Disable GFT + * qed_gft_disable(): Disable GFT. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param pf_id - pf on which to disable GFT. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @pf_id: PF on which to disable GFT. + * + * Return: Void. */ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id); /** - * @brief qed_gft_config - Enable and configure HW for GFT + * qed_gft_config(): Enable and configure HW for GFT. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @pf_id: PF on which to enable GFT. + * @tcp: Set profile tcp packets. + * @udp: Set profile udp packet. + * @ipv4: Set profile ipv4 packet. + * @ipv6: Set profile ipv6 packet. + * @profile_type: Define packet same fields. Use enum gft_profile_type. * - * @param p_hwfn - HW device data - * @param p_ptt - ptt window used for writing the registers. - * @param pf_id - pf on which to enable GFT. - * @param tcp - set profile tcp packets. - * @param udp - set profile udp packet. - * @param ipv4 - set profile ipv4 packet. - * @param ipv6 - set profile ipv6 packet. - * @param profile_type - define packet same fields. Use enum gft_profile_type. + * Return: Void. */ void qed_gft_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3959,438 +2655,135 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, bool ipv4, bool ipv6, enum gft_profile_type profile_type); /** - * @brief qed_enable_context_validation - Enable and configure context - * validation. + * qed_enable_context_validation(): Enable and configure context + * validation. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. + * Return: Void. */ void qed_enable_context_validation(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_calc_session_ctx_validation - Calcualte validation byte for - * session context. + * qed_calc_session_ctx_validation(): Calcualte validation byte for + * session context. * - * @param p_ctx_mem - pointer to context memory. - * @param ctx_size - context size. - * @param ctx_type - context type. - * @param cid - context cid. + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: Context size. + * @ctx_type: Context type. + * @cid: Context cid. + * + * Return: Void. */ void qed_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type, u32 cid); /** - * @brief qed_calc_task_ctx_validation - Calcualte validation byte for task - * context. + * qed_calc_task_ctx_validation(): Calcualte validation byte for task + * context. + * + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: Context size. + * @ctx_type: Context type. + * @tid: Context tid. * - * @param p_ctx_mem - pointer to context memory. - * @param ctx_size - context size. - * @param ctx_type - context type. - * @param tid - context tid. + * Return: Void. */ void qed_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type, u32 tid); /** - * @brief qed_memset_session_ctx - Memset session context to 0 while - * preserving validation bytes. 
+ * qed_memset_session_ctx(): Memset session context to 0 while + * preserving validation bytes. + * + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: Size to initialzie. + * @ctx_type: Context type. * - * @param p_hwfn - - * @param p_ctx_mem - pointer to context memory. - * @param ctx_size - size to initialzie. - * @param ctx_type - context type. + * Return: Void. */ void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); /** - * @brief qed_memset_task_ctx - Memset task context to 0 while preserving - * validation bytes. + * qed_memset_task_ctx(): Memset task context to 0 while preserving + * validation bytes. * - * @param p_ctx_mem - pointer to context memory. - * @param ctx_size - size to initialzie. - * @param ctx_type - context type. + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: size to initialzie. + * @ctx_type: context type. + * + * Return: Void. */ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); #define NUM_STORMS 6 /** - * @brief qed_set_rdma_error_level - Sets the RDMA assert level. - * If the severity of the error will be - * above the level, the FW will assert. - * @param p_hwfn - HW device data - * @param p_ptt - ptt window used for writing the registers - * @param assert_level - An array of assert levels for each storm. + * qed_set_rdma_error_level(): Sets the RDMA assert level. + * If the severity of the error will be + * above the level, the FW will assert. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @assert_level: An array of assert levels for each storm. * + * Return: Void. */ void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 assert_level[NUM_STORMS]); /** - * @brief qed_fw_overlay_mem_alloc - Allocates and fills the FW overlay memory. + * qed_fw_overlay_mem_alloc(): Allocates and fills the FW overlay memory. * - * @param p_hwfn - HW device data - * @param fw_overlay_in_buf - the input FW overlay buffer. - * @param buf_size - the size of the input FW overlay buffer in bytes. - * must be aligned to dwords. - * @param fw_overlay_out_mem - OUT: a pointer to the allocated overlays memory. + * @p_hwfn: HW device data. + * @fw_overlay_in_buf: The input FW overlay buffer. + * @buf_size_in_bytes: The size of the input FW overlay buffer in bytes. + * must be aligned to dwords. * - * @return a pointer to the allocated overlays memory, + * Return: A pointer to the allocated overlays memory, * or NULL in case of failures. */ struct phys_mem_desc * qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn, - const u32 * const fw_overlay_in_buf, + const u32 *const fw_overlay_in_buf, u32 buf_size_in_bytes); /** - * @brief qed_fw_overlay_init_ram - Initializes the FW overlay RAM. + * qed_fw_overlay_init_ram(): Initializes the FW overlay RAM. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @fw_overlay_mem: the allocated FW overlay memory. * - * @param p_hwfn - HW device data. - * @param p_ptt - ptt window used for writing the registers. - * @param fw_overlay_mem - the allocated FW overlay memory. + * Return: Void. */ void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct phys_mem_desc *fw_overlay_mem); /** - * @brief qed_fw_overlay_mem_free - Frees the FW overlay memory. + * qed_fw_overlay_mem_free(): Frees the FW overlay memory. * - * @param p_hwfn - HW device data. - * @param fw_overlay_mem - the allocated FW overlay memory to free. + * @p_hwfn: HW device data. 
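The FW overlay helpers form a small lifecycle: allocate and fill host memory from the overlay image, program the Storm RAMs, and free the memory when done. A sketch under the assumption that the caller already holds the overlay image buffer (the wrapper name and the -ENOMEM mapping are illustrative):

static int qed_example_fw_overlays(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   const u32 *fw_overlay_in_buf,
				   u32 buf_size_in_bytes)
{
	struct phys_mem_desc *fw_overlay_mem;

	/* Allocate host memory and fill it with the FW overlay images */
	fw_overlay_mem = qed_fw_overlay_mem_alloc(p_hwfn, fw_overlay_in_buf,
						  buf_size_in_bytes);
	if (!fw_overlay_mem)
		return -ENOMEM;

	/* Point the Storm overlay-buffer RAM entries at the new memory */
	qed_fw_overlay_init_ram(p_hwfn, p_ptt, fw_overlay_mem);

	/* ... run with the overlays loaded ... */

	/* Release the memory; the updated prototype takes a double pointer
	 * so the helper can also clear the caller's reference.
	 */
	qed_fw_overlay_mem_free(p_hwfn, &fw_overlay_mem);

	return 0;
}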
+ * @fw_overlay_mem: The allocated FW overlay memory to free. + * + * Return: Void. */ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, - struct phys_mem_desc *fw_overlay_mem); + struct phys_mem_desc **fw_overlay_mem); -/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */ -#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) -#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) - -/* Tstorm port statistics */ -#define TSTORM_PORT_STAT_OFFSET(port_id) \ - (IRO[1].base + ((port_id) * IRO[1].m1)) -#define TSTORM_PORT_STAT_SIZE (IRO[1].size) - -/* Tstorm ll2 port statistics */ -#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \ - (IRO[2].base + ((port_id) * IRO[2].m1)) -#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size) - -/* Ustorm VF-PF Channel ready flag */ -#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \ - (IRO[3].base + ((vf_id) * IRO[3].m1)) -#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size) - -/* Ustorm Final flr cleanup ack */ -#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \ - (IRO[4].base + ((pf_id) * IRO[4].m1)) -#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size) - -/* Ustorm Event ring consumer */ -#define USTORM_EQE_CONS_OFFSET(pf_id) \ - (IRO[5].base + ((pf_id) * IRO[5].m1)) -#define USTORM_EQE_CONS_SIZE (IRO[5].size) - -/* Ustorm eth queue zone */ -#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \ - (IRO[6].base + ((queue_zone_id) * IRO[6].m1)) -#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size) - -/* Ustorm Common Queue ring consumer */ -#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \ - (IRO[7].base + ((queue_zone_id) * IRO[7].m1)) -#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size) - -/* Xstorm common PQ info */ -#define XSTORM_PQ_INFO_OFFSET(pq_id) \ - (IRO[8].base + ((pq_id) * IRO[8].m1)) -#define XSTORM_PQ_INFO_SIZE (IRO[8].size) - -/* Xstorm Integration Test Data */ -#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base) -#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size) - -/* Ystorm Integration Test Data */ -#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base) -#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size) - -/* Pstorm Integration Test Data */ -#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base) -#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size) - -/* Tstorm Integration Test Data */ -#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base) -#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size) - -/* Mstorm Integration Test Data */ -#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base) -#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[13].size) - -/* Ustorm Integration Test Data */ -#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[14].base) -#define USTORM_INTEG_TEST_DATA_SIZE (IRO[14].size) - -/* Xstorm overlay buffer host address */ -#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[15].base) -#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[15].size) - -/* Ystorm overlay buffer host address */ -#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[16].base) -#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[16].size) - -/* Pstorm overlay buffer host address */ -#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[17].base) -#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[17].size) - -/* Tstorm overlay buffer host address */ -#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[18].base) -#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[18].size) - -/* Mstorm overlay buffer host address */ -#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[19].base) -#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[19].size) - -/* Ustorm overlay buffer host address */ -#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[20].base) -#define USTORM_OVERLAY_BUF_ADDR_SIZE 
(IRO[20].size) - -/* Tstorm producers */ -#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \ - (IRO[21].base + ((core_rx_queue_id) * IRO[21].m1)) -#define TSTORM_LL2_RX_PRODS_SIZE (IRO[21].size) - -/* Tstorm LightL2 queue statistics */ -#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ - (IRO[22].base + ((core_rx_queue_id) * IRO[22].m1)) -#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[22].size) - -/* Ustorm LiteL2 queue statistics */ -#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ - (IRO[23].base + ((core_rx_queue_id) * IRO[23].m1)) -#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[23].size) - -/* Pstorm LiteL2 queue statistics */ -#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \ - (IRO[24].base + ((core_tx_stats_id) * IRO[24].m1)) -#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[24].size) - -/* Mstorm queue statistics */ -#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ - (IRO[25].base + ((stat_counter_id) * IRO[25].m1)) -#define MSTORM_QUEUE_STAT_SIZE (IRO[25].size) - -/* TPA agregation timeout in us resolution (on ASIC) */ -#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[26].base) -#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[26].size) - -/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size - * mode - */ -#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \ - (IRO[27].base + ((vf_id) * IRO[27].m1) + ((vf_queue_id) * IRO[27].m2)) -#define MSTORM_ETH_VF_PRODS_SIZE (IRO[27].size) - -/* Mstorm ETH PF queues producers */ -#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \ - (IRO[28].base + ((queue_id) * IRO[28].m1)) -#define MSTORM_ETH_PF_PRODS_SIZE (IRO[28].size) - -/* Mstorm pf statistics */ -#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \ - (IRO[29].base + ((pf_id) * IRO[29].m1)) -#define MSTORM_ETH_PF_STAT_SIZE (IRO[29].size) - -/* Ustorm queue statistics */ -#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ - (IRO[30].base + ((stat_counter_id) * IRO[30].m1)) -#define USTORM_QUEUE_STAT_SIZE (IRO[30].size) - -/* Ustorm pf statistics */ -#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \ - (IRO[31].base + ((pf_id) * IRO[31].m1)) -#define USTORM_ETH_PF_STAT_SIZE (IRO[31].size) - -/* Pstorm queue statistics */ -#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ - (IRO[32].base + ((stat_counter_id) * IRO[32].m1)) -#define PSTORM_QUEUE_STAT_SIZE (IRO[32].size) - -/* Pstorm pf statistics */ -#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \ - (IRO[33].base + ((pf_id) * IRO[33].m1)) -#define PSTORM_ETH_PF_STAT_SIZE (IRO[33].size) - -/* Control frame's EthType configuration for TX control frame security */ -#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \ - (IRO[34].base + ((eth_type_id) * IRO[34].m1)) -#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[34].size) - -/* Tstorm last parser message */ -#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[35].base) -#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[35].size) - -/* Tstorm Eth limit Rx rate */ -#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \ - (IRO[36].base + ((pf_id) * IRO[36].m1)) -#define ETH_RX_RATE_LIMIT_SIZE (IRO[36].size) - -/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0. 
- * Use eth_tstorm_rss_update_data for update - */ -#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) \ - (IRO[37].base + ((pf_id) * IRO[37].m1)) -#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[37].size) - -/* Xstorm queue zone */ -#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ - (IRO[38].base + ((queue_id) * IRO[38].m1)) -#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[38].size) - -/* Ystorm cqe producer */ -#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ - (IRO[39].base + ((rss_id) * IRO[39].m1)) -#define YSTORM_TOE_CQ_PROD_SIZE (IRO[39].size) - -/* Ustorm cqe producer */ -#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ - (IRO[40].base + ((rss_id) * IRO[40].m1)) -#define USTORM_TOE_CQ_PROD_SIZE (IRO[40].size) - -/* Ustorm grq producer */ -#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ - (IRO[41].base + ((pf_id) * IRO[41].m1)) -#define USTORM_TOE_GRQ_PROD_SIZE (IRO[41].size) - -/* Tstorm cmdq-cons of given command queue-id */ -#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \ - (IRO[42].base + ((cmdq_queue_id) * IRO[42].m1)) -#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[42].size) - -/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID, - * BDqueue-id +#define PCICFG_OFFSET 0x2000 +#define GRC_CONFIG_REG_PF_INIT_VF 0x624 + +/* First VF_NUM for PF is encoded in this register. + * The number of VFs assigned to a PF is assumed to be a multiple of 8. + * Software should program these bits based on Total Number of VFs programmed + * for each PF. + * Since registers from 0x000-0x7ff are spilt across functions, each PF will + * have the same location for the same 4 bits */ -#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \ - (IRO[43].base + ((storage_func_id) * IRO[43].m1) + \ - ((bdq_id) * IRO[43].m2)) -#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[43].size) - -/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */ -#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \ - (IRO[44].base + ((storage_func_id) * IRO[44].m1) + \ - ((bdq_id) * IRO[44].m2)) -#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[44].size) - -/* Tstorm iSCSI RX stats */ -#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ - (IRO[45].base + ((storage_func_id) * IRO[45].m1)) -#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[45].size) - -/* Mstorm iSCSI RX stats */ -#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ - (IRO[46].base + ((storage_func_id) * IRO[46].m1)) -#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[46].size) - -/* Ustorm iSCSI RX stats */ -#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ - (IRO[47].base + ((storage_func_id) * IRO[47].m1)) -#define USTORM_ISCSI_RX_STATS_SIZE (IRO[47].size) - -/* Xstorm iSCSI TX stats */ -#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ - (IRO[48].base + ((storage_func_id) * IRO[48].m1)) -#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[48].size) - -/* Ystorm iSCSI TX stats */ -#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ - (IRO[49].base + ((storage_func_id) * IRO[49].m1)) -#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[49].size) - -/* Pstorm iSCSI TX stats */ -#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ - (IRO[50].base + ((storage_func_id) * IRO[50].m1)) -#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[50].size) - -/* Tstorm FCoE RX stats */ -#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ - (IRO[51].base + ((pf_id) * IRO[51].m1)) -#define TSTORM_FCOE_RX_STATS_SIZE (IRO[51].size) - -/* Pstorm FCoE TX stats */ -#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ - (IRO[52].base + ((pf_id) * IRO[52].m1)) -#define PSTORM_FCOE_TX_STATS_SIZE (IRO[52].size) - -/* 
Pstorm RDMA queue statistics */ -#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[53].base + ((rdma_stat_counter_id) * IRO[53].m1)) -#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[53].size) - -/* Tstorm RDMA queue statistics */ -#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[54].base + ((rdma_stat_counter_id) * IRO[54].m1)) -#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[54].size) - -/* Xstorm error level for assert */ -#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[55].base + ((pf_id) * IRO[55].m1)) -#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[55].size) - -/* Ystorm error level for assert */ -#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[56].base + ((pf_id) * IRO[56].m1)) -#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[56].size) - -/* Pstorm error level for assert */ -#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[57].base + ((pf_id) * IRO[57].m1)) -#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[57].size) - -/* Tstorm error level for assert */ -#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[58].base + ((pf_id) * IRO[58].m1)) -#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[58].size) - -/* Mstorm error level for assert */ -#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[59].base + ((pf_id) * IRO[59].m1)) -#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[59].size) - -/* Ustorm error level for assert */ -#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[60].base + ((pf_id) * IRO[60].m1)) -#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[60].size) - -/* Xstorm iWARP rxmit stats */ -#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \ - (IRO[61].base + ((pf_id) * IRO[61].m1)) -#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[61].size) - -/* Tstorm RoCE Event Statistics */ -#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \ - (IRO[62].base + ((roce_pf_id) * IRO[62].m1)) -#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[62].size) - -/* DCQCN Received Statistics */ -#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id)\ - (IRO[63].base + ((roce_pf_id) * IRO[63].m1)) -#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[63].size) - -/* RoCE Error Statistics */ -#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \ - (IRO[64].base + ((roce_pf_id) * IRO[64].m1)) -#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[64].size) - -/* DCQCN Sent Statistics */ -#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \ - (IRO[65].base + ((roce_pf_id) * IRO[65].m1)) -#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[65].size) - -/* RoCE CQEs Statistics */ -#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \ - (IRO[66].base + ((roce_pf_id) * IRO[66].m1)) -#define USTORM_ROCE_CQE_STATS_SIZE (IRO[66].size) +#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xff /* Runtime array offsets */ #define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 @@ -4721,116 +3114,118 @@ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, #define QM_REG_TXPQMAP_RT_SIZE 512 #define QM_REG_WFQVPWEIGHT_RT_OFFSET 31556 #define QM_REG_WFQVPWEIGHT_RT_SIZE 512 -#define QM_REG_WFQVPCRD_RT_OFFSET 32068 +#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET 32068 +#define QM_REG_WFQVPUPPERBOUND_RT_SIZE 512 +#define QM_REG_WFQVPCRD_RT_OFFSET 32580 #define QM_REG_WFQVPCRD_RT_SIZE 512 -#define QM_REG_WFQVPMAP_RT_OFFSET 32580 +#define QM_REG_WFQVPMAP_RT_OFFSET 33092 #define QM_REG_WFQVPMAP_RT_SIZE 512 -#define QM_REG_PTRTBLTX_RT_OFFSET 33092 +#define QM_REG_PTRTBLTX_RT_OFFSET 33604 #define QM_REG_PTRTBLTX_RT_SIZE 1024 -#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34116 +#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34628 #define 
QM_REG_WFQPFCRD_MSB_RT_SIZE 160 -#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34276 -#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34277 -#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34278 -#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34279 -#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34280 -#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34281 -#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34282 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34283 +#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34788 +#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34789 +#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34790 +#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34791 +#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34792 +#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34793 +#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34794 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34795 #define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34287 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34799 #define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34291 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34803 #define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34323 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34835 #define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34339 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34851 #define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34355 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34867 #define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34371 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34883 #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 -#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34387 -#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34388 +#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34899 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34900 #define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8 -#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34396 -#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34397 -#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34398 -#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34399 -#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34400 -#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34401 -#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34402 -#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34403 -#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34404 -#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34405 -#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34406 -#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34407 -#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34408 -#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34409 -#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34410 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34411 -#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34412 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34413 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34414 -#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34415 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34416 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34417 -#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34418 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34419 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34420 -#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34421 -#define 
PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34422 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34423 -#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34424 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34425 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34426 -#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34427 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34428 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34429 -#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34430 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34431 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34432 -#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34433 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34434 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34435 -#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34436 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34437 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34438 -#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34439 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34440 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34441 -#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34442 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34443 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34444 -#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34445 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34446 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34447 -#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34448 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34449 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34450 -#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34451 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34452 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34453 -#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34454 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34455 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34456 -#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34457 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34458 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34459 -#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34460 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34461 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34462 -#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34463 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34464 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34465 -#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34466 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34467 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34468 -#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34469 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34470 -#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34471 - -#define RUNTIME_ARRAY_SIZE 34472 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34908 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34909 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34910 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34911 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34912 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34913 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34914 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34915 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34916 +#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34917 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34918 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34919 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34920 
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34921 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34922 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34923 +#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34924 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34925 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34926 +#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34927 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34928 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34929 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34930 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34931 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34932 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34933 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34934 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34935 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34936 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34937 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34938 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34939 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34940 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34941 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34942 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34943 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34944 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34945 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34946 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34947 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34948 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34949 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34950 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34951 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34952 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34953 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34954 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34955 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34956 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34957 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34958 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34959 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34960 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34961 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34962 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34963 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34964 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34965 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34966 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34967 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34968 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34969 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34970 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34971 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34972 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34973 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34974 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34975 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34976 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34977 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34978 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34979 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34980 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34981 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34982 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 
34983 + +#define RUNTIME_ARRAY_SIZE 34984 /* Init Callbacks */ #define DMAE_READY_CB 0 @@ -4850,216 +3245,216 @@ struct xstorm_eth_conn_st_ctx { __le32 reserved[60]; }; -struct e4_xstorm_eth_conn_ag_ctx { +struct xstorm_eth_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define 
XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 u8 flags2; -#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4 -#define 
E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 u8 flags7; -#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3 -#define 
E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7 u8 flags11; -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define 
E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define 
E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 -#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 -#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 -#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 -#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 u8 edpm_event_id; __le16 physical_q0; __le16 e5_reserved1; @@ -5118,37 +3513,37 @@ struct ystorm_eth_conn_st_ctx { __le32 reserved[8]; }; -struct e4_ystorm_eth_conn_ag_ctx { +struct ystorm_eth_conn_ag_ctx { u8 byte0; u8 state; u8 flags0; -#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 -#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2 -#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 -#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4 -#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define 
YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4 +#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0 -#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1 -#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 tx_q0_int_coallecing_timeset; u8 byte3; __le16 word0; @@ -5162,89 +3557,89 @@ struct e4_ystorm_eth_conn_ag_ctx { __le32 reg3; }; -struct e4_tstorm_eth_conn_ag_ctx { +struct tstorm_eth_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 +#define 
TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 +#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 -#define 
E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 reg2; @@ -5266,63 +3661,63 @@ struct e4_tstorm_eth_conn_ag_ctx { __le32 reg10; }; -struct e4_ustorm_eth_conn_ag_ctx { +struct ustorm_eth_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4 -#define E4_USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2 +#define 
USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 u8 flags2; -#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1 -#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define 
E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -5346,16 +3741,16 @@ struct mstorm_eth_conn_st_ctx { }; /* eth connection context */ -struct e4_eth_conn_context { +struct eth_conn_context { struct tstorm_eth_conn_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; struct pstorm_eth_conn_st_ctx pstorm_st_context; struct xstorm_eth_conn_st_ctx xstorm_st_context; - struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context; - struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context; + struct xstorm_eth_conn_ag_ctx xstorm_ag_context; + struct tstorm_eth_conn_ag_ctx tstorm_ag_context; struct ystorm_eth_conn_st_ctx ystorm_st_context; - struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context; - struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context; + struct ystorm_eth_conn_ag_ctx ystorm_ag_context; + struct ustorm_eth_conn_ag_ctx ustorm_ag_context; struct ustorm_eth_conn_st_ctx ustorm_st_context; struct mstorm_eth_conn_st_ctx mstorm_st_context; }; @@ -5512,7 +3907,7 @@ enum eth_ramrod_cmd_id { ETH_RAMROD_RX_ADD_UDP_FILTER, ETH_RAMROD_RX_DELETE_UDP_FILTER, ETH_RAMROD_RX_CREATE_GFT_ACTION, - ETH_RAMROD_GFT_UPDATE_FILTER, + ETH_RAMROD_RX_UPDATE_GFT_FILTER, ETH_RAMROD_TX_QUEUE_UPDATE, ETH_RAMROD_RGFS_FILTER_ADD, ETH_RAMROD_RGFS_FILTER_DEL, @@ -5596,10 +3991,12 @@ struct eth_vport_rss_config { u8 update_rss_ind_table; u8 update_rss_capabilities; u8 tbl_size; - __le32 reserved2[2]; + u8 ind_table_mask_valid; + u8 reserved2[3]; __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM]; + __le32 ind_table_mask[ETH_RSS_IND_TABLE_MASK_SIZE_REGS]; __le32 rss_key[ETH_RSS_KEY_SIZE_REGS]; - __le32 reserved3[2]; + __le32 reserved3; }; /* eth vport RSS mode */ @@ -5674,8 +4071,20 @@ enum gft_filter_update_action { MAX_GFT_FILTER_UPDATE_ACTION }; +/* Ramrod data for rx create gft action */ +struct rx_create_gft_action_ramrod_data { + u8 vport_id; + u8 reserved[7]; +}; + +/* Ramrod data for rx create openflow action */ +struct rx_create_openflow_action_ramrod_data { + u8 vport_id; + u8 reserved[7]; +}; + /* Ramrod data for rx add openflow filter */ -struct rx_add_openflow_filter_data { +struct rx_openflow_filter_ramrod_data { __le16 action_icid; u8 priority; u8 reserved0; @@ -5698,18 +4107,6 @@ struct rx_add_openflow_filter_data { __le16 l4_src_port; }; -/* Ramrod data for rx create gft action */ -struct rx_create_gft_action_data { - u8 vport_id; - u8 reserved[7]; -}; - -/* Ramrod data for rx create openflow action */ -struct rx_create_openflow_action_data { - u8 vport_id; - u8 reserved[7]; -}; - /* Ramrod data for rx queue start ramrod */ struct rx_queue_start_ramrod_data { __le16 rx_queue_id; @@ -5768,7 +4165,7 @@ struct 
rx_queue_update_ramrod_data { }; /* Ramrod data for rx Add UDP Filter */ -struct rx_udp_filter_data { +struct rx_udp_filter_ramrod_data { __le16 action_icid; __le16 vlan_id; u8 ip_type; @@ -5784,7 +4181,7 @@ struct rx_udp_filter_data { /* Add or delete GFT filter - filter is packet header of type of packet wished * to pass certain FW flow. */ -struct rx_update_gft_filter_data { +struct rx_update_gft_filter_ramrod_data { struct regpair pkt_hdr_addr; __le16 pkt_hdr_length; __le16 action_icid; @@ -5824,7 +4221,8 @@ struct tx_queue_start_ramrod_data { u8 pxp_tph_valid_bd; u8 pxp_tph_valid_pkt; __le16 pxp_st_index; - __le16 comp_agg_size; + u8 comp_agg_size; + u8 reserved3; __le16 queue_zone_id; __le16 reserved2; __le16 pbl_size; @@ -5945,7 +4343,12 @@ struct vport_update_ramrod_data_cmn { u8 ctl_frame_ethtype_check_en; u8 update_in_to_in_pri_map_mode; u8 in_to_in_pri_map[8]; - u8 reserved[6]; + u8 update_tx_dst_port_mode_flg; + u8 tx_dst_port_mode_config; + u8 dst_vport_id; + u8 tx_dst_port_mode; + u8 dst_vport_id_valid; + u8 reserved[1]; }; struct vport_update_ramrod_mcast { @@ -5964,7 +4367,7 @@ struct vport_update_ramrod_data { struct eth_vport_rss_config rss_config; }; -struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart { +struct xstorm_eth_conn_ag_ctx_dq_ext_ldpart { u8 reserved0; u8 state; u8 flags0; @@ -6193,253 +4596,253 @@ struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart { __le32 reg4; }; -struct e4_mstorm_eth_conn_ag_ctx { +struct mstorm_eth_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define 
MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct e4_xstorm_eth_hw_conn_ag_ctx { +struct xstorm_eth_hw_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define 
XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 u8 flags2; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3 -#define 
E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 u8 flags7; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1 
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define 
XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7 u8 flags11; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define 
XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 -#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 u8 edpm_event_id; __le16 physical_q0; __le16 e5_reserved1; @@ -6479,7 +4882,6 @@ struct gft_cam_line_mapped { #define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29 }; - /* Used in gft_profile_key: Indication for ip version */ enum gft_profile_ip_version { GFT_PROFILE_IPV4 = 0, @@ -6640,49 +5042,49 @@ struct ystorm_rdma_task_st_ctx { struct regpair temp[4]; }; -struct e4_ystorm_rdma_task_ag_ctx { +struct ystorm_rdma_task_ag_ctx { u8 reserved; u8 byte1; __le16 msem_ctx_upd_seq; u8 flags0; -#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 -#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6 -#define E4_YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7 +#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6 +#define YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7 u8 flags1; -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 +#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 +#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 +#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 +#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 +#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 +#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 u8 flags2; -#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 +#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 +#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 key; __le32 mw_cnt_or_qp_id; u8 ref_cnt_seq; @@ -6696,49 +5098,49 @@ struct e4_ystorm_rdma_task_ag_ctx { __le32 fbo_hi; }; -struct e4_mstorm_rdma_task_ag_ctx { +struct mstorm_rdma_task_ag_ctx { u8 reserved; u8 byte1; __le16 icid; u8 flags0; -#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 -#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 -#define E4_MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7 +#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 +#define MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7 u8 flags1; -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 +#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 +#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 +#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 +#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 +#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 +#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4 +#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 
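The _MASK/_SHIFT pairs above are not intended to be open-coded; the qed driver reads and writes these packed aggregation-context flag bytes through the generic GET_FIELD()/SET_FIELD() helpers that live alongside the HSI definitions in include/linux/qed/common_hsi.h, which token-paste the _MASK and _SHIFT suffixes onto a field name. A minimal sketch of that access pattern against the renamed struct mstorm_rdma_task_ag_ctx follows; the helper function, its name and the values written are illustrative only and are not part of this patch.

/* Sketch only: set CF0EN and test CF1EN in a task aggregation context
 * using the qed GET_FIELD()/SET_FIELD() helpers from common_hsi.h.
 */
static bool example_task_cf1_enabled(struct mstorm_rdma_task_ag_ctx *ag_ctx)
{
	/* SET_FIELD() clears the named field and ORs the value in at _SHIFT */
	SET_FIELD(ag_ctx->flags1, MSTORM_RDMA_TASK_AG_CTX_CF0EN, 1);

	/* GET_FIELD() shifts right by _SHIFT and masks with _MASK */
	return GET_FIELD(ag_ctx->flags1, MSTORM_RDMA_TASK_AG_CTX_CF1EN);
}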
+#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 u8 flags2; -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 +#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0 +#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 key; __le32 mw_cnt_or_qp_id; u8 ref_cnt_seq; @@ -6762,56 +5164,56 @@ struct ustorm_rdma_task_st_ctx { struct regpair temp[6]; }; -struct e4_ustorm_rdma_task_ag_ctx { +struct ustorm_rdma_task_ag_ctx { u8 reserved; u8 state; __le16 icid; u8 flags0; -#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define E4_USTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6 +#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define USTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6 u8 flags1; -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_MASK 0x3 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_SHIFT 4 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 +#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0 +#define 
USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2 +#define USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_SHIFT 4 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 u8 flags2; -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0 -#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2 -#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED4_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED4_SHIFT 3 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED4_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED4_SHIFT 3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5 +#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7 u8 flags3; -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_SHIFT 0 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_SHIFT 2 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3 -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF -#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 +#define USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_SHIFT 0 +#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_SHIFT 2 +#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 __le32 dif_err_intervals; __le32 dif_error_1st_interval; __le32 dif_rxmit_cons; @@ -6828,16 +5230,853 @@ struct e4_ustorm_rdma_task_ag_ctx { }; /* RDMA task context */ -struct e4_rdma_task_context { +struct rdma_task_context { struct ystorm_rdma_task_st_ctx ystorm_st_context; - struct 
e4_ystorm_rdma_task_ag_ctx ystorm_ag_context; + struct ystorm_rdma_task_ag_ctx ystorm_ag_context; struct tdif_task_context tdif_context; - struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context; + struct mstorm_rdma_task_ag_ctx mstorm_ag_context; struct mstorm_rdma_task_st_ctx mstorm_st_context; struct rdif_task_context rdif_context; struct ustorm_rdma_task_st_ctx ustorm_st_context; struct regpair ustorm_st_padding[2]; - struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context; + struct ustorm_rdma_task_ag_ctx ustorm_ag_context; +}; + +#define TOE_MAX_RAMROD_PER_PF 8 +#define TOE_TX_PAGE_SIZE_BYTES 4096 +#define TOE_GRQ_PAGE_SIZE_BYTES 4096 +#define TOE_RX_CQ_PAGE_SIZE_BYTES 4096 + +#define TOE_RX_MAX_RSS_CHAINS 64 +#define TOE_TX_MAX_TSS_CHAINS 64 +#define TOE_RSS_INDIRECTION_TABLE_SIZE 128 + +/* The toe storm context of Mstorm */ +struct mstorm_toe_conn_st_ctx { + __le32 reserved[24]; +}; + +/* The toe storm context of Pstorm */ +struct pstorm_toe_conn_st_ctx { + __le32 reserved[36]; +}; + +/* The toe storm context of Ystorm */ +struct ystorm_toe_conn_st_ctx { + __le32 reserved[8]; +}; + +/* The toe storm context of Xstorm */ +struct xstorm_toe_conn_st_ctx { + __le32 reserved[44]; +}; + +struct ystorm_toe_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define YSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_MASK 0x3 +#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_SHIFT 2 +#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_MASK 0x3 +#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_SHIFT 4 +#define YSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_SHIFT 0 +#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_EN_SHIFT 1 +#define YSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_TOE_CONN_AG_CTX_REL_SEQ_EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_REL_SEQ_EN_SHIFT 3 +#define YSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_TOE_CONN_AG_CTX_CONS_PROD_EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_CONS_PROD_EN_SHIFT 7 + u8 completion_opcode; + u8 byte3; + __le16 word0; + __le32 rel_seq; + __le32 rel_seq_threshold; + __le16 app_prod; + __le16 app_cons; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + +struct xstorm_toe_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED1_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_TX_DEC_RULE_RES_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_TX_DEC_RULE_RES_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED2_SHIFT 5 +#define 
XSTORM_TOE_CONN_AG_CTX_BIT6_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT6_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_BIT7_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT7_SHIFT 7 + u8 flags1; +#define XSTORM_TOE_CONN_AG_CTX_BIT8_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT8_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_BIT9_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT9_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_BIT15_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT15_SHIFT 7 + u8 flags2; +#define XSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 + u8 flags3; +#define XSTORM_TOE_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_TOE_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_TOE_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_TOE_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 + u8 flags7; +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 +#define 
XSTORM_TOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_TOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_TOE_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7 + u8 flags11; +#define XSTORM_TOE_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED3_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_TOE_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_RULE15EN_MASK 
0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_TOE_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_TOE_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 physical_q1; + __le16 word2; + __le16 word3; + __le16 bd_prod; + __le16 word5; + __le16 word6; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 more_to_send_seq; + __le32 local_adv_wnd_seq; + __le32 reg5; + __le32 reg6; + __le16 word7; + __le16 word8; + __le16 word9; + __le16 word10; + __le32 reg7; + __le32 reg8; + __le32 reg9; + u8 byte7; + u8 byte8; + u8 byte9; + u8 byte10; + u8 byte11; + u8 byte12; + u8 byte13; + u8 byte14; + u8 byte15; + u8 e5_reserved; + __le16 word11; + __le32 reg10; + __le32 reg11; + __le32 reg12; + __le32 reg13; + __le32 reg14; + __le32 reg15; + __le32 reg16; + __le32 reg17; +}; + +struct tstorm_toe_conn_ag_ctx { + u8 reserved0; + u8 byte1; + u8 flags0; +#define TSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_TOE_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_TOE_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_TOE_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_SHIFT 6 + u8 flags1; +#define TSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 +#define 
TSTORM_TOE_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_TOE_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_TOE_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_EN_SHIFT 4 +#define TSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 + u8 flags4; +#define TSTORM_TOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_TOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 +#define TSTORM_TOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_TOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 byte2; + u8 byte3; + __le16 word0; +}; + +struct ustorm_toe_conn_ag_ctx { + u8 reserved; + u8 byte1; + u8 flags0; +#define USTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define USTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_SHIFT 6 + u8 flags1; +#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 0 +#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_MASK 0x3 +#define 
USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_SHIFT 2 +#define USTORM_TOE_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define USTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_EN_SHIFT 2 +#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 3 +#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_SHIFT 4 +#define USTORM_TOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 5 +#define USTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_TOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le16 word2; + __le16 word3; +}; + +/* The toe storm context of Tstorm */ +struct tstorm_toe_conn_st_ctx { + __le32 reserved[16]; +}; + +/* The toe storm context of Ustorm */ +struct ustorm_toe_conn_st_ctx { + __le32 reserved[52]; +}; + +/* toe connection context */ +struct toe_conn_context { + struct ystorm_toe_conn_st_ctx ystorm_st_context; + struct pstorm_toe_conn_st_ctx pstorm_st_context; + struct regpair pstorm_st_padding[2]; + struct xstorm_toe_conn_st_ctx xstorm_st_context; + struct regpair xstorm_st_padding[2]; + struct ystorm_toe_conn_ag_ctx ystorm_ag_context; + struct xstorm_toe_conn_ag_ctx xstorm_ag_context; + struct tstorm_toe_conn_ag_ctx tstorm_ag_context; + struct regpair tstorm_ag_padding[2]; + struct timers_context timer_context; + struct ustorm_toe_conn_ag_ctx ustorm_ag_context; + struct tstorm_toe_conn_st_ctx tstorm_st_context; + struct mstorm_toe_conn_st_ctx mstorm_st_context; + struct ustorm_toe_conn_st_ctx ustorm_st_context; +}; + +/* toe init ramrod header */ +struct toe_init_ramrod_header { + u8 first_rss; + u8 num_rss; + u8 reserved[6]; +}; + +/* toe pf init parameters */ +struct toe_pf_init_params { + __le32 push_timeout; + __le16 grq_buffer_size; + __le16 grq_sb_id; + u8 grq_sb_index; + u8 max_seg_retransmit; + u8 doubt_reachability; + u8 ll2_rx_queue_id; + __le16 grq_fetch_threshold; + u8 reserved1[2]; + struct regpair grq_page_addr; +}; + +/* toe tss parameters */ +struct toe_tss_params { + struct regpair curr_page_addr; + struct regpair next_page_addr; + u8 reserved0; + u8 status_block_index; + __le16 status_block_id; + __le16 
reserved1[2]; +}; + +/* toe rss parameters */ +struct toe_rss_params { + struct regpair curr_page_addr; + struct regpair next_page_addr; + u8 reserved0; + u8 status_block_index; + __le16 status_block_id; + __le16 reserved1[2]; +}; + +/* toe init ramrod data */ +struct toe_init_ramrod_data { + struct toe_init_ramrod_header hdr; + struct tcp_init_params tcp_params; + struct toe_pf_init_params pf_params; + struct toe_tss_params tss_params[TOE_TX_MAX_TSS_CHAINS]; + struct toe_rss_params rss_params[TOE_RX_MAX_RSS_CHAINS]; +}; + +/* toe offload parameters */ +struct toe_offload_params { + struct regpair tx_bd_page_addr; + struct regpair tx_app_page_addr; + __le32 more_to_send_seq; + __le16 rcv_indication_size; + u8 rss_tss_id; + u8 ignore_grq_push; + struct regpair rx_db_data_ptr; +}; + +/* TOE offload ramrod data - DMAed by firmware */ +struct toe_offload_ramrod_data { + struct tcp_offload_params tcp_ofld_params; + struct toe_offload_params toe_ofld_params; +}; + +/* TOE ramrod command IDs */ +enum toe_ramrod_cmd_id { + TOE_RAMROD_UNUSED, + TOE_RAMROD_FUNC_INIT, + TOE_RAMROD_INITATE_OFFLOAD, + TOE_RAMROD_FUNC_CLOSE, + TOE_RAMROD_SEARCHER_DELETE, + TOE_RAMROD_TERMINATE, + TOE_RAMROD_QUERY, + TOE_RAMROD_UPDATE, + TOE_RAMROD_EMPTY, + TOE_RAMROD_RESET_SEND, + TOE_RAMROD_INVALIDATE, + MAX_TOE_RAMROD_CMD_ID +}; + +/* Toe RQ buffer descriptor */ +struct toe_rx_bd { + struct regpair addr; + __le16 size; + __le16 flags; +#define TOE_RX_BD_START_MASK 0x1 +#define TOE_RX_BD_START_SHIFT 0 +#define TOE_RX_BD_END_MASK 0x1 +#define TOE_RX_BD_END_SHIFT 1 +#define TOE_RX_BD_NO_PUSH_MASK 0x1 +#define TOE_RX_BD_NO_PUSH_SHIFT 2 +#define TOE_RX_BD_SPLIT_MASK 0x1 +#define TOE_RX_BD_SPLIT_SHIFT 3 +#define TOE_RX_BD_RESERVED0_MASK 0xFFF +#define TOE_RX_BD_RESERVED0_SHIFT 4 + __le32 reserved1; +}; + +/* TOE RX completion queue opcodes (opcode 0 is illegal) */ +enum toe_rx_cmp_opcode { + TOE_RX_CMP_OPCODE_GA = 1, + TOE_RX_CMP_OPCODE_GR = 2, + TOE_RX_CMP_OPCODE_GNI = 3, + TOE_RX_CMP_OPCODE_GAIR = 4, + TOE_RX_CMP_OPCODE_GAIL = 5, + TOE_RX_CMP_OPCODE_GRI = 6, + TOE_RX_CMP_OPCODE_GJ = 7, + TOE_RX_CMP_OPCODE_DGI = 8, + TOE_RX_CMP_OPCODE_CMP = 9, + TOE_RX_CMP_OPCODE_REL = 10, + TOE_RX_CMP_OPCODE_SKP = 11, + TOE_RX_CMP_OPCODE_URG = 12, + TOE_RX_CMP_OPCODE_RT_TO = 13, + TOE_RX_CMP_OPCODE_KA_TO = 14, + TOE_RX_CMP_OPCODE_MAX_RT = 15, + TOE_RX_CMP_OPCODE_DBT_RE = 16, + TOE_RX_CMP_OPCODE_SYN = 17, + TOE_RX_CMP_OPCODE_OPT_ERR = 18, + TOE_RX_CMP_OPCODE_FW2_TO = 19, + TOE_RX_CMP_OPCODE_2WY_CLS = 20, + TOE_RX_CMP_OPCODE_RST_RCV = 21, + TOE_RX_CMP_OPCODE_FIN_RCV = 22, + TOE_RX_CMP_OPCODE_FIN_UPL = 23, + TOE_RX_CMP_OPCODE_INIT = 32, + TOE_RX_CMP_OPCODE_RSS_UPDATE = 33, + TOE_RX_CMP_OPCODE_CLOSE = 34, + TOE_RX_CMP_OPCODE_INITIATE_OFFLOAD = 80, + TOE_RX_CMP_OPCODE_SEARCHER_DELETE = 81, + TOE_RX_CMP_OPCODE_TERMINATE = 82, + TOE_RX_CMP_OPCODE_QUERY = 83, + TOE_RX_CMP_OPCODE_RESET_SEND = 84, + TOE_RX_CMP_OPCODE_INVALIDATE = 85, + TOE_RX_CMP_OPCODE_EMPTY = 86, + TOE_RX_CMP_OPCODE_UPDATE = 87, + MAX_TOE_RX_CMP_OPCODE +}; + +/* TOE rx ooo completion data */ +struct toe_rx_cqe_ooo_params { + __le32 nbytes; + __le16 grq_buff_id; + u8 isle_num; + u8 reserved0; +}; + +/* TOE rx in order completion data */ +struct toe_rx_cqe_in_order_params { + __le32 nbytes; + __le16 grq_buff_id; + __le16 reserved1; +}; + +/* Union for TOE rx completion data */ +union toe_rx_cqe_data_union { + struct toe_rx_cqe_ooo_params ooo_params; + struct toe_rx_cqe_in_order_params in_order_params; + struct regpair raw_data; +}; + +/* TOE rx completion element */ +struct 
toe_rx_cqe { + __le16 icid; + u8 completion_opcode; + u8 reserved0; + __le32 reserved1; + union toe_rx_cqe_data_union data; +}; + +/* toe RX doorbel data */ +struct toe_rx_db_data { + __le32 local_adv_wnd_seq; + __le32 reserved[3]; +}; + +/* Toe GRQ buffer descriptor */ +struct toe_rx_grq_bd { + struct regpair addr; + __le16 buff_id; + __le16 reserved0; + __le32 reserved1; +}; + +/* Toe transmission application buffer descriptor */ +struct toe_tx_app_buff_desc { + __le32 next_buffer_start_seq; + __le32 reserved; +}; + +/* Toe transmission application buffer descriptor page pointer */ +struct toe_tx_app_buff_page_pointer { + struct regpair next_page_addr; +}; + +/* Toe transmission buffer descriptor */ +struct toe_tx_bd { + struct regpair addr; + __le16 size; + __le16 flags; +#define TOE_TX_BD_PUSH_MASK 0x1 +#define TOE_TX_BD_PUSH_SHIFT 0 +#define TOE_TX_BD_NOTIFY_MASK 0x1 +#define TOE_TX_BD_NOTIFY_SHIFT 1 +#define TOE_TX_BD_LARGE_IO_MASK 0x1 +#define TOE_TX_BD_LARGE_IO_SHIFT 2 +#define TOE_TX_BD_BD_CONS_MASK 0x1FFF +#define TOE_TX_BD_BD_CONS_SHIFT 3 + __le32 next_bd_start_seq; +}; + +/* TOE completion opcodes */ +enum toe_tx_cmp_opcode { + TOE_TX_CMP_OPCODE_DATA, + TOE_TX_CMP_OPCODE_TERMINATE, + TOE_TX_CMP_OPCODE_EMPTY, + TOE_TX_CMP_OPCODE_RESET_SEND, + TOE_TX_CMP_OPCODE_INVALIDATE, + TOE_TX_CMP_OPCODE_RST_RCV, + MAX_TOE_TX_CMP_OPCODE +}; + +/* Toe transmission completion element */ +struct toe_tx_cqe { + __le16 icid; + u8 opcode; + u8 reserved; + __le32 size; +}; + +/* Toe transmission page pointer bd */ +struct toe_tx_page_pointer_bd { + struct regpair next_page_addr; + struct regpair prev_page_addr; +}; + +/* Toe transmission completion element page pointer */ +struct toe_tx_page_pointer_cqe { + struct regpair next_page_addr; +}; + +/* toe update parameters */ +struct toe_update_params { + __le16 flags; +#define TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED_MASK 0x1 +#define TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED_SHIFT 0 +#define TOE_UPDATE_PARAMS_RESERVED_MASK 0x7FFF +#define TOE_UPDATE_PARAMS_RESERVED_SHIFT 1 + __le16 rcv_indication_size; + __le16 reserved1[2]; +}; + +/* TOE update ramrod data - DMAed by firmware */ +struct toe_update_ramrod_data { + struct tcp_update_params tcp_upd_params; + struct toe_update_params toe_upd_params; +}; + +struct mstorm_toe_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_TOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define 
MSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +/* TOE doorbell data */ +struct toe_db_data { + u8 params; +#define TOE_DB_DATA_DEST_MASK 0x3 +#define TOE_DB_DATA_DEST_SHIFT 0 +#define TOE_DB_DATA_AGG_CMD_MASK 0x3 +#define TOE_DB_DATA_AGG_CMD_SHIFT 2 +#define TOE_DB_DATA_BYPASS_EN_MASK 0x1 +#define TOE_DB_DATA_BYPASS_EN_SHIFT 4 +#define TOE_DB_DATA_RESERVED_MASK 0x1 +#define TOE_DB_DATA_RESERVED_SHIFT 5 +#define TOE_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define TOE_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 bd_prod; }; /* rdma function init ramrod data */ @@ -6911,6 +6150,8 @@ enum rdma_event_opcode { RDMA_EVENT_CREATE_SRQ, RDMA_EVENT_MODIFY_SRQ, RDMA_EVENT_DESTROY_SRQ, + RDMA_EVENT_START_NAMESPACE_TRACKING, + RDMA_EVENT_STOP_NAMESPACE_TRACKING, MAX_RDMA_EVENT_OPCODE }; @@ -6935,18 +6176,33 @@ struct rdma_init_func_hdr { u8 relaxed_ordering; __le16 first_reg_srq_id; __le32 reg_srq_base_addr; - u8 searcher_mode; - u8 pvrdma_mode; + u8 flags; +#define RDMA_INIT_FUNC_HDR_SEARCHER_MODE_MASK 0x1 +#define RDMA_INIT_FUNC_HDR_SEARCHER_MODE_SHIFT 0 +#define RDMA_INIT_FUNC_HDR_PVRDMA_MODE_MASK 0x1 +#define RDMA_INIT_FUNC_HDR_PVRDMA_MODE_SHIFT 1 +#define RDMA_INIT_FUNC_HDR_DPT_MODE_MASK 0x1 +#define RDMA_INIT_FUNC_HDR_DPT_MODE_SHIFT 2 +#define RDMA_INIT_FUNC_HDR_RESERVED0_MASK 0x1F +#define RDMA_INIT_FUNC_HDR_RESERVED0_SHIFT 3 + u8 dpt_byte_threshold_log; + u8 dpt_common_queue_id; u8 max_num_ns_log; - u8 reserved; }; /* rdma function init ramrod data */ struct rdma_init_func_ramrod_data { struct rdma_init_func_hdr params_header; + struct rdma_cnq_params dptq_params; struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES]; }; +/* rdma namespace tracking ramrod data */ +struct rdma_namespace_tracking_ramrod_data { + u8 name_space; + u8 reserved[7]; +}; + /* RDMA ramrod command IDs */ enum rdma_ramrod_cmd_id { RDMA_RAMROD_UNUSED, @@ -6960,6 +6216,8 @@ enum rdma_ramrod_cmd_id { RDMA_RAMROD_CREATE_SRQ, RDMA_RAMROD_MODIFY_SRQ, RDMA_RAMROD_DESTROY_SRQ, + RDMA_RAMROD_START_NS_TRACKING, + RDMA_RAMROD_STOP_NS_TRACKING, MAX_RDMA_RAMROD_CMD_ID }; @@ -7093,73 +6351,73 @@ struct rdma_xrc_srq_context { struct regpair temp[9]; }; -struct e4_tstorm_rdma_task_ag_ctx { +struct tstorm_rdma_task_ag_ctx { u8 byte0; u8 byte1; __le16 word0; u8 flags0; -#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 +#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1 +#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6 u8 flags2; -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6 u8 flags3; -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7 +#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3 +#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5 +#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define 
E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7 +#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7 u8 byte2; __le16 word1; __le32 reg0; @@ -7172,63 +6430,63 @@ struct e4_tstorm_rdma_task_ag_ctx { __le32 reg2; }; -struct e4_ustorm_rdma_conn_ag_ctx { +struct ustorm_rdma_conn_ag_ctx { u8 reserved; u8 byte1; u8 flags0; -#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_SHIFT 1 -#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_SHIFT 1 +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 -#define 
E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 +#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 u8 flags3; -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0 +#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 nvmf_only; __le16 conn_dpi; @@ -7241,214 +6499,214 @@ struct e4_ustorm_rdma_conn_ag_ctx { __le16 word3; }; -struct e4_xstorm_roce_conn_ag_ctx { +struct xstorm_roce_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1 -#define 
E4_XSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT6_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT6_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT7_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT7_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_BIT6_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT6_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_BIT7_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT7_SHIFT 7 u8 flags1; -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT8_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT8_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT9_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT9_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT10_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT10_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT14_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT14_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_BIT8_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT8_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_BIT9_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT9_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 u8 flags2; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define 
E4_XSTORM_ROCE_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags4; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF19_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF19_SHIFT 6 u8 flags7; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 6 -#define 
E4_XSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_CF20_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF20_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF21_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF21_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3EN_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4EN_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14EN_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16EN_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF15EN_MASK 0x1 
+#define XSTORM_ROCE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18EN_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19EN_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20EN_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21EN_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23EN_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF19EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF19EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_CF20EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF20EN_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF21EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF21EN_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 7 u8 flags11; -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 
flags12; -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_ROCE_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_ROCE_CONN_AG_CTX_MIGRATION_MASK 0x1 -#define 
E4_XSTORM_ROCE_CONN_AG_CTX_MIGRATION_SHIFT 0 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT17_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT17_SHIFT 1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RESERVED_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_RESERVED_SHIFT 4 -#define E4_XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 -#define E4_XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23_MASK 0x3 -#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_MIGRATION_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_MIGRATION_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_RESERVED_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RESERVED_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 word1; @@ -7470,89 +6728,89 @@ struct e4_xstorm_roce_conn_ag_ctx { __le32 reg6; }; -struct e4_tstorm_roce_conn_ag_ctx { +struct tstorm_roce_conn_ag_ctx { u8 reserved0; u8 byte1; u8 flags0; -#define E4_TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT3_SHIFT 3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 6 +#define TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 2 -#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 -#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define 
TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags2; -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 0 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 2 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7_SHIFT 4 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 0 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 2 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 4 -#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 6 -#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 +#define TSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 +#define TSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 2 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7EN_SHIFT 3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 4 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 5 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 6 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define 
TSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ROCE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 reg2; @@ -7605,15 +6863,15 @@ struct ustorm_roce_conn_st_ctx { }; /* roce connection context */ -struct e4_roce_conn_context { +struct roce_conn_context { struct ystorm_roce_conn_st_ctx ystorm_st_context; struct regpair ystorm_st_padding[2]; struct pstorm_roce_conn_st_ctx pstorm_st_context; struct xstorm_roce_conn_st_ctx xstorm_st_context; - struct e4_xstorm_roce_conn_ag_ctx xstorm_ag_context; - struct e4_tstorm_roce_conn_ag_ctx tstorm_ag_context; + struct xstorm_roce_conn_ag_ctx xstorm_ag_context; + struct tstorm_roce_conn_ag_ctx tstorm_ag_context; struct timers_context timer_context; - struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context; + struct ustorm_rdma_conn_ag_ctx ustorm_ag_context; struct tstorm_roce_conn_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; struct mstorm_roce_conn_st_ctx mstorm_st_context; @@ -7681,8 +6939,10 @@ struct roce_create_qp_req_ramrod_data { #define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_SHIFT 0 #define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1 #define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_SHIFT 1 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x3F -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 2 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FORCE_LB_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FORCE_LB_SHIFT 2 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1F +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 3 u8 name_space; u8 reserved3[3]; __le16 regular_latency_phy_queue; @@ -7714,8 +6974,10 @@ struct roce_create_qp_resp_ramrod_data { #define 
ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_SHIFT 16 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_MASK 0x1 #define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_SHIFT 17 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x3FFF -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 18 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_FORCE_LB_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_FORCE_LB_SHIFT 18 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x1FFF +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 19 __le16 xrc_domain; u8 max_ird; u8 traffic_class; @@ -7752,10 +7014,85 @@ struct roce_create_qp_resp_ramrod_data { u8 reserved3[3]; }; +/* RoCE Create Suspended qp requester runtime ramrod data */ +struct roce_create_suspended_qp_req_runtime_ramrod_data { + __le32 flags; +#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_ERR_FLG_MASK 0x1 +#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_ERR_FLG_SHIFT 0 +#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_RESERVED0_MASK \ + 0x7FFFFFFF +#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_RESERVED0_SHIFT 1 + __le32 send_msg_psn; + __le32 inflight_sends; + __le32 ssn; +}; + +/* RoCE Create Suspended QP requester ramrod data */ +struct roce_create_suspended_qp_req_ramrod_data { + struct roce_create_qp_req_ramrod_data qp_params; + struct roce_create_suspended_qp_req_runtime_ramrod_data + qp_runtime_params; +}; + +/* RoCE Create Suspended QP responder runtime params */ +struct roce_create_suspended_qp_resp_runtime_params { + __le32 flags; +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_MASK 0x1 +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_SHIFT 0 +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_MASK 0x1 +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_SHIFT 1 +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_MASK 0x3FFFFFFF +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_SHIFT 2 + __le32 receive_msg_psn; + __le32 inflight_receives; + __le32 rmsn; + __le32 rdma_key; + struct regpair rdma_va; + __le32 rdma_length; + __le32 num_rdb_entries; + __le32 resreved; +}; + +/* RoCE RDB array entry */ +struct roce_resp_qp_rdb_entry { + struct regpair atomic_data; + struct regpair va; + __le32 psn; + __le32 rkey; + __le32 byte_count; + u8 op_type; + u8 reserved[3]; +}; + +/* RoCE Create Suspended QP responder runtime ramrod data */ +struct roce_create_suspended_qp_resp_runtime_ramrod_data { + struct roce_create_suspended_qp_resp_runtime_params params; + struct roce_resp_qp_rdb_entry + rdb_array_entries[RDMA_MAX_IRQ_ELEMS_IN_PAGE]; +}; + +/* RoCE Create Suspended QP responder ramrod data */ +struct roce_create_suspended_qp_resp_ramrod_data { + struct roce_create_qp_resp_ramrod_data + qp_params; + struct roce_create_suspended_qp_resp_runtime_ramrod_data + qp_runtime_params; +}; + +/* RoCE create ud qp ramrod data */ +struct roce_create_ud_qp_ramrod_data { + __le16 local_mac_addr[3]; + __le16 vlan_id; + __le32 src_qp_id; + u8 name_space; + u8 reserved[3]; +}; + /* roce DCQCN received statistics */ struct roce_dcqcn_received_stats { struct regpair ecn_pkt_rcv; struct regpair cnp_pkt_rcv; + struct regpair cnp_pkt_reject; }; /* roce DCQCN sent statistics */ @@ -7787,6 +7124,12 @@ struct roce_destroy_qp_resp_ramrod_data { __le32 reserved; }; +/* RoCE destroy ud qp ramrod data */ +struct roce_destroy_ud_qp_ramrod_data { + __le32 src_qp_id; + __le32 reserved; +}; + /* roce error statistics */ struct roce_error_stats { __le32 
resp_remote_access_errors; @@ -7809,13 +7152,21 @@ struct roce_events_stats { /* roce slow path EQ cmd IDs */ enum roce_event_opcode { - ROCE_EVENT_CREATE_QP = 11, + ROCE_EVENT_CREATE_QP = 13, ROCE_EVENT_MODIFY_QP, ROCE_EVENT_QUERY_QP, ROCE_EVENT_DESTROY_QP, ROCE_EVENT_CREATE_UD_QP, ROCE_EVENT_DESTROY_UD_QP, ROCE_EVENT_FUNC_UPDATE, + ROCE_EVENT_SUSPEND_QP, + ROCE_EVENT_QUERY_SUSPENDED_QP, + ROCE_EVENT_CREATE_SUSPENDED_QP, + ROCE_EVENT_RESUME_QP, + ROCE_EVENT_SUSPEND_UD_QP, + ROCE_EVENT_RESUME_UD_QP, + ROCE_EVENT_CREATE_SUSPENDED_UD_QP, + ROCE_EVENT_FLUSH_DPT_QP, MAX_ROCE_EVENT_OPCODE }; @@ -7843,6 +7194,18 @@ struct roce_init_func_ramrod_data { struct roce_init_func_params roce; }; +/* roce_ll2_cqe_data */ +struct roce_ll2_cqe_data { + u8 name_space; + u8 flags; +#define ROCE_LL2_CQE_DATA_QP_SUSPENDED_MASK 0x1 +#define ROCE_LL2_CQE_DATA_QP_SUSPENDED_SHIFT 0 +#define ROCE_LL2_CQE_DATA_RESERVED0_MASK 0x7F +#define ROCE_LL2_CQE_DATA_RESERVED0_SHIFT 1 + u8 reserved1[2]; + __le32 cid; +}; + /* roce modify qp requester ramrod data */ struct roce_modify_qp_req_ramrod_data { __le16 flags; @@ -7870,8 +7233,10 @@ struct roce_modify_qp_req_ramrod_data { #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10 #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1 #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 13 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x3 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 14 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_FORCE_LB_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_FORCE_LB_SHIFT 14 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 15 u8 fields; #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF #define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0 @@ -7917,8 +7282,10 @@ struct roce_modify_qp_resp_ramrod_data { #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 10 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x1F -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 11 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_FORCE_LB_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_FORCE_LB_SHIFT 11 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0xF +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 12 u8 fields; #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7 #define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0 @@ -7969,18 +7336,84 @@ struct roce_query_qp_resp_ramrod_data { struct regpair output_params_addr; }; +/* RoCE Query Suspended QP requester output params */ +struct roce_query_suspended_qp_req_output_params { + __le32 psn; + __le32 flags; +#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1 +#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0 +#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF +#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 1 + __le32 send_msg_psn; + __le32 inflight_sends; + __le32 ssn; + __le32 reserved; +}; + +/* RoCE Query Suspended QP requester ramrod data */ +struct roce_query_suspended_qp_req_ramrod_data { + struct regpair output_params_addr; +}; + +/* RoCE Query Suspended QP responder runtime params */ +struct roce_query_suspended_qp_resp_runtime_params { + __le32 psn; + __le32 flags; +#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_MASK 0x1 +#define 
ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_SHIFT 0 +#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_MASK 0x1 +#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_SHIFT 1 +#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_MASK 0x3FFFFFFF +#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_SHIFT 2 + __le32 receive_msg_psn; + __le32 inflight_receives; + __le32 rmsn; + __le32 rdma_key; + struct regpair rdma_va; + __le32 rdma_length; + __le32 num_rdb_entries; +}; + +/* RoCE Query Suspended QP responder output params */ +struct roce_query_suspended_qp_resp_output_params { + struct roce_query_suspended_qp_resp_runtime_params runtime_params; + struct roce_resp_qp_rdb_entry + rdb_array_entries[RDMA_MAX_IRQ_ELEMS_IN_PAGE]; +}; + +/* RoCE Query Suspended QP responder ramrod data */ +struct roce_query_suspended_qp_resp_ramrod_data { + struct regpair output_params_addr; +}; + /* ROCE ramrod command IDs */ enum roce_ramrod_cmd_id { - ROCE_RAMROD_CREATE_QP = 11, + ROCE_RAMROD_CREATE_QP = 13, ROCE_RAMROD_MODIFY_QP, ROCE_RAMROD_QUERY_QP, ROCE_RAMROD_DESTROY_QP, ROCE_RAMROD_CREATE_UD_QP, ROCE_RAMROD_DESTROY_UD_QP, ROCE_RAMROD_FUNC_UPDATE, + ROCE_RAMROD_SUSPEND_QP, + ROCE_RAMROD_QUERY_SUSPENDED_QP, + ROCE_RAMROD_CREATE_SUSPENDED_QP, + ROCE_RAMROD_RESUME_QP, + ROCE_RAMROD_SUSPEND_UD_QP, + ROCE_RAMROD_RESUME_UD_QP, + ROCE_RAMROD_CREATE_SUSPENDED_UD_QP, + ROCE_RAMROD_FLUSH_DPT_QP, MAX_ROCE_RAMROD_CMD_ID }; +/* ROCE RDB array entry type */ +enum roce_resp_qp_rdb_entry_type { + ROCE_QP_RDB_ENTRY_RDMA_RESPONSE = 0, + ROCE_QP_RDB_ENTRY_ATOMIC_RESPONSE = 1, + ROCE_QP_RDB_ENTRY_INVALID = 2, + MAX_ROCE_RESP_QP_RDB_ENTRY_TYPE +}; + /* RoCE func init ramrod data */ struct roce_update_func_params { u8 cnp_vlan_priority; @@ -7995,7 +7428,7 @@ struct roce_update_func_params { __le32 cnp_send_timeout; }; -struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part { +struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part { u8 reserved0; u8 state; u8 flags0; @@ -8222,200 +7655,200 @@ struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part { __le32 reg4; }; -struct e4_mstorm_roce_conn_ag_ctx { +struct mstorm_roce_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define 
E4_MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct e4_mstorm_roce_req_conn_ag_ctx { +struct mstorm_roce_req_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct e4_mstorm_roce_resp_conn_ag_ctx { +struct mstorm_roce_resp_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define 
MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct e4_tstorm_roce_req_conn_ag_ctx { +struct tstorm_roce_req_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6 u8 flags1; -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags2; -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_SHIFT 0 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_MASK 0x3 +#define 
TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6 u8 flags3; -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_SHIFT 1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_SHIFT 1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1 +#define 
TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_SHIFT 1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_SHIFT 1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 dif_rxmit_cnt; __le32 snd_nxt_psn; __le32 snd_max_psn; @@ -8437,89 +7870,89 @@ struct e4_tstorm_roce_req_conn_ag_ctx { __le32 reg10; }; -struct e4_tstorm_roce_resp_conn_ag_ctx { +struct tstorm_roce_resp_conn_ag_ctx { u8 byte0; u8 state; u8 flags0; -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define 
TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags2; -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define 
TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define 
TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 psn_and_rxmit_id_echo; __le32 reg1; __le32 reg2; @@ -8541,63 +7974,63 @@ struct e4_tstorm_roce_resp_conn_ag_ctx { __le32 reg10; }; -struct e4_ustorm_roce_req_conn_ag_ctx { +struct ustorm_roce_req_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define 
USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -8610,63 +8043,63 @@ struct e4_ustorm_roce_req_conn_ag_ctx { __le16 word3; }; -struct e4_ustorm_roce_resp_conn_ag_ctx { +struct ustorm_roce_resp_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define 
USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define 
E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -8679,214 +8112,214 @@ struct e4_ustorm_roce_resp_conn_ag_ctx { __le16 word3; }; -struct e4_xstorm_roce_req_conn_ag_ctx { +struct xstorm_roce_req_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1 -#define 
E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 u8 flags2; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags4; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_MASK 0x3 -#define 
E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6 u8 flags7; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define 
E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5 +#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7 u8 flags11; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1 +#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3 -#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 word1; @@ -8908,216 +8341,216 @@ struct e4_xstorm_roce_req_conn_ag_ctx { __le32 orq_cons; }; -struct e4_xstorm_roce_resp_conn_ag_ctx { +struct xstorm_roce_resp_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define 
XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 u8 flags2; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 -#define 
E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags4; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6 u8 flags7; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2 -#define 
E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6 -#define 
E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7 u8 flags11; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define 
E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define 
E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3 -#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 irq_prod_shadow; @@ -9139,37 +8572,37 @@ struct e4_xstorm_roce_resp_conn_ag_ctx { __le32 msn_and_syndrome; }; -struct e4_ystorm_roce_conn_ag_ctx { +struct ystorm_roce_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4 -#define 
E4_YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -9183,37 +8616,37 @@ struct e4_ystorm_roce_conn_ag_ctx { __le32 reg3; }; -struct e4_ystorm_roce_req_conn_ag_ctx { +struct ystorm_roce_req_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 -#define 
E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -9227,37 +8660,37 @@ struct e4_ystorm_roce_req_conn_ag_ctx { __le32 reg3; }; -struct e4_ystorm_roce_resp_conn_ag_ctx { +struct ystorm_roce_resp_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define 
E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -9294,216 +8727,216 @@ struct xstorm_iwarp_conn_st_ctx { __le32 reserved[48]; }; -struct e4_xstorm_iwarp_conn_ag_ctx { +struct xstorm_iwarp_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7 u8 flags1; -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1 
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7 u8 flags2; -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 u8 flags3; -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4 +#define 
XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 u8 flags7; -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5 -#define 
E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define 
E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7 u8 flags11; -#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define 
E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2 -#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 -#define 
E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 -#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 -#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_MASK 0x3 -#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 physical_q1; @@ -9551,89 +8984,89 @@ struct e4_xstorm_iwarp_conn_ag_ctx { __le32 reg17; }; -struct e4_tstorm_iwarp_conn_ag_ctx { +struct tstorm_iwarp_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_SHIFT 3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_SHIFT 3 +#define TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0 -#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2 -#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0 -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5 -#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6 -#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4 -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5 -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 
+#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 unaligned_nxt_seq; @@ -9671,16 +9104,16 @@ struct ustorm_iwarp_conn_st_ctx { }; /* iwarp connection context */ -struct e4_iwarp_conn_context { +struct iwarp_conn_context { struct ystorm_iwarp_conn_st_ctx ystorm_st_context; struct regpair ystorm_st_padding[2]; struct pstorm_iwarp_conn_st_ctx pstorm_st_context; struct regpair pstorm_st_padding[2]; struct xstorm_iwarp_conn_st_ctx xstorm_st_context; - struct e4_xstorm_iwarp_conn_ag_ctx xstorm_ag_context; - struct e4_tstorm_iwarp_conn_ag_ctx tstorm_ag_context; + struct xstorm_iwarp_conn_ag_ctx xstorm_ag_context; + struct tstorm_iwarp_conn_ag_ctx tstorm_ag_context; struct timers_context timer_context; - struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context; + struct ustorm_rdma_conn_ag_ctx ustorm_ag_context; struct tstorm_iwarp_conn_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; struct mstorm_iwarp_conn_st_ctx mstorm_st_context; @@ -9731,8 +9164,8 @@ enum iwarp_eqe_async_opcode { IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED, IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE, IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW, - 
IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY, IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT, + IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY, MAX_IWARP_EQE_ASYNC_OPCODE }; @@ -9750,8 +9183,7 @@ struct iwarp_eqe_data_tcp_async_completion { /* iWARP completion queue types */ enum iwarp_eqe_sync_opcode { - IWARP_EVENT_TYPE_TCP_OFFLOAD = - 11, + IWARP_EVENT_TYPE_TCP_OFFLOAD = 13, IWARP_EVENT_TYPE_MPA_OFFLOAD, IWARP_EVENT_TYPE_MPA_OFFLOAD_SEND_RTR, IWARP_EVENT_TYPE_CREATE_QP, @@ -9783,8 +9215,6 @@ enum iwarp_fw_return_code { IWARP_EXCEPTION_DETECTED_LLP_RESET, IWARP_EXCEPTION_DETECTED_IRQ_FULL, IWARP_EXCEPTION_DETECTED_RQ_EMPTY, - IWARP_EXCEPTION_DETECTED_SRQ_EMPTY, - IWARP_EXCEPTION_DETECTED_SRQ_LIMIT, IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT, IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR, IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW, @@ -9878,9 +9308,10 @@ struct iwarp_mpa_offload_ramrod_data { struct regpair async_eqe_output_buf; struct regpair handle_for_async; struct regpair shared_queue_addr; + __le32 additional_setup_time; __le16 rcv_wnd; u8 stats_counter_id; - u8 reserved3[13]; + u8 reserved3[9]; }; /* iWARP TCP connection offload params passed by driver to FW */ @@ -9888,11 +9319,13 @@ struct iwarp_offload_params { struct mpa_ulp_buffer incoming_ulp_buffer; struct regpair async_eqe_output_buf; struct regpair handle_for_async; + __le32 additional_setup_time; __le16 physical_q0; __le16 physical_q1; u8 stats_counter_id; u8 mpa_mode; - u8 reserved[10]; + u8 src_vport_id; + u8 reserved[5]; }; /* iWARP query QP output params */ @@ -9912,7 +9345,7 @@ struct iwarp_query_qp_ramrod_data { /* iWARP Ramrod Command IDs */ enum iwarp_ramrod_cmd_id { - IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 11, + IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 13, IWARP_RAMROD_CMD_ID_MPA_OFFLOAD, IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR, IWARP_RAMROD_CMD_ID_CREATE_QP, @@ -9971,100 +9404,100 @@ struct unaligned_opaque_data { __le32 cid; }; -struct e4_mstorm_iwarp_conn_ag_ctx { +struct mstorm_iwarp_conn_ag_ctx { u8 reserved; u8 state; u8 flags0; -#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3 -#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3 +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2 +#define MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0 +#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6 +#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 rcq_cons; __le16 rcq_cons_th; __le32 reg0; __le32 reg1; }; -struct e4_ustorm_iwarp_conn_ag_ctx { +struct ustorm_iwarp_conn_ag_ctx { u8 reserved; u8 byte1; u8 flags0; -#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define 
E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 +#define USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 +#define USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 u8 flags3; -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -10077,37 +9510,37 @@ struct e4_ustorm_iwarp_conn_ag_ctx { __le16 word3; }; -struct e4_ystorm_iwarp_conn_ag_ctx { +struct ystorm_iwarp_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -10297,216 +9730,216 @@ struct xstorm_fcoe_conn_st_ctx { struct fcoe_wqe cached_wqes[16]; }; -struct e4_xstorm_fcoe_conn_ag_ctx { +struct xstorm_fcoe_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7 u8 flags2; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6 +#define 
XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6 u8 flags7; -#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define 
XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 -#define 
E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7 u8 flags11; -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define 
E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 
0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1 -#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3 -#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 word1; @@ -10544,150 +9977,150 @@ struct ustorm_fcoe_conn_st_ctx { u8 reserved[2]; }; -struct e4_tstorm_fcoe_conn_ag_ctx { +struct tstorm_fcoe_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5 -#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6 u8 flags1; -#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2 -#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define 
TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2 -#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4 -#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6 -#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 +#define TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define 
TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; }; -struct e4_ustorm_fcoe_conn_ag_ctx { +struct ustorm_fcoe_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2 +#define 
USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -10728,37 +10161,37 @@ struct tstorm_fcoe_conn_st_ctx { u8 reserved0[4]; }; -struct e4_mstorm_fcoe_conn_ag_ctx { 
+struct mstorm_fcoe_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; @@ -10804,21 +10237,21 @@ struct mstorm_fcoe_conn_st_ctx { }; /* fcoe connection context */ -struct e4_fcoe_conn_context { +struct fcoe_conn_context { struct ystorm_fcoe_conn_st_ctx ystorm_st_context; struct pstorm_fcoe_conn_st_ctx pstorm_st_context; struct regpair pstorm_st_padding[2]; struct xstorm_fcoe_conn_st_ctx xstorm_st_context; - struct e4_xstorm_fcoe_conn_ag_ctx xstorm_ag_context; + struct xstorm_fcoe_conn_ag_ctx xstorm_ag_context; struct regpair xstorm_ag_padding[6]; struct ustorm_fcoe_conn_st_ctx ustorm_st_context; struct regpair ustorm_st_padding[2]; - struct e4_tstorm_fcoe_conn_ag_ctx tstorm_ag_context; + struct tstorm_fcoe_conn_ag_ctx tstorm_ag_context; struct regpair tstorm_ag_padding[2]; struct timers_context timer_context; - struct e4_ustorm_fcoe_conn_ag_ctx ustorm_ag_context; + struct ustorm_fcoe_conn_ag_ctx ustorm_ag_context; struct 
tstorm_fcoe_conn_st_ctx tstorm_st_context; - struct e4_mstorm_fcoe_conn_ag_ctx mstorm_ag_context; + struct mstorm_fcoe_conn_ag_ctx mstorm_ag_context; struct mstorm_fcoe_conn_st_ctx mstorm_st_context; }; @@ -10869,37 +10302,37 @@ struct fcoe_stat_ramrod_params { struct fcoe_stat_ramrod_data stat_ramrod_data; }; -struct e4_ystorm_fcoe_conn_ag_ctx { +struct ystorm_fcoe_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -10930,216 +10363,216 @@ struct xstorm_iscsi_tcp_conn_st_ctx { __le32 reserved_iscsi[44]; }; -struct e4_xstorm_iscsi_conn_ag_ctx { +struct xstorm_iscsi_conn_ag_ctx { u8 cdu_validation; u8 state; u8 flags0; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define 
E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7 u8 flags1; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7 u8 flags2; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0 
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 u8 flags3; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6 u8 flags6; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 u8 
flags7; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1 -#define 
E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7 u8 flags11; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define 
E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define 
E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3 -#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 physical_q1; @@ -11187,89 +10620,89 @@ struct e4_xstorm_iscsi_conn_ag_ctx { __le32 reg17; }; -struct e4_tstorm_iscsi_conn_ag_ctx { +struct tstorm_iscsi_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5 -#define 
E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_MASK 0x3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_SHIFT 2 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1 
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 u8 flags4; -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_SHIFT 6 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define 
TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 rx_tcp_checksum_err_cnt; @@ -11284,63 +10717,63 @@ struct e4_tstorm_iscsi_conn_ag_ctx { __le16 word0; }; -struct e4_ustorm_iscsi_conn_ag_ctx { +struct ustorm_iscsi_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 +#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5 
+#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -11358,37 +10791,37 @@ struct tstorm_iscsi_conn_st_ctx { __le32 reserved[44]; }; -struct e4_mstorm_iscsi_conn_ag_ctx { +struct mstorm_iscsi_conn_ag_ctx { u8 reserved; u8 state; u8 flags0; -#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 +#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; @@ -11407,22 +10840,22 @@ struct ustorm_iscsi_conn_st_ctx { }; /* iscsi connection context */ -struct e4_iscsi_conn_context { +struct iscsi_conn_context { struct ystorm_iscsi_conn_st_ctx ystorm_st_context; struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context; struct regpair pstorm_st_padding[2]; struct pb_context xpb2_context; struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context; struct regpair xstorm_st_padding[2]; - struct e4_xstorm_iscsi_conn_ag_ctx xstorm_ag_context; - struct e4_tstorm_iscsi_conn_ag_ctx tstorm_ag_context; + struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context; + struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context; struct regpair tstorm_ag_padding[2]; struct timers_context timer_context; - struct e4_ustorm_iscsi_conn_ag_ctx ustorm_ag_context; + struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context; struct pb_context upb_context; struct tstorm_iscsi_conn_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; - struct e4_mstorm_iscsi_conn_ag_ctx mstorm_ag_context; + struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context; struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context; struct ustorm_iscsi_conn_st_ctx ustorm_st_context; }; @@ -11433,37 +10866,37 @@ struct iscsi_init_ramrod_params { struct tcp_init_params tcp_init; }; -struct e4_ystorm_iscsi_conn_ag_ctx { +struct ystorm_iscsi_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 +#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; 
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -11477,1922 +10910,4 @@ struct e4_ystorm_iscsi_conn_ag_ctx { __le32 reg3; }; -#define MFW_TRACE_SIGNATURE 0x25071946 - -/* The trace in the buffer */ -#define MFW_TRACE_EVENTID_MASK 0x00ffff -#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000 -#define MFW_TRACE_PRM_SIZE_OFFSET 16 -#define MFW_TRACE_ENTRY_SIZE 3 - -struct mcp_trace { - u32 signature; /* Help to identify that the trace is valid */ - u32 size; /* the size of the trace buffer in bytes */ - u32 curr_level; /* 2 - all will be written to the buffer - * 1 - debug trace will not be written - * 0 - just errors will be written to the buffer - */ - u32 modules_mask[2]; /* a bit per module, 1 means write it, 0 means - * mask it. - */ - - /* Warning: the following pointers are assumed to be 32bits as they are - * used only in the MFW. - */ - u32 trace_prod; /* The next trace will be written to this offset */ - u32 trace_oldest; /* The oldest valid trace starts at this offset - * (usually very close after the current producer). - */ -}; - -#define VF_MAX_STATIC 192 - -#define MCP_GLOB_PATH_MAX 2 -#define MCP_PORT_MAX 2 -#define MCP_GLOB_PORT_MAX 4 -#define MCP_GLOB_FUNC_MAX 16 - -typedef u32 offsize_t; /* In DWORDS !!! 
*/ -/* Offset from the beginning of the MCP scratchpad */ -#define OFFSIZE_OFFSET_SHIFT 0 -#define OFFSIZE_OFFSET_MASK 0x0000ffff -/* Size of specific element (not the whole array if any) */ -#define OFFSIZE_SIZE_SHIFT 16 -#define OFFSIZE_SIZE_MASK 0xffff0000 - -#define SECTION_OFFSET(_offsize) ((((_offsize & \ - OFFSIZE_OFFSET_MASK) >> \ - OFFSIZE_OFFSET_SHIFT) << 2)) - -#define QED_SECTION_SIZE(_offsize) (((_offsize & \ - OFFSIZE_SIZE_MASK) >> \ - OFFSIZE_SIZE_SHIFT) << 2) - -#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \ - SECTION_OFFSET(_offsize) + \ - (QED_SECTION_SIZE(_offsize) * idx)) - -#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \ - (_pub_base + offsetof(struct mcp_public_data, sections[_section])) - -/* PHY configuration */ -struct eth_phy_cfg { - u32 speed; -#define ETH_SPEED_AUTONEG 0x0 -#define ETH_SPEED_SMARTLINQ 0x8 - - u32 pause; -#define ETH_PAUSE_NONE 0x0 -#define ETH_PAUSE_AUTONEG 0x1 -#define ETH_PAUSE_RX 0x2 -#define ETH_PAUSE_TX 0x4 - - u32 adv_speed; - - u32 loopback_mode; -#define ETH_LOOPBACK_NONE 0x0 -#define ETH_LOOPBACK_INT_PHY 0x1 -#define ETH_LOOPBACK_EXT_PHY 0x2 -#define ETH_LOOPBACK_EXT 0x3 -#define ETH_LOOPBACK_MAC 0x4 -#define ETH_LOOPBACK_CNIG_AH_ONLY_0123 0x5 -#define ETH_LOOPBACK_CNIG_AH_ONLY_2301 0x6 -#define ETH_LOOPBACK_PCS_AH_ONLY 0x7 -#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY 0x8 -#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY 0x9 - - u32 eee_cfg; -#define EEE_CFG_EEE_ENABLED BIT(0) -#define EEE_CFG_TX_LPI BIT(1) -#define EEE_CFG_ADV_SPEED_1G BIT(2) -#define EEE_CFG_ADV_SPEED_10G BIT(3) -#define EEE_TX_TIMER_USEC_MASK 0xfffffff0 -#define EEE_TX_TIMER_USEC_OFFSET 4 -#define EEE_TX_TIMER_USEC_BALANCED_TIME 0xa00 -#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME 0x100 -#define EEE_TX_TIMER_USEC_LATENCY_TIME 0x6000 - - u32 deprecated; - - u32 fec_mode; -#define FEC_FORCE_MODE_MASK 0x000000ff -#define FEC_FORCE_MODE_OFFSET 0 -#define FEC_FORCE_MODE_NONE 0x00 -#define FEC_FORCE_MODE_FIRECODE 0x01 -#define FEC_FORCE_MODE_RS 0x02 -#define FEC_FORCE_MODE_AUTO 0x07 -#define FEC_EXTENDED_MODE_MASK 0xffffff00 -#define FEC_EXTENDED_MODE_OFFSET 8 -#define ETH_EXT_FEC_NONE 0x00000100 -#define ETH_EXT_FEC_10G_NONE 0x00000200 -#define ETH_EXT_FEC_10G_BASE_R 0x00000400 -#define ETH_EXT_FEC_20G_NONE 0x00000800 -#define ETH_EXT_FEC_20G_BASE_R 0x00001000 -#define ETH_EXT_FEC_25G_NONE 0x00002000 -#define ETH_EXT_FEC_25G_BASE_R 0x00004000 -#define ETH_EXT_FEC_25G_RS528 0x00008000 -#define ETH_EXT_FEC_40G_NONE 0x00010000 -#define ETH_EXT_FEC_40G_BASE_R 0x00020000 -#define ETH_EXT_FEC_50G_NONE 0x00040000 -#define ETH_EXT_FEC_50G_BASE_R 0x00080000 -#define ETH_EXT_FEC_50G_RS528 0x00100000 -#define ETH_EXT_FEC_50G_RS544 0x00200000 -#define ETH_EXT_FEC_100G_NONE 0x00400000 -#define ETH_EXT_FEC_100G_BASE_R 0x00800000 -#define ETH_EXT_FEC_100G_RS528 0x01000000 -#define ETH_EXT_FEC_100G_RS544 0x02000000 - - u32 extended_speed; -#define ETH_EXT_SPEED_MASK 0x0000ffff -#define ETH_EXT_SPEED_OFFSET 0 -#define ETH_EXT_SPEED_AN 0x00000001 -#define ETH_EXT_SPEED_1G 0x00000002 -#define ETH_EXT_SPEED_10G 0x00000004 -#define ETH_EXT_SPEED_20G 0x00000008 -#define ETH_EXT_SPEED_25G 0x00000010 -#define ETH_EXT_SPEED_40G 0x00000020 -#define ETH_EXT_SPEED_50G_BASE_R 0x00000040 -#define ETH_EXT_SPEED_50G_BASE_R2 0x00000080 -#define ETH_EXT_SPEED_100G_BASE_R2 0x00000100 -#define ETH_EXT_SPEED_100G_BASE_R4 0x00000200 -#define ETH_EXT_SPEED_100G_BASE_P4 0x00000400 -#define ETH_EXT_ADV_SPEED_MASK 0xffff0000 -#define ETH_EXT_ADV_SPEED_OFFSET 16 -#define ETH_EXT_ADV_SPEED_RESERVED 0x00010000 
-#define ETH_EXT_ADV_SPEED_1G 0x00020000 -#define ETH_EXT_ADV_SPEED_10G 0x00040000 -#define ETH_EXT_ADV_SPEED_20G 0x00080000 -#define ETH_EXT_ADV_SPEED_25G 0x00100000 -#define ETH_EXT_ADV_SPEED_40G 0x00200000 -#define ETH_EXT_ADV_SPEED_50G_BASE_R 0x00400000 -#define ETH_EXT_ADV_SPEED_50G_BASE_R2 0x00800000 -#define ETH_EXT_ADV_SPEED_100G_BASE_R2 0x01000000 -#define ETH_EXT_ADV_SPEED_100G_BASE_R4 0x02000000 -#define ETH_EXT_ADV_SPEED_100G_BASE_P4 0x04000000 -}; - -struct port_mf_cfg { - u32 dynamic_cfg; -#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff -#define PORT_MF_CFG_OV_TAG_SHIFT 0 -#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK - - u32 reserved[1]; -}; - -struct eth_stats { - u64 r64; - u64 r127; - u64 r255; - u64 r511; - u64 r1023; - u64 r1518; - - union { - struct { - u64 r1522; - u64 r2047; - u64 r4095; - u64 r9216; - u64 r16383; - } bb0; - struct { - u64 unused1; - u64 r1519_to_max; - u64 unused2; - u64 unused3; - u64 unused4; - } ah0; - } u0; - - u64 rfcs; - u64 rxcf; - u64 rxpf; - u64 rxpp; - u64 raln; - u64 rfcr; - u64 rovr; - u64 rjbr; - u64 rund; - u64 rfrg; - u64 t64; - u64 t127; - u64 t255; - u64 t511; - u64 t1023; - u64 t1518; - - union { - struct { - u64 t2047; - u64 t4095; - u64 t9216; - u64 t16383; - } bb1; - struct { - u64 t1519_to_max; - u64 unused6; - u64 unused7; - u64 unused8; - } ah1; - } u1; - - u64 txpf; - u64 txpp; - - union { - struct { - u64 tlpiec; - u64 tncl; - } bb2; - struct { - u64 unused9; - u64 unused10; - } ah2; - } u2; - - u64 rbyte; - u64 rxuca; - u64 rxmca; - u64 rxbca; - u64 rxpok; - u64 tbyte; - u64 txuca; - u64 txmca; - u64 txbca; - u64 txcf; -}; - -struct brb_stats { - u64 brb_truncate[8]; - u64 brb_discard[8]; -}; - -struct port_stats { - struct brb_stats brb; - struct eth_stats eth; -}; - -struct couple_mode_teaming { - u8 port_cmt[MCP_GLOB_PORT_MAX]; -#define PORT_CMT_IN_TEAM (1 << 0) - -#define PORT_CMT_PORT_ROLE (1 << 1) -#define PORT_CMT_PORT_INACTIVE (0 << 1) -#define PORT_CMT_PORT_ACTIVE (1 << 1) - -#define PORT_CMT_TEAM_MASK (1 << 2) -#define PORT_CMT_TEAM0 (0 << 2) -#define PORT_CMT_TEAM1 (1 << 2) -}; - -#define LLDP_CHASSIS_ID_STAT_LEN 4 -#define LLDP_PORT_ID_STAT_LEN 4 -#define DCBX_MAX_APP_PROTOCOL 32 -#define MAX_SYSTEM_LLDP_TLV_DATA 32 - -enum _lldp_agent { - LLDP_NEAREST_BRIDGE = 0, - LLDP_NEAREST_NON_TPMR_BRIDGE, - LLDP_NEAREST_CUSTOMER_BRIDGE, - LLDP_MAX_LLDP_AGENTS -}; - -struct lldp_config_params_s { - u32 config; -#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff -#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0 -#define LLDP_CONFIG_HOLD_MASK 0x00000f00 -#define LLDP_CONFIG_HOLD_SHIFT 8 -#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000 -#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12 -#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000 -#define LLDP_CONFIG_ENABLE_RX_SHIFT 30 -#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000 -#define LLDP_CONFIG_ENABLE_TX_SHIFT 31 - u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; - u32 local_port_id[LLDP_PORT_ID_STAT_LEN]; -}; - -struct lldp_status_params_s { - u32 prefix_seq_num; - u32 status; - u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; - u32 peer_port_id[LLDP_PORT_ID_STAT_LEN]; - u32 suffix_seq_num; -}; - -struct dcbx_ets_feature { - u32 flags; -#define DCBX_ETS_ENABLED_MASK 0x00000001 -#define DCBX_ETS_ENABLED_SHIFT 0 -#define DCBX_ETS_WILLING_MASK 0x00000002 -#define DCBX_ETS_WILLING_SHIFT 1 -#define DCBX_ETS_ERROR_MASK 0x00000004 -#define DCBX_ETS_ERROR_SHIFT 2 -#define DCBX_ETS_CBS_MASK 0x00000008 -#define DCBX_ETS_CBS_SHIFT 3 -#define DCBX_ETS_MAX_TCS_MASK 0x000000f0 -#define DCBX_ETS_MAX_TCS_SHIFT 4 
-#define DCBX_OOO_TC_MASK 0x00000f00 -#define DCBX_OOO_TC_SHIFT 8 - u32 pri_tc_tbl[1]; -#define DCBX_TCP_OOO_TC (4) - -#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1) -#define DCBX_CEE_STRICT_PRIORITY 0xf - u32 tc_bw_tbl[2]; - u32 tc_tsa_tbl[2]; -#define DCBX_ETS_TSA_STRICT 0 -#define DCBX_ETS_TSA_CBS 1 -#define DCBX_ETS_TSA_ETS 2 -}; - -#define DCBX_TCP_OOO_TC (4) -#define DCBX_TCP_OOO_K2_4PORT_TC (3) - -struct dcbx_app_priority_entry { - u32 entry; -#define DCBX_APP_PRI_MAP_MASK 0x000000ff -#define DCBX_APP_PRI_MAP_SHIFT 0 -#define DCBX_APP_PRI_0 0x01 -#define DCBX_APP_PRI_1 0x02 -#define DCBX_APP_PRI_2 0x04 -#define DCBX_APP_PRI_3 0x08 -#define DCBX_APP_PRI_4 0x10 -#define DCBX_APP_PRI_5 0x20 -#define DCBX_APP_PRI_6 0x40 -#define DCBX_APP_PRI_7 0x80 -#define DCBX_APP_SF_MASK 0x00000300 -#define DCBX_APP_SF_SHIFT 8 -#define DCBX_APP_SF_ETHTYPE 0 -#define DCBX_APP_SF_PORT 1 -#define DCBX_APP_SF_IEEE_MASK 0x0000f000 -#define DCBX_APP_SF_IEEE_SHIFT 12 -#define DCBX_APP_SF_IEEE_RESERVED 0 -#define DCBX_APP_SF_IEEE_ETHTYPE 1 -#define DCBX_APP_SF_IEEE_TCP_PORT 2 -#define DCBX_APP_SF_IEEE_UDP_PORT 3 -#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4 - -#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 -#define DCBX_APP_PROTOCOL_ID_SHIFT 16 -}; - -struct dcbx_app_priority_feature { - u32 flags; -#define DCBX_APP_ENABLED_MASK 0x00000001 -#define DCBX_APP_ENABLED_SHIFT 0 -#define DCBX_APP_WILLING_MASK 0x00000002 -#define DCBX_APP_WILLING_SHIFT 1 -#define DCBX_APP_ERROR_MASK 0x00000004 -#define DCBX_APP_ERROR_SHIFT 2 -#define DCBX_APP_MAX_TCS_MASK 0x0000f000 -#define DCBX_APP_MAX_TCS_SHIFT 12 -#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000 -#define DCBX_APP_NUM_ENTRIES_SHIFT 16 - struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL]; -}; - -struct dcbx_features { - struct dcbx_ets_feature ets; - u32 pfc; -#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff -#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40 -#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80 - -#define DCBX_PFC_FLAGS_MASK 0x0000ff00 -#define DCBX_PFC_FLAGS_SHIFT 8 -#define DCBX_PFC_CAPS_MASK 0x00000f00 -#define DCBX_PFC_CAPS_SHIFT 8 -#define DCBX_PFC_MBC_MASK 0x00004000 -#define DCBX_PFC_MBC_SHIFT 14 -#define DCBX_PFC_WILLING_MASK 0x00008000 -#define DCBX_PFC_WILLING_SHIFT 15 -#define DCBX_PFC_ENABLED_MASK 0x00010000 -#define DCBX_PFC_ENABLED_SHIFT 16 -#define DCBX_PFC_ERROR_MASK 0x00020000 -#define DCBX_PFC_ERROR_SHIFT 17 - - struct dcbx_app_priority_feature app; -}; - -struct dcbx_local_params { - u32 config; -#define DCBX_CONFIG_VERSION_MASK 0x00000007 -#define DCBX_CONFIG_VERSION_SHIFT 0 -#define DCBX_CONFIG_VERSION_DISABLED 0 -#define DCBX_CONFIG_VERSION_IEEE 1 -#define DCBX_CONFIG_VERSION_CEE 2 -#define DCBX_CONFIG_VERSION_STATIC 4 - - u32 flags; - struct dcbx_features features; -}; - -struct dcbx_mib { - u32 prefix_seq_num; - u32 flags; - struct dcbx_features features; - u32 suffix_seq_num; -}; - -struct lldp_system_tlvs_buffer_s { - u16 valid; - u16 length; - u32 data[MAX_SYSTEM_LLDP_TLV_DATA]; -}; - -struct dcb_dscp_map { - u32 flags; -#define DCB_DSCP_ENABLE_MASK 0x1 -#define DCB_DSCP_ENABLE_SHIFT 0 -#define DCB_DSCP_ENABLE 1 - u32 dscp_pri_map[8]; -}; - -struct public_global { - u32 max_path; - u32 max_ports; -#define MODE_1P 1 -#define 
MODE_2P 2 -#define MODE_3P 3 -#define MODE_4P 4 - u32 debug_mb_offset; - u32 phymod_dbg_mb_offset; - struct couple_mode_teaming cmt; - s32 internal_temperature; - u32 mfw_ver; - u32 running_bundle_id; - s32 external_temperature; - u32 mdump_reason; - u64 reserved; - u32 data_ptr; - u32 data_size; -}; - -struct fw_flr_mb { - u32 aggint; - u32 opgen_addr; - u32 accum_ack; -}; - -struct public_path { - struct fw_flr_mb flr_mb; - u32 mcp_vf_disabled[VF_MAX_STATIC / 32]; - - u32 process_kill; -#define PROCESS_KILL_COUNTER_MASK 0x0000ffff -#define PROCESS_KILL_COUNTER_SHIFT 0 -#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000 -#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16 -#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit) -}; - -struct public_port { - u32 validity_map; - - u32 link_status; -#define LINK_STATUS_LINK_UP 0x00000001 -#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e -#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1) -#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 -#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 -#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 -#define LINK_STATUS_PFC_ENABLED 0x00000100 -#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 -#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 -#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800 -#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000 -#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000 -#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000 -#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000 -#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000 -#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000c0000 -#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18) -#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1 << 18) -#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18) -#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18) -#define LINK_STATUS_SFP_TX_FAULT 0x00100000 -#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000 -#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000 -#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000 -#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000 -#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000 -#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000 - -#define LINK_STATUS_FEC_MODE_MASK 0x38000000 -#define LINK_STATUS_FEC_MODE_NONE (0 << 27) -#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 (1 << 27) -#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27) - - u32 link_status1; - u32 ext_phy_fw_version; - u32 drv_phy_cfg_addr; - - u32 port_stx; - - u32 stat_nig_timer; - - struct port_mf_cfg port_mf_config; - struct port_stats stats; - - u32 media_type; -#define MEDIA_UNSPECIFIED 0x0 -#define MEDIA_SFPP_10G_FIBER 0x1 -#define MEDIA_XFP_FIBER 0x2 -#define MEDIA_DA_TWINAX 0x3 -#define MEDIA_BASE_T 0x4 -#define MEDIA_SFP_1G_FIBER 0x5 -#define MEDIA_MODULE_FIBER 0x6 -#define MEDIA_KR 0xf0 -#define MEDIA_NOT_PRESENT 0xff - - u32 lfa_status; - u32 link_change_count; - - struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS]; - struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS]; - struct lldp_system_tlvs_buffer_s 
system_lldp_tlvs_buf; - - /* DCBX related MIB */ - struct dcbx_local_params local_admin_dcbx_mib; - struct dcbx_mib remote_dcbx_mib; - struct dcbx_mib operational_dcbx_mib; - - u32 reserved[2]; - - u32 transceiver_data; -#define ETH_TRANSCEIVER_STATE_MASK 0x000000ff -#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000 -#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000 -#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000 -#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001 -#define ETH_TRANSCEIVER_STATE_VALID 0x00000003 -#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008 -#define ETH_TRANSCEIVER_TYPE_MASK 0x0000ff00 -#define ETH_TRANSCEIVER_TYPE_OFFSET 0x8 -#define ETH_TRANSCEIVER_TYPE_NONE 0x00 -#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0xff -#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01 -#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02 -#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03 -#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04 -#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05 -#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06 -#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07 -#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08 -#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09 -#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a -#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b -#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c -#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d -#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e -#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f -#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10 -#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11 -#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12 -#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13 -#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14 -#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15 -#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16 -#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17 -#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18 -#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19 -#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a -#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b -#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c -#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d -#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e -#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f -#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20 -#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21 -#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR 0x37 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR 0x38 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR 0x39 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR 0x3a - - u32 wol_info; - u32 wol_pkt_len; - u32 wol_pkt_details; - struct dcb_dscp_map dcb_dscp_map; - - u32 eee_status; -#define EEE_ACTIVE_BIT BIT(0) -#define EEE_LD_ADV_STATUS_MASK 0x000000f0 -#define EEE_LD_ADV_STATUS_OFFSET 4 -#define EEE_1G_ADV BIT(1) -#define EEE_10G_ADV BIT(2) -#define EEE_LP_ADV_STATUS_MASK 0x00000f00 -#define EEE_LP_ADV_STATUS_OFFSET 8 -#define EEE_SUPPORTED_SPEED_MASK 0x0000f000 -#define EEE_SUPPORTED_SPEED_OFFSET 12 -#define EEE_1G_SUPPORTED BIT(1) -#define EEE_10G_SUPPORTED BIT(2) - - u32 eee_remote; -#define EEE_REMOTE_TW_TX_MASK 0x0000ffff -#define EEE_REMOTE_TW_TX_OFFSET 0 -#define EEE_REMOTE_TW_RX_MASK 0xffff0000 -#define EEE_REMOTE_TW_RX_OFFSET 16 - - u32 reserved1; - u32 
oem_cfg_port; -#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003 -#define OEM_CFG_CHANNEL_TYPE_OFFSET 0 -#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1 -#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2 -#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C -#define OEM_CFG_SCHED_TYPE_OFFSET 2 -#define OEM_CFG_SCHED_TYPE_ETS 0x1 -#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2 -}; - -struct public_func { - u32 reserved0[2]; - - u32 mtu_size; - - u32 reserved[7]; - - u32 config; -#define FUNC_MF_CFG_FUNC_HIDE 0x00000001 -#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002 -#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001 - -#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0 -#define FUNC_MF_CFG_PROTOCOL_SHIFT 4 -#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000 -#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010 -#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020 -#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030 -#define FUNC_MF_CFG_PROTOCOL_NVMETCP 0x00000040 -#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000040 - -#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00 -#define FUNC_MF_CFG_MIN_BW_SHIFT 8 -#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 -#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000 -#define FUNC_MF_CFG_MAX_BW_SHIFT 16 -#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000 - - u32 status; -#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001 - - u32 mac_upper; -#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff -#define FUNC_MF_CFG_UPPERMAC_SHIFT 0 -#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK - u32 mac_lower; -#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff - - u32 fcoe_wwn_port_name_upper; - u32 fcoe_wwn_port_name_lower; - - u32 fcoe_wwn_node_name_upper; - u32 fcoe_wwn_node_name_lower; - - u32 ovlan_stag; -#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff -#define FUNC_MF_CFG_OV_STAG_SHIFT 0 -#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK - - u32 pf_allocation; - - u32 preserve_data; - - u32 driver_last_activity_ts; - - u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; - - u32 drv_id; -#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff -#define DRV_ID_PDA_COMP_VER_SHIFT 0 - -#define LOAD_REQ_HSI_VERSION 2 -#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000 -#define DRV_ID_MCP_HSI_VER_SHIFT 16 -#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \ - DRV_ID_MCP_HSI_VER_SHIFT) - -#define DRV_ID_DRV_TYPE_MASK 0x7f000000 -#define DRV_ID_DRV_TYPE_SHIFT 24 -#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT) - -#define DRV_ID_DRV_INIT_HW_MASK 0x80000000 -#define DRV_ID_DRV_INIT_HW_SHIFT 31 -#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT) - - u32 oem_cfg_func; -#define OEM_CFG_FUNC_TC_MASK 0x0000000F -#define OEM_CFG_FUNC_TC_OFFSET 0 -#define OEM_CFG_FUNC_TC_0 0x0 -#define OEM_CFG_FUNC_TC_1 0x1 -#define OEM_CFG_FUNC_TC_2 0x2 -#define OEM_CFG_FUNC_TC_3 0x3 -#define OEM_CFG_FUNC_TC_4 0x4 -#define OEM_CFG_FUNC_TC_5 0x5 -#define OEM_CFG_FUNC_TC_6 0x6 -#define OEM_CFG_FUNC_TC_7 0x7 - -#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030 -#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4 -#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1 -#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2 -}; - -struct mcp_mac { - u32 mac_upper; - u32 mac_lower; -}; - -struct mcp_val64 { - u32 lo; - u32 hi; -}; - -struct mcp_file_att { - u32 nvm_start_addr; - u32 len; -}; - -struct bist_nvm_image_att { - u32 return_code; - u32 image_type; - u32 nvm_start_addr; - u32 len; -}; - -#define MCP_DRV_VER_STR_SIZE 16 -#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32)) -#define MCP_DRV_NVM_BUF_LEN 32 -struct 
drv_version_stc { - u32 version; - u8 name[MCP_DRV_VER_STR_SIZE - 4]; -}; - -struct lan_stats_stc { - u64 ucast_rx_pkts; - u64 ucast_tx_pkts; - u32 fcs_err; - u32 rserved; -}; - -struct fcoe_stats_stc { - u64 rx_pkts; - u64 tx_pkts; - u32 fcs_err; - u32 login_failure; -}; - -struct ocbb_data_stc { - u32 ocbb_host_addr; - u32 ocsd_host_addr; - u32 ocsd_req_update_interval; -}; - -#define MAX_NUM_OF_SENSORS 7 -struct temperature_status_stc { - u32 num_of_sensors; - u32 sensor[MAX_NUM_OF_SENSORS]; -}; - -/* crash dump configuration header */ -struct mdump_config_stc { - u32 version; - u32 config; - u32 epoc; - u32 num_of_logs; - u32 valid_logs; -}; - -enum resource_id_enum { - RESOURCE_NUM_SB_E = 0, - RESOURCE_NUM_L2_QUEUE_E = 1, - RESOURCE_NUM_VPORT_E = 2, - RESOURCE_NUM_VMQ_E = 3, - RESOURCE_FACTOR_NUM_RSS_PF_E = 4, - RESOURCE_FACTOR_RSS_PER_VF_E = 5, - RESOURCE_NUM_RL_E = 6, - RESOURCE_NUM_PQ_E = 7, - RESOURCE_NUM_VF_E = 8, - RESOURCE_VFC_FILTER_E = 9, - RESOURCE_ILT_E = 10, - RESOURCE_CQS_E = 11, - RESOURCE_GFT_PROFILES_E = 12, - RESOURCE_NUM_TC_E = 13, - RESOURCE_NUM_RSS_ENGINES_E = 14, - RESOURCE_LL2_QUEUE_E = 15, - RESOURCE_RDMA_STATS_QUEUE_E = 16, - RESOURCE_BDQ_E = 17, - RESOURCE_QCN_E = 18, - RESOURCE_LLH_FILTER_E = 19, - RESOURCE_VF_MAC_ADDR = 20, - RESOURCE_LL2_CQS_E = 21, - RESOURCE_VF_CNQS = 22, - RESOURCE_MAX_NUM, - RESOURCE_NUM_INVALID = 0xFFFFFFFF -}; - -/* Resource ID is to be filled by the driver in the MB request - * Size, offset & flags to be filled by the MFW in the MB response - */ -struct resource_info { - enum resource_id_enum res_id; - u32 size; /* number of allocated resources */ - u32 offset; /* Offset of the 1st resource */ - u32 vf_size; - u32 vf_offset; - u32 flags; -#define RESOURCE_ELEMENT_STRICT (1 << 0) -}; - -#define DRV_ROLE_NONE 0 -#define DRV_ROLE_PREBOOT 1 -#define DRV_ROLE_OS 2 -#define DRV_ROLE_KDUMP 3 - -struct load_req_stc { - u32 drv_ver_0; - u32 drv_ver_1; - u32 fw_ver; - u32 misc0; -#define LOAD_REQ_ROLE_MASK 0x000000FF -#define LOAD_REQ_ROLE_SHIFT 0 -#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00 -#define LOAD_REQ_LOCK_TO_SHIFT 8 -#define LOAD_REQ_LOCK_TO_DEFAULT 0 -#define LOAD_REQ_LOCK_TO_NONE 255 -#define LOAD_REQ_FORCE_MASK 0x000F0000 -#define LOAD_REQ_FORCE_SHIFT 16 -#define LOAD_REQ_FORCE_NONE 0 -#define LOAD_REQ_FORCE_PF 1 -#define LOAD_REQ_FORCE_ALL 2 -#define LOAD_REQ_FLAGS0_MASK 0x00F00000 -#define LOAD_REQ_FLAGS0_SHIFT 20 -#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0) -}; - -struct load_rsp_stc { - u32 drv_ver_0; - u32 drv_ver_1; - u32 fw_ver; - u32 misc0; -#define LOAD_RSP_ROLE_MASK 0x000000FF -#define LOAD_RSP_ROLE_SHIFT 0 -#define LOAD_RSP_HSI_MASK 0x0000FF00 -#define LOAD_RSP_HSI_SHIFT 8 -#define LOAD_RSP_FLAGS0_MASK 0x000F0000 -#define LOAD_RSP_FLAGS0_SHIFT 16 -#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0) -}; - -struct mdump_retain_data_stc { - u32 valid; - u32 epoch; - u32 pf; - u32 status; -}; - -union drv_union_data { - u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD]; - struct mcp_mac wol_mac; - - struct eth_phy_cfg drv_phy_cfg; - - struct mcp_val64 val64; - - u8 raw_data[MCP_DRV_NVM_BUF_LEN]; - - struct mcp_file_att file_att; - - u32 ack_vf_disabled[VF_MAX_STATIC / 32]; - - struct drv_version_stc drv_version; - - struct lan_stats_stc lan_stats; - struct fcoe_stats_stc fcoe_stats; - struct ocbb_data_stc ocbb_info; - struct temperature_status_stc temp_info; - struct resource_info resource; - struct bist_nvm_image_att nvm_image_att; - struct mdump_config_stc mdump_config; -}; - -struct public_drv_mb { - u32 drv_mb_header; -#define 
DRV_MSG_CODE_MASK 0xffff0000 -#define DRV_MSG_CODE_LOAD_REQ 0x10000000 -#define DRV_MSG_CODE_LOAD_DONE 0x11000000 -#define DRV_MSG_CODE_INIT_HW 0x12000000 -#define DRV_MSG_CODE_CANCEL_LOAD_REQ 0x13000000 -#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000 -#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 -#define DRV_MSG_CODE_INIT_PHY 0x22000000 -#define DRV_MSG_CODE_LINK_RESET 0x23000000 -#define DRV_MSG_CODE_SET_DCBX 0x25000000 -#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG 0x26000000 -#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM 0x27000000 -#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS 0x28000000 -#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER 0x29000000 -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000 -#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 -#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000 -#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000 -#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000 -#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000 -#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000 -#define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000 - -#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 -#define DRV_MSG_CODE_NIG_DRAIN 0x30000000 -#define DRV_MSG_CODE_S_TAG_UPDATE_ACK 0x3b000000 -#define DRV_MSG_CODE_GET_NVM_CFG_OPTION 0x003e0000 -#define DRV_MSG_CODE_SET_NVM_CFG_OPTION 0x003f0000 -#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000 -#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 -#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000 -#define DRV_MSG_CODE_CFG_PF_VFS_MSIX 0xc0020000 -#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000 -#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000 -#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000 -#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000 -#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000 -#define DRV_MSG_CODE_MCP_RESET 0x00090000 -#define DRV_MSG_CODE_SET_VERSION 0x000f0000 -#define DRV_MSG_CODE_MCP_HALT 0x00100000 -#define DRV_MSG_CODE_SET_VMAC 0x00110000 -#define DRV_MSG_CODE_GET_VMAC 0x00120000 -#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4 -#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30 -#define DRV_MSG_CODE_VMAC_TYPE_MAC 1 -#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2 -#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3 - -#define DRV_MSG_CODE_GET_STATS 0x00130000 -#define DRV_MSG_CODE_STATS_TYPE_LAN 1 -#define DRV_MSG_CODE_STATS_TYPE_FCOE 2 -#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3 -#define DRV_MSG_CODE_STATS_TYPE_RDMA 4 - -#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000 - -#define DRV_MSG_CODE_MASK_PARITIES 0x001a0000 - -#define DRV_MSG_CODE_BIST_TEST 0x001e0000 -#define DRV_MSG_CODE_SET_LED_MODE 0x00200000 -#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000 -/* Send crash dump commands with param[3:0] - opcode */ -#define DRV_MSG_CODE_MDUMP_CMD 0x00250000 -#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000 -#define DRV_MSG_CODE_GET_ENGINE_CONFIG 0x00370000 -#define DRV_MSG_CODE_GET_PPFID_BITMAP 0x43000000 - -#define DRV_MSG_CODE_DEBUG_DATA_SEND 0xc0040000 - -#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F -#define RESOURCE_CMD_REQ_RESC_SHIFT 0 -#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0 -#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5 -#define RESOURCE_OPCODE_REQ 1 -#define RESOURCE_OPCODE_REQ_WO_AGING 2 -#define RESOURCE_OPCODE_REQ_W_AGING 3 -#define RESOURCE_OPCODE_RELEASE 4 -#define RESOURCE_OPCODE_FORCE_RELEASE 5 -#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00 -#define RESOURCE_CMD_REQ_AGE_SHIFT 8 - -#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF -#define RESOURCE_CMD_RSP_OWNER_SHIFT 0 -#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700 -#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8 -#define RESOURCE_OPCODE_GNT 1 -#define 
RESOURCE_OPCODE_BUSY 2 -#define RESOURCE_OPCODE_RELEASED 3 -#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4 -#define RESOURCE_OPCODE_WRONG_OWNER 5 -#define RESOURCE_OPCODE_UNKNOWN_CMD 255 - -#define RESOURCE_DUMP 0 - -/* DRV_MSG_CODE_MDUMP_CMD parameters */ -#define MDUMP_DRV_PARAM_OPCODE_MASK 0x0000000f -#define DRV_MSG_CODE_MDUMP_ACK 0x01 -#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02 -#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03 -#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04 -#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05 -#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06 -#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07 -#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08 - -#define DRV_MSG_CODE_HW_DUMP_TRIGGER 0x0a -#define DRV_MSG_CODE_MDUMP_GEN_MDUMP2 0x0b -#define DRV_MSG_CODE_MDUMP_FREE_MDUMP2 0x0c - -#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000 -#define DRV_MSG_CODE_OS_WOL 0x002e0000 - -#define DRV_MSG_CODE_FEATURE_SUPPORT 0x00300000 -#define DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT 0x00310000 -#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff - - u32 drv_mb_param; -#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000 -#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001 -#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002 -#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003 -#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF -#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3 - -#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3 -#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0 -#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF -#define DRV_MB_PARAM_NVM_LEN_OFFSET 24 -#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000 - -#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0 -#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF -#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8 -#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00 -#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001 -#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0 - -#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0 -#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F -#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0 -#define DRV_MB_PARAM_OV_CURR_CFG_OS 1 -#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2 -#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3 - -#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0 -#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF -#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000 -#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000 -#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00 -#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF - -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0 -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1 -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2 -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3 -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4 -#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5 - -#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0 -#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF - -#define DRV_MB_PARAM_WOL_MASK (DRV_MB_PARAM_WOL_DEFAULT | \ - DRV_MB_PARAM_WOL_DISABLED | \ - DRV_MB_PARAM_WOL_ENABLED) -#define DRV_MB_PARAM_WOL_DEFAULT DRV_MB_PARAM_UNLOAD_WOL_MCP -#define DRV_MB_PARAM_WOL_DISABLED DRV_MB_PARAM_UNLOAD_WOL_DISABLED -#define DRV_MB_PARAM_WOL_ENABLED DRV_MB_PARAM_UNLOAD_WOL_ENABLED - -#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \ - DRV_MB_PARAM_ESWITCH_MODE_VEB | \ - DRV_MB_PARAM_ESWITCH_MODE_VEPA) -#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0 -#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1 -#define 
DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2 - -#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1 -#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0 - -#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 -#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 -#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 - -#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0 -#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003 -#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2 -#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000fc -#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8 -#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000ff00 -#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16 -#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xffff0000 - - /* Resource Allocation params - Driver version support */ -#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000 -#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 -#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff -#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0 - -#define DRV_MB_PARAM_BIST_REGISTER_TEST 1 -#define DRV_MB_PARAM_BIST_CLOCK_TEST 2 -#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3 -#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4 - -#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0 -#define DRV_MB_PARAM_BIST_RC_PASSED 1 -#define DRV_MB_PARAM_BIST_RC_FAILED 2 -#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3 - -#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0 -#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000ff -#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8 -#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000ff00 - -#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000ffff -#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0 -#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 -#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL 0x00000004 -#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL 0x00000008 -#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000 - -/* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */ -#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET 0 -#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK 0xff - -/* Driver attributes params */ -#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0 -#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00ffffff -#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24 -#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xff000000 - -#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET 0 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000ffff -#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000 -#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT 17 -#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000 -#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT 18 -#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000 -#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT 19 -#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT 20 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK 0x00100000 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT 24 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK 0x0f000000 - - u32 fw_mb_header; -#define FW_MSG_CODE_MASK 0xffff0000 -#define FW_MSG_CODE_UNSUPPORTED 0x00000000 -#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000 -#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 -#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 0x10210000 -#define 
FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10230000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT 0x10310000 -#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 -#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000 -#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000 -#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000 -#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000 -#define FW_MSG_CODE_RESOURCE_ALLOC_OK 0x34000000 -#define FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN 0x35000000 -#define FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED 0x36000000 -#define FW_MSG_CODE_S_TAG_UPDATE_ACK_DONE 0x3b000000 -#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000 - -#define FW_MSG_CODE_NVM_OK 0x00010000 -#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000 -#define FW_MSG_CODE_PHY_OK 0x00110000 -#define FW_MSG_CODE_OK 0x00160000 -#define FW_MSG_CODE_ERROR 0x00170000 -#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000 -#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000 -#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000 - -#define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000 -#define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000 -#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE 0x00870000 -#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff - -#define FW_MSG_CODE_DEBUG_DATA_SEND_INV_ARG 0xb0070000 -#define FW_MSG_CODE_DEBUG_DATA_SEND_BUF_FULL 0xb0080000 -#define FW_MSG_CODE_DEBUG_DATA_SEND_NO_BUF 0xb0090000 -#define FW_MSG_CODE_DEBUG_NOT_ENABLED 0xb00a0000 -#define FW_MSG_CODE_DEBUG_DATA_SEND_OK 0xb00b0000 - -#define FW_MSG_CODE_MDUMP_INVALID_CMD 0x00030000 - - u32 fw_mb_param; -#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000 -#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 -#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff -#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0 - - /* Get PF RDMA protocol command response */ -#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0 -#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1 -#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2 -#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3 - - /* Get MFW feature support response */ -#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ BIT(0) -#define FW_MB_PARAM_FEATURE_SUPPORT_EEE BIT(1) -#define FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL BIT(5) -#define FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL BIT(6) -#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK BIT(16) - -#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0) - -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001 -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0 -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002 -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3 - -#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xff -#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0 - - u32 drv_pulse_mb; -#define DRV_PULSE_SEQ_MASK 0x00007fff -#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 -#define DRV_PULSE_ALWAYS_ALIVE 0x00008000 - - u32 mcp_pulse_mb; -#define MCP_PULSE_SEQ_MASK 0x00007fff -#define MCP_PULSE_ALWAYS_ALIVE 0x00008000 -#define MCP_EVENT_MASK 0xffff0000 -#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 - - union drv_union_data union_data; -}; - -#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff -#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0 -#define 
FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000 -#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24 - -enum MFW_DRV_MSG_TYPE { - MFW_DRV_MSG_LINK_CHANGE, - MFW_DRV_MSG_FLR_FW_ACK_FAILED, - MFW_DRV_MSG_VF_DISABLED, - MFW_DRV_MSG_LLDP_DATA_UPDATED, - MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED, - MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED, - MFW_DRV_MSG_ERROR_RECOVERY, - MFW_DRV_MSG_BW_UPDATE, - MFW_DRV_MSG_S_TAG_UPDATE, - MFW_DRV_MSG_GET_LAN_STATS, - MFW_DRV_MSG_GET_FCOE_STATS, - MFW_DRV_MSG_GET_ISCSI_STATS, - MFW_DRV_MSG_GET_RDMA_STATS, - MFW_DRV_MSG_FAILURE_DETECTED, - MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, - MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED, - MFW_DRV_MSG_RESERVED, - MFW_DRV_MSG_GET_TLV_REQ, - MFW_DRV_MSG_OEM_CFG_UPDATE, - MFW_DRV_MSG_MAX -}; - -#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1) -#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2) -#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3) -#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id)) - -struct public_mfw_mb { - u32 sup_msgs; - u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; - u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; -}; - -enum public_sections { - PUBLIC_DRV_MB, - PUBLIC_MFW_MB, - PUBLIC_GLOBAL, - PUBLIC_PATH, - PUBLIC_PORT, - PUBLIC_FUNC, - PUBLIC_MAX_SECTIONS -}; - -struct mcp_public_data { - u32 num_sections; - u32 sections[PUBLIC_MAX_SECTIONS]; - struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX]; - struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX]; - struct public_global global; - struct public_path path[MCP_GLOB_PATH_MAX]; - struct public_port port[MCP_GLOB_PORT_MAX]; - struct public_func func[MCP_GLOB_FUNC_MAX]; -}; - -#define MAX_I2C_TRANSACTION_SIZE 16 - -/* OCBB definitions */ -enum tlvs { - /* Category 1: Device Properties */ - DRV_TLV_CLP_STR, - DRV_TLV_CLP_STR_CTD, - /* Category 6: Device Configuration */ - DRV_TLV_SCSI_TO, - DRV_TLV_R_T_TOV, - DRV_TLV_R_A_TOV, - DRV_TLV_E_D_TOV, - DRV_TLV_CR_TOV, - DRV_TLV_BOOT_TYPE, - /* Category 8: Port Configuration */ - DRV_TLV_NPIV_ENABLED, - /* Category 10: Function Configuration */ - DRV_TLV_FEATURE_FLAGS, - DRV_TLV_LOCAL_ADMIN_ADDR, - DRV_TLV_ADDITIONAL_MAC_ADDR_1, - DRV_TLV_ADDITIONAL_MAC_ADDR_2, - DRV_TLV_LSO_MAX_OFFLOAD_SIZE, - DRV_TLV_LSO_MIN_SEGMENT_COUNT, - DRV_TLV_PROMISCUOUS_MODE, - DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE, - DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE, - DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG, - DRV_TLV_FLEX_NIC_OUTER_VLAN_ID, - DRV_TLV_OS_DRIVER_STATES, - DRV_TLV_PXE_BOOT_PROGRESS, - /* Category 12: FC/FCoE Configuration */ - DRV_TLV_NPIV_STATE, - DRV_TLV_NUM_OF_NPIV_IDS, - DRV_TLV_SWITCH_NAME, - DRV_TLV_SWITCH_PORT_NUM, - DRV_TLV_SWITCH_PORT_ID, - DRV_TLV_VENDOR_NAME, - DRV_TLV_SWITCH_MODEL, - DRV_TLV_SWITCH_FW_VER, - DRV_TLV_QOS_PRIORITY_PER_802_1P, - DRV_TLV_PORT_ALIAS, - DRV_TLV_PORT_STATE, - DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE, - DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE, - DRV_TLV_LINK_FAILURE_COUNT, - DRV_TLV_FCOE_BOOT_PROGRESS, - /* Category 13: iSCSI Configuration */ - DRV_TLV_TARGET_LLMNR_ENABLED, - DRV_TLV_HEADER_DIGEST_FLAG_ENABLED, - DRV_TLV_DATA_DIGEST_FLAG_ENABLED, - DRV_TLV_AUTHENTICATION_METHOD, - DRV_TLV_ISCSI_BOOT_TARGET_PORTAL, - DRV_TLV_MAX_FRAME_SIZE, - DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE, - DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE, - DRV_TLV_ISCSI_BOOT_PROGRESS, - /* Category 20: Device Data */ - DRV_TLV_PCIE_BUS_RX_UTILIZATION, - DRV_TLV_PCIE_BUS_TX_UTILIZATION, - DRV_TLV_DEVICE_CPU_CORES_UTILIZATION, - DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED, - DRV_TLV_NCSI_RX_BYTES_RECEIVED, - DRV_TLV_NCSI_TX_BYTES_SENT, - /* 
Category 22: Base Port Data */ - DRV_TLV_RX_DISCARDS, - DRV_TLV_RX_ERRORS, - DRV_TLV_TX_ERRORS, - DRV_TLV_TX_DISCARDS, - DRV_TLV_RX_FRAMES_RECEIVED, - DRV_TLV_TX_FRAMES_SENT, - /* Category 23: FC/FCoE Port Data */ - DRV_TLV_RX_BROADCAST_PACKETS, - DRV_TLV_TX_BROADCAST_PACKETS, - /* Category 28: Base Function Data */ - DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4, - DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6, - DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, - DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, - DRV_TLV_PF_RX_FRAMES_RECEIVED, - DRV_TLV_RX_BYTES_RECEIVED, - DRV_TLV_PF_TX_FRAMES_SENT, - DRV_TLV_TX_BYTES_SENT, - DRV_TLV_IOV_OFFLOAD, - DRV_TLV_PCI_ERRORS_CAP_ID, - DRV_TLV_UNCORRECTABLE_ERROR_STATUS, - DRV_TLV_UNCORRECTABLE_ERROR_MASK, - DRV_TLV_CORRECTABLE_ERROR_STATUS, - DRV_TLV_CORRECTABLE_ERROR_MASK, - DRV_TLV_PCI_ERRORS_AECC_REGISTER, - DRV_TLV_TX_QUEUES_EMPTY, - DRV_TLV_RX_QUEUES_EMPTY, - DRV_TLV_TX_QUEUES_FULL, - DRV_TLV_RX_QUEUES_FULL, - /* Category 29: FC/FCoE Function Data */ - DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, - DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, - DRV_TLV_FCOE_RX_FRAMES_RECEIVED, - DRV_TLV_FCOE_RX_BYTES_RECEIVED, - DRV_TLV_FCOE_TX_FRAMES_SENT, - DRV_TLV_FCOE_TX_BYTES_SENT, - DRV_TLV_CRC_ERROR_COUNT, - DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID, - DRV_TLV_CRC_ERROR_1_TIMESTAMP, - DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID, - DRV_TLV_CRC_ERROR_2_TIMESTAMP, - DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID, - DRV_TLV_CRC_ERROR_3_TIMESTAMP, - DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID, - DRV_TLV_CRC_ERROR_4_TIMESTAMP, - DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID, - DRV_TLV_CRC_ERROR_5_TIMESTAMP, - DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT, - DRV_TLV_LOSS_OF_SIGNAL_ERRORS, - DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT, - DRV_TLV_DISPARITY_ERROR_COUNT, - DRV_TLV_CODE_VIOLATION_ERROR_COUNT, - DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1, - DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2, - DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3, - DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4, - DRV_TLV_LAST_FLOGI_TIMESTAMP, - DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1, - DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2, - DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3, - DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4, - DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP, - DRV_TLV_LAST_FLOGI_RJT, - DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP, - DRV_TLV_FDISCS_SENT_COUNT, - DRV_TLV_FDISC_ACCS_RECEIVED, - DRV_TLV_FDISC_RJTS_RECEIVED, - DRV_TLV_PLOGI_SENT_COUNT, - DRV_TLV_PLOGI_ACCS_RECEIVED, - DRV_TLV_PLOGI_RJTS_RECEIVED, - DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID, - DRV_TLV_PLOGI_1_TIMESTAMP, - DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID, - DRV_TLV_PLOGI_2_TIMESTAMP, - DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID, - DRV_TLV_PLOGI_3_TIMESTAMP, - DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID, - DRV_TLV_PLOGI_4_TIMESTAMP, - DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID, - DRV_TLV_PLOGI_5_TIMESTAMP, - DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID, - DRV_TLV_PLOGI_1_ACC_TIMESTAMP, - DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID, - DRV_TLV_PLOGI_2_ACC_TIMESTAMP, - DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID, - DRV_TLV_PLOGI_3_ACC_TIMESTAMP, - DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID, - DRV_TLV_PLOGI_4_ACC_TIMESTAMP, - DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID, - DRV_TLV_PLOGI_5_ACC_TIMESTAMP, - DRV_TLV_LOGOS_ISSUED, - DRV_TLV_LOGO_ACCS_RECEIVED, - DRV_TLV_LOGO_RJTS_RECEIVED, - DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID, - DRV_TLV_LOGO_1_TIMESTAMP, - DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID, - DRV_TLV_LOGO_2_TIMESTAMP, - 
DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID, - DRV_TLV_LOGO_3_TIMESTAMP, - DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID, - DRV_TLV_LOGO_4_TIMESTAMP, - DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID, - DRV_TLV_LOGO_5_TIMESTAMP, - DRV_TLV_LOGOS_RECEIVED, - DRV_TLV_ACCS_ISSUED, - DRV_TLV_PRLIS_ISSUED, - DRV_TLV_ACCS_RECEIVED, - DRV_TLV_ABTS_SENT_COUNT, - DRV_TLV_ABTS_ACCS_RECEIVED, - DRV_TLV_ABTS_RJTS_RECEIVED, - DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID, - DRV_TLV_ABTS_1_TIMESTAMP, - DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID, - DRV_TLV_ABTS_2_TIMESTAMP, - DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID, - DRV_TLV_ABTS_3_TIMESTAMP, - DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID, - DRV_TLV_ABTS_4_TIMESTAMP, - DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID, - DRV_TLV_ABTS_5_TIMESTAMP, - DRV_TLV_RSCNS_RECEIVED, - DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1, - DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2, - DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3, - DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4, - DRV_TLV_LUN_RESETS_ISSUED, - DRV_TLV_ABORT_TASK_SETS_ISSUED, - DRV_TLV_TPRLOS_SENT, - DRV_TLV_NOS_SENT_COUNT, - DRV_TLV_NOS_RECEIVED_COUNT, - DRV_TLV_OLS_COUNT, - DRV_TLV_LR_COUNT, - DRV_TLV_LRR_COUNT, - DRV_TLV_LIP_SENT_COUNT, - DRV_TLV_LIP_RECEIVED_COUNT, - DRV_TLV_EOFA_COUNT, - DRV_TLV_EOFNI_COUNT, - DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT, - DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT, - DRV_TLV_SCSI_STATUS_BUSY_COUNT, - DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT, - DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT, - DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT, - DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT, - DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT, - DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT, - DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ, - DRV_TLV_SCSI_CHECK_1_TIMESTAMP, - DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ, - DRV_TLV_SCSI_CHECK_2_TIMESTAMP, - DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ, - DRV_TLV_SCSI_CHECK_3_TIMESTAMP, - DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ, - DRV_TLV_SCSI_CHECK_4_TIMESTAMP, - DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ, - DRV_TLV_SCSI_CHECK_5_TIMESTAMP, - /* Category 30: iSCSI Function Data */ - DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, - DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, - DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED, - DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED, - DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT, - DRV_TLV_ISCSI_PDU_TX_BYTES_SENT -}; - -struct nvm_cfg_mac_address { - u32 mac_addr_hi; -#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000ffff -#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0 - - u32 mac_addr_lo; -}; - -struct nvm_cfg1_glob { - u32 generic_cont0; -#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000ff0 -#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4 -#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0 -#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1 -#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2 -#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3 -#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4 -#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5 -#define NVM_CFG1_GLOB_MF_MODE_BD 0x6 -#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7 - - u32 engineering_change[3]; - u32 manufacturing_id; - u32 serial_number[4]; - u32 pcie_cfg; - u32 mgmt_traffic; - - u32 core_cfg; -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000ff -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5 -#define 
NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xb -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xc -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xd -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xe -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xf -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1 0x11 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1 0x12 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2 0x13 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2 0x14 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4 0x15 - - u32 e_lane_cfg1; - u32 e_lane_cfg2; - u32 f_lane_cfg1; - u32 f_lane_cfg2; - u32 mps10_preemphasis; - u32 mps10_driver_current; - u32 mps25_preemphasis; - u32 mps25_driver_current; - u32 pci_id; - u32 pci_subsys_id; - u32 bar; - u32 mps10_txfir_main; - u32 mps10_txfir_post; - u32 mps25_txfir_main; - u32 mps25_txfir_post; - u32 manufacture_ver; - u32 manufacture_time; - u32 led_global_settings; - u32 generic_cont1; - - u32 mbi_version; -#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000ff -#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0 -#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000ff00 -#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8 -#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00ff0000 -#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16 - - u32 mbi_date; - u32 misc_sig; - - u32 device_capabilities; -#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1 -#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2 -#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4 -#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8 - - u32 power_dissipated; - u32 power_consumed; - u32 efi_version; - u32 multi_net_modes_cap; - u32 reserved[41]; -}; - -struct nvm_cfg1_path { - u32 reserved[30]; -}; - -struct nvm_cfg1_port { - u32 rel_to_opt123; - u32 rel_to_opt124; - - u32 generic_cont0; -#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000f0000 -#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16 -#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0 -#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1 -#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2 -#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3 -#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00f00000 -#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20 -#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1 -#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2 -#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4 - - u32 pcie_cfg; - u32 features; - - u32 speed_cap_mask; -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000ffff -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40 - - u32 link_settings; -#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000f -#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8 -#define 
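The nvm_cfg1_* structures being moved here describe packed u32 words through *_MASK / *_OFFSET pairs. Below is a minimal sketch, not part of the patch, of how such a field is read back, using the MF-mode and MBI-version fields as examples; the sample register values are invented.

#include <stdint.h>
#include <stdio.h>

#define NVM_CFG1_GLOB_MF_MODE_MASK          0x00000ff0
#define NVM_CFG1_GLOB_MF_MODE_OFFSET        4
#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK    0x000000ff
#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET  0
#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK    0x0000ff00
#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET  8
#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK    0x00ff0000
#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET  16

/* Generic "read a packed field" helper in the MASK/OFFSET convention. */
static uint32_t nvm_cfg_get_field(uint32_t reg, uint32_t mask, uint32_t offset)
{
        return (reg & mask) >> offset;
}

int main(void)
{
        uint32_t generic_cont0 = 0x00000070;    /* sample word: MF mode 0x7 (UFP) */
        uint32_t mbi_version = 0x00010203;      /* sample word: MBI version 1.2.3 */

        printf("mf_mode = %u\n",
               nvm_cfg_get_field(generic_cont0, NVM_CFG1_GLOB_MF_MODE_MASK,
                                 NVM_CFG1_GLOB_MF_MODE_OFFSET));
        printf("mbi %u.%u.%u\n",
               nvm_cfg_get_field(mbi_version, NVM_CFG1_GLOB_MBI_VERSION_2_MASK,
                                 NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET),
               nvm_cfg_get_field(mbi_version, NVM_CFG1_GLOB_MBI_VERSION_1_MASK,
                                 NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET),
               nvm_cfg_get_field(mbi_version, NVM_CFG1_GLOB_MBI_VERSION_0_MASK,
                                 NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET));
        return 0;
}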
NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4 -#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000e0000 -#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17 -#define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0 -#define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1 -#define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2 -#define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7 - - u32 phy_cfg; - u32 mgmt_traffic; - - u32 ext_phy; - /* EEE power saving mode */ -#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00ff0000 -#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16 -#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0 -#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1 -#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2 -#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3 - - u32 mba_cfg1; - u32 mba_cfg2; - u32 vf_cfg; - struct nvm_cfg_mac_address lldp_mac_address; - u32 led_port_settings; - u32 transceiver_00; - u32 device_ids; - - u32 board_cfg; -#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000ff -#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0 -#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0 -#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1 -#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2 -#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3 -#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4 - - u32 mnm_10g_cap; - u32 mnm_10g_ctrl; - u32 mnm_10g_misc; - u32 mnm_25g_cap; - u32 mnm_25g_ctrl; - u32 mnm_25g_misc; - u32 mnm_40g_cap; - u32 mnm_40g_ctrl; - u32 mnm_40g_misc; - u32 mnm_50g_cap; - u32 mnm_50g_ctrl; - u32 mnm_50g_misc; - u32 mnm_100g_cap; - u32 mnm_100g_ctrl; - u32 mnm_100g_misc; - - u32 temperature; - u32 ext_phy_cfg1; - - u32 extended_speed; -#define NVM_CFG1_PORT_EXTENDED_SPEED_MASK 0x0000ffff -#define NVM_CFG1_PORT_EXTENDED_SPEED_OFFSET 0 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN 0x1 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G 0x2 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G 0x4 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G 0x8 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G 0x10 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G 0x20 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R 0x40 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2 0x80 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2 0x100 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4 0x200 -#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4 0x400 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_MASK 0xffff0000 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_OFFSET 16 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED 0x1 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G 0x2 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G 0x4 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G 0x8 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G 0x10 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G 0x20 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R 0x40 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2 0x80 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2 0x100 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4 0x200 -#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4 0x400 - - u32 extended_fec_mode; - - u32 reserved[112]; -}; - -struct nvm_cfg1_func { - struct nvm_cfg_mac_address mac_address; - u32 rsrv1; - u32 rsrv2; - u32 device_id; - u32 
cmn_cfg; - u32 pci_cfg; - struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; - struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; - u32 preboot_generic_cfg; - u32 reserved[8]; -}; - -struct nvm_cfg1 { - struct nvm_cfg1_glob glob; - struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; - struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; - struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; -}; - -enum spad_sections { - SPAD_SECTION_TRACE, - SPAD_SECTION_NVM_CFG, - SPAD_SECTION_PUBLIC, - SPAD_SECTION_PRIVATE, - SPAD_SECTION_MAX -}; - -#define MCP_TRACE_SIZE 2048 /* 2kb */ - -/* This section is located at a fixed location in the beginning of the - * scratchpad, to ensure that the MCP trace is not run over during MFW upgrade. - * All the rest of data has a floating location which differs from version to - * version, and is pointed by the mcp_meta_data below. - * Moreover, the spad_layout section is part of the MFW firmware, and is loaded - * with it from nvram in order to clear this portion. - */ -struct static_init { - u32 num_sections; - offsize_t sections[SPAD_SECTION_MAX]; -#define SECTION(_sec_) (*((offsize_t *)(STRUCT_OFFSET(sections[_sec_])))) - - struct mcp_trace trace; -#define MCP_TRACE_P ((struct mcp_trace *)(STRUCT_OFFSET(trace))) - u8 trace_buffer[MCP_TRACE_SIZE]; -#define MCP_TRACE_BUF ((u8 *)(STRUCT_OFFSET(trace_buffer))) - /* running_mfw has the same definition as in nvm_map.h. - * This bit indicate both the running dir, and the running bundle. - * It is set once when the LIM is loaded. - */ - u32 running_mfw; -#define RUNNING_MFW (*((u32 *)(STRUCT_OFFSET(running_mfw)))) - u32 build_time; -#define MFW_BUILD_TIME (*((u32 *)(STRUCT_OFFSET(build_time)))) - u32 reset_type; -#define RESET_TYPE (*((u32 *)(STRUCT_OFFSET(reset_type)))) - u32 mfw_secure_mode; -#define MFW_SECURE_MODE (*((u32 *)(STRUCT_OFFSET(mfw_secure_mode)))) - u16 pme_status_pf_bitmap; -#define PME_STATUS_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_status_pf_bitmap)))) - u16 pme_enable_pf_bitmap; -#define PME_ENABLE_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_enable_pf_bitmap)))) - u32 mim_nvm_addr; - u32 mim_start_addr; - u32 ah_pcie_link_params; -#define AH_PCIE_LINK_PARAMS_LINK_SPEED_MASK (0x000000ff) -#define AH_PCIE_LINK_PARAMS_LINK_SPEED_SHIFT (0) -#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_MASK (0x0000ff00) -#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_SHIFT (8) -#define AH_PCIE_LINK_PARAMS_ASPM_MODE_MASK (0x00ff0000) -#define AH_PCIE_LINK_PARAMS_ASPM_MODE_SHIFT (16) -#define AH_PCIE_LINK_PARAMS_ASPM_CAP_MASK (0xff000000) -#define AH_PCIE_LINK_PARAMS_ASPM_CAP_SHIFT (24) -#define AH_PCIE_LINK_PARAMS (*((u32 *)(STRUCT_OFFSET(ah_pcie_link_params)))) - - u32 rsrv_persist[5]; /* Persist reserved for MFW upgrades */ -}; - -#define NVM_MAGIC_VALUE 0x669955aa - -enum nvm_image_type { - NVM_TYPE_TIM1 = 0x01, - NVM_TYPE_TIM2 = 0x02, - NVM_TYPE_MIM1 = 0x03, - NVM_TYPE_MIM2 = 0x04, - NVM_TYPE_MBA = 0x05, - NVM_TYPE_MODULES_PN = 0x06, - NVM_TYPE_VPD = 0x07, - NVM_TYPE_MFW_TRACE1 = 0x08, - NVM_TYPE_MFW_TRACE2 = 0x09, - NVM_TYPE_NVM_CFG1 = 0x0a, - NVM_TYPE_L2B = 0x0b, - NVM_TYPE_DIR1 = 0x0c, - NVM_TYPE_EAGLE_FW1 = 0x0d, - NVM_TYPE_FALCON_FW1 = 0x0e, - NVM_TYPE_PCIE_FW1 = 0x0f, - NVM_TYPE_HW_SET = 0x10, - NVM_TYPE_LIM = 0x11, - NVM_TYPE_AVS_FW1 = 0x12, - NVM_TYPE_DIR2 = 0x13, - NVM_TYPE_CCM = 0x14, - NVM_TYPE_EAGLE_FW2 = 0x15, - NVM_TYPE_FALCON_FW2 = 0x16, - NVM_TYPE_PCIE_FW2 = 0x17, - NVM_TYPE_AVS_FW2 = 0x18, - NVM_TYPE_INIT_HW = 0x19, - NVM_TYPE_DEFAULT_CFG = 0x1a, - NVM_TYPE_MDUMP = 0x1b, - NVM_TYPE_META = 0x1c, - NVM_TYPE_ISCSI_CFG = 0x1d, - NVM_TYPE_FCOE_CFG 
= 0x1f, - NVM_TYPE_ETH_PHY_FW1 = 0x20, - NVM_TYPE_ETH_PHY_FW2 = 0x21, - NVM_TYPE_BDN = 0x22, - NVM_TYPE_8485X_PHY_FW = 0x23, - NVM_TYPE_PUB_KEY = 0x24, - NVM_TYPE_RECOVERY = 0x25, - NVM_TYPE_PLDM = 0x26, - NVM_TYPE_UPK1 = 0x27, - NVM_TYPE_UPK2 = 0x28, - NVM_TYPE_MASTER_KC = 0x29, - NVM_TYPE_BACKUP_KC = 0x2a, - NVM_TYPE_HW_DUMP = 0x2b, - NVM_TYPE_HW_DUMP_OUT = 0x2c, - NVM_TYPE_BIN_NVM_META = 0x30, - NVM_TYPE_ROM_TEST = 0xf0, - NVM_TYPE_88X33X0_PHY_FW = 0x31, - NVM_TYPE_88X33X0_PHY_SLAVE_FW = 0x32, - NVM_TYPE_MAX, -}; - -#define DIR_ID_1 (0) - #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index 2734f49956f7..e535983ce21b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -53,85 +53,94 @@ enum _dmae_cmd_crc_mask { #define DMAE_MAX_CLIENTS 32 /** - * @brief qed_gtt_init - Initialize GTT windows + * qed_gtt_init(): Initialize GTT windows. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_gtt_init(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_invalidate - Forces all ptt entries to be re-configured + * qed_ptt_invalidate(): Forces all ptt entries to be re-configured + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_ptt_invalidate(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_pool_alloc - Allocate and initialize PTT pool + * qed_ptt_pool_alloc(): Allocate and initialize PTT pool. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return struct _qed_status - success (0), negative - error. + * Return: struct _qed_status - success (0), negative - error. */ int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_pool_free - + * qed_ptt_pool_free(): Free PTT pool. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_ptt_pool_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_get_hw_addr - Get PTT's GRC/HW address + * qed_ptt_get_hw_addr(): Get PTT's GRC/HW address. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt * - * @return u32 + * Return: u32. */ u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_ptt_get_bar_addr - Get PPT's external BAR address + * qed_ptt_get_bar_addr(): Get PPT's external BAR address. * - * @param p_hwfn - * @param p_ptt + * @p_ptt: P_ptt * - * @return u32 + * Return: u32. */ u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt); /** - * @brief qed_ptt_set_win - Set PTT Window's GRC BAR address + * qed_ptt_set_win(): Set PTT Window's GRC BAR address * - * @param p_hwfn - * @param new_hw_addr - * @param p_ptt + * @p_hwfn: HW device data. + * @new_hw_addr: New HW address. + * @p_ptt: P_Ptt + * + * Return: Void. */ void qed_ptt_set_win(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 new_hw_addr); /** - * @brief qed_get_reserved_ptt - Get a specific reserved PTT + * qed_get_reserved_ptt(): Get a specific reserved PTT. * - * @param p_hwfn - * @param ptt_idx + * @p_hwfn: HW device data. + * @ptt_idx: Ptt Index. * - * @return struct qed_ptt * + * Return: struct qed_ptt *. */ struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn, enum reserved_ptts ptt_idx); /** - * @brief qed_wr - Write value to BAR using the given ptt + * qed_wr(): Write value to BAR using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @val: Val. + * @hw_addr: HW address * - * @param p_hwfn - * @param p_ptt - * @param val - * @param hw_addr + * Return: Void. 
*/ void qed_wr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -139,26 +148,28 @@ void qed_wr(struct qed_hwfn *p_hwfn, u32 val); /** - * @brief qed_rd - Read value from BAR using the given ptt + * qed_rd(): Read value from BAR using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @hw_addr: HW address * - * @param p_hwfn - * @param p_ptt - * @param val - * @param hw_addr + * Return: Void. */ u32 qed_rd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 hw_addr); /** - * @brief qed_memcpy_from - copy n bytes from BAR using the given - * ptt - * - * @param p_hwfn - * @param p_ptt - * @param dest - * @param hw_addr - * @param n + * qed_memcpy_from(): Copy n bytes from BAR using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @dest: Destination. + * @hw_addr: HW address. + * @n: N + * + * Return: Void. */ void qed_memcpy_from(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -167,14 +178,15 @@ void qed_memcpy_from(struct qed_hwfn *p_hwfn, size_t n); /** - * @brief qed_memcpy_to - copy n bytes to BAR using the given - * ptt - * - * @param p_hwfn - * @param p_ptt - * @param hw_addr - * @param src - * @param n + * qed_memcpy_to(): Copy n bytes to BAR using the given ptt + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @hw_addr: HW address. + * @src: Source. + * @n: N + * + * Return: Void. */ void qed_memcpy_to(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -182,83 +194,97 @@ void qed_memcpy_to(struct qed_hwfn *p_hwfn, void *src, size_t n); /** - * @brief qed_fid_pretend - pretend to another function when - * accessing the ptt window. There is no way to unpretend - * a function. The only way to cancel a pretend is to - * pretend back to the original function. - * - * @param p_hwfn - * @param p_ptt - * @param fid - fid field of pxp_pretend structure. Can contain - * either pf / vf, port/path fields are don't care. + * qed_fid_pretend(): pretend to another function when + * accessing the ptt window. There is no way to unpretend + * a function. The only way to cancel a pretend is to + * pretend back to the original function. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @fid: fid field of pxp_pretend structure. Can contain + * either pf / vf, port/path fields are don't care. + * + * Return: Void. */ void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid); /** - * @brief qed_port_pretend - pretend to another port when - * accessing the ptt window + * qed_port_pretend(): Pretend to another port when accessing the ptt window * - * @param p_hwfn - * @param p_ptt - * @param port_id - the port to pretend to + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @port_id: The port to pretend to + * + * Return: Void. */ void qed_port_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 port_id); /** - * @brief qed_port_unpretend - cancel any previously set port - * pretend + * qed_port_unpretend(): Cancel any previously set port pretend + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Void. */ void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_port_fid_pretend - pretend to another port and another function - * when accessing the ptt window + * qed_port_fid_pretend(): Pretend to another port and another function + * when accessing the ptt window + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @port_id: The port to pretend to + * @fid: fid field of pxp_pretend structure. Can contain either pf / vf. 
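These qed_hw.h hunks replace the old @brief/@param comment blocks with standard kernel-doc. The template below shows the target layout on a hypothetical prototype; qed_example_helper() does not exist in the driver and is only there to illustrate the comment structure the patch converges on, with any longer free-form description placed after the Return: line as the converted comments further down do.

struct qed_hwfn;
struct qed_ptt;

/**
 * qed_example_helper(): One-line summary of what the helper does.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 *
 * Return: 0 on success, negative value on failure.
 *
 * Longer free-form description, when needed, follows here as its own
 * paragraph inside the comment.
 */
int qed_example_helper(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);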
* - * @param p_hwfn - * @param p_ptt - * @param port_id - the port to pretend to - * @param fid - fid field of pxp_pretend structure. Can contain either pf / vf. + * Return: Void. */ void qed_port_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 port_id, u16 fid); /** - * @brief qed_vfid_to_concrete - build a concrete FID for a - * given VF ID + * qed_vfid_to_concrete(): Build a concrete FID for a given VF ID * - * @param p_hwfn - * @param p_ptt - * @param vfid + * @p_hwfn: HW device data. + * @vfid: VFID. + * + * Return: Void. */ u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid); /** - * @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd - * this is declared here since other files will require it. - * @param idx + * qed_dmae_idx_to_go_cmd(): Map the idx to dmae cmd + * this is declared here since other files will require it. + * + * @idx: Index + * + * Return: Void. */ u32 qed_dmae_idx_to_go_cmd(u8 idx); /** - * @brief qed_dmae_info_alloc - Init the dmae_info structure - * which is part of p_hwfn. - * @param p_hwfn + * qed_dmae_info_alloc(): Init the dmae_info structure + * which is part of p_hwfn. + * + * @p_hwfn: HW device data. + * + * Return: Int. */ int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_dmae_info_free - Free the dmae_info structure - * which is part of p_hwfn + * qed_dmae_info_free(): Free the dmae_info structure + * which is part of p_hwfn. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_dmae_info_free(struct qed_hwfn *p_hwfn); @@ -292,14 +318,16 @@ int qed_dmae_sanity(struct qed_hwfn *p_hwfn, #define QED_HW_ERR_MAX_STR_SIZE 256 /** - * @brief qed_hw_err_notify - Notify upper layer driver and management FW - * about a HW error. - * - * @param p_hwfn - * @param p_ptt - * @param err_type - * @param fmt - debug data buffer to send to the MFW - * @param ... - buffer format args + * qed_hw_err_notify(): Notify upper layer driver and management FW + * about a HW error. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @err_type: Err Type. + * @fmt: Debug data buffer to send to the MFW + * @...: buffer format args + * + * Return void. */ void __printf(4, 5) __cold qed_hw_err_notify(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index ea888a2c6ddb..321c43408153 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * Copyright (c) 2019-2020 Marvell International Ltd. + * Copyright (c) 2019-2021 Marvell International Ltd. 
*/ #include <linux/types.h> @@ -13,17 +13,18 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" +#include "qed_iro_hsi.h" #include "qed_reg_addr.h" -#define CDU_VALIDATION_DEFAULT_CFG 61 +#define CDU_VALIDATION_DEFAULT_CFG CDU_CONTEXT_VALIDATION_DEFAULT_CFG -static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = { +static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = { {400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */ {528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */ {608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */ }; -static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { +static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = { {240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */ }; @@ -42,25 +43,49 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { #define QM_BYPASS_EN 1 #define QM_BYTE_CRD_EN 1 +/* Initial VOQ byte credit */ +#define QM_INITIAL_VOQ_BYTE_CRD 98304 /* Other PQ constants */ #define QM_OTHER_PQS_PER_PF 4 +/* VOQ constants */ +#define MAX_NUM_VOQS (MAX_NUM_PORTS_K2 * NUM_TCS_4PORT_K2) +#define VOQS_BIT_MASK (BIT(MAX_NUM_VOQS) - 1) + /* WFQ constants */ -/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */ -#define QM_WFQ_UPPER_BOUND 62500000 +/* PF WFQ increment value, 0x9000 = 4*9*1024 */ +#define QM_PF_WFQ_INC_VAL(weight) ((weight) * 0x9000) + +/* PF WFQ Upper bound, in MB, 10 * burst size of 1ms in 50Gbps */ +#define QM_PF_WFQ_UPPER_BOUND 62500000 + +/* PF WFQ max increment value, 0.7 * upper bound */ +#define QM_PF_WFQ_MAX_INC_VAL ((QM_PF_WFQ_UPPER_BOUND * 7) / 10) + +/* Number of VOQs in E5 PF WFQ credit register (QmWfqCrd) */ +#define QM_PF_WFQ_CRD_E5_NUM_VOQS 16 + +/* VP WFQ increment value */ +#define QM_VP_WFQ_INC_VAL(weight) ((weight) * QM_VP_WFQ_MIN_INC_VAL) -/* Bit of VOQ in WFQ VP PQ map */ -#define QM_WFQ_VP_PQ_VOQ_SHIFT 0 +/* VP WFQ min increment value */ +#define QM_VP_WFQ_MIN_INC_VAL 10800 -/* Bit of PF in WFQ VP PQ map */ -#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5 +/* VP WFQ max increment value, 2^30 */ +#define QM_VP_WFQ_MAX_INC_VAL 0x40000000 -/* 0x9000 = 4*9*1024 */ -#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000) +/* VP WFQ bypass threshold */ +#define QM_VP_WFQ_BYPASS_THRESH (QM_VP_WFQ_MIN_INC_VAL - 100) -/* Max WFQ increment value is 0.7 * upper bound */ -#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10) +/* VP RL credit task cost */ +#define QM_VP_RL_CRD_TASK_COST 9700 + +/* Bit of VOQ in VP WFQ PQ map */ +#define QM_VP_WFQ_PQ_VOQ_SHIFT 0 + +/* Bit of PF in VP WFQ PQ map */ +#define QM_VP_WFQ_PQ_PF_SHIFT 5 /* RL constants */ @@ -71,12 +96,13 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { #define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD) /* RL increment value - rate is specified in mbps */ -#define QM_RL_INC_VAL(rate) ({ \ - typeof(rate) __rate = (rate); \ - max_t(u32, \ - (u32)(((__rate ? __rate : 1000000) * QM_RL_PERIOD * 101) / \ - (8 * 100)), \ - 1); }) +#define QM_RL_INC_VAL(rate) ({ \ + typeof(rate) __rate = (rate); \ + max_t(u32, \ + (u32)(((__rate ? 
__rate : \ + 100000) * \ + QM_RL_PERIOD * \ + 101) / (8 * 100)), 1); }) /* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */ #define QM_PF_RL_UPPER_BOUND 62500000 @@ -84,16 +110,13 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { /* Max PF RL increment value is 0.7 * upper bound */ #define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10) -/* Vport RL Upper bound, link speed is in Mpbs */ -#define QM_VP_RL_UPPER_BOUND(speed) ((u32)max_t(u32, \ - QM_RL_INC_VAL(speed), \ - 9700 + 1000)) - -/* Max Vport RL increment value is the Vport RL upper bound */ -#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed) - -/* Vport RL credit threshold in case of QM bypass */ -#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1) +/* QCN RL Upper bound, speed is in Mpbs */ +#define QM_GLOBAL_RL_UPPER_BOUND(speed) ((u32)max_t( \ + u32, \ + (u32)(((speed) * \ + QM_RL_PERIOD * 101) / (8 * 100)), \ + QM_VP_RL_CRD_TASK_COST \ + + 1000)) /* AFullOprtnstcCrdMask constants */ #define QM_OPPOR_LINE_VOQ_DEF 1 @@ -156,20 +179,20 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { cmd ## _ ## field, \ value) -#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, vp_pq_id, rl_valid, \ +#define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, rl_valid, \ rl_id, ext_voq, wrr) \ do { \ u32 __reg = 0; \ \ BUILD_BUG_ON(sizeof((map).reg) != sizeof(__reg)); \ - \ - SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \ - SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_RL_VALID, \ + memset(&(map), 0, sizeof(map)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_PQ_VALID, 1); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_RL_VALID, \ !!(rl_valid)); \ - SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, (vp_pq_id)); \ - SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_RL_ID, (rl_id)); \ - SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_VOQ, (ext_voq)); \ - SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, \ + SET_FIELD(__reg, QM_RF_PQ_MAP_VP_PQ_ID, (vp_pq_id)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_RL_ID, (rl_id)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_VOQ, (ext_voq)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, \ (wrr)); \ \ STORE_RT_REG((p_hwfn), QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \ @@ -184,8 +207,8 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { (((rl) >> 8) << 9)) #define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \ - XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \ - XSTORM_PQ_INFO_OFFSET(pq_id) + (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \ + XSTORM_PQ_INFO_OFFSET(pq_id)) /******************** INTERNAL IMPLEMENTATION *********************/ @@ -204,7 +227,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en) { STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 
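QM_RL_INC_VAL() above converts a rate in Mbps (bits per microsecond) into a byte-credit increment for one rate-limiter period, with a 1% margin and a 100000 Mbps fallback when the rate is zero. The standalone sketch below restates that arithmetic and is not part of the patch; QM_RL_PERIOD is assumed here to be the period in microseconds (5 in the in-tree driver), and the numbers in the comments only illustrate the math.

#include <stdint.h>
#include <stdio.h>

#define QM_RL_PERIOD 5  /* assumed: RL period in microseconds */

static uint32_t qm_rl_inc_val(uint32_t rate_mbps)
{
        uint64_t rate = rate_mbps ? rate_mbps : 100000; /* fallback rate */
        /* bytes per period (rate * period / 8) plus a 1% margin (101/100) */
        uint64_t inc = (rate * QM_RL_PERIOD * 101) / (8 * 100);

        return inc > 1 ? (uint32_t)inc : 1;     /* max_t(u32, ..., 1) */
}

int main(void)
{
        /* 25G link: 25000 * 5 * 101 / 800 = 15781 byte credits per period. */
        printf("25G -> %u\n", qm_rl_inc_val(25000));
        /* A zero rate uses the 100000 Mbps fallback. */
        printf("0   -> %u\n", qm_rl_inc_val(0));
        return 0;
}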
1 : 0); if (pf_rl_en) { - u8 num_ext_voqs = MAX_NUM_VOQS_E4; + u8 num_ext_voqs = MAX_NUM_VOQS; u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1; /* Enable RLs for all VOQs */ @@ -236,7 +259,7 @@ static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en) if (pf_wfq_en && QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET, - QM_WFQ_UPPER_BOUND); + QM_PF_WFQ_UPPER_BOUND); } /* Prepare global RL enable/disable runtime init values */ @@ -257,7 +280,7 @@ static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en) if (QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET, - QM_VP_RL_BYPASS_THRESH_SPEED); + QM_GLOBAL_RL_UPPER_BOUND(10000) - 1); } } @@ -271,7 +294,7 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en) if (vport_wfq_en && QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET, - QM_WFQ_UPPER_BOUND); + QM_VP_WFQ_BYPASS_THRESH); } /* Prepare runtime init values to allocate PBF command queue lines for @@ -291,14 +314,14 @@ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn, } /* Prepare runtime init values to allocate PBF command queue lines. */ -static void qed_cmdq_lines_rt_init( - struct qed_hwfn *p_hwfn, - u8 max_ports_per_engine, - u8 max_phys_tcs_per_port, - struct init_qm_port_params port_params[MAX_NUM_PORTS]) +static void +qed_cmdq_lines_rt_init(struct qed_hwfn *p_hwfn, + u8 max_ports_per_engine, + u8 max_phys_tcs_per_port, + struct init_qm_port_params port_params[MAX_NUM_PORTS]) { u8 tc, ext_voq, port_id, num_tcs_in_port; - u8 num_ext_voqs = MAX_NUM_VOQS_E4; + u8 num_ext_voqs = MAX_NUM_VOQS; /* Clear PBF lines of all VOQs */ for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++) @@ -364,11 +387,11 @@ static void qed_cmdq_lines_rt_init( * - No optimization for lossy TC (all are considered lossless). Shared space * is not enabled and allocated for each TC. */ -static void qed_btb_blocks_rt_init( - struct qed_hwfn *p_hwfn, - u8 max_ports_per_engine, - u8 max_phys_tcs_per_port, - struct init_qm_port_params port_params[MAX_NUM_PORTS]) +static void +qed_btb_blocks_rt_init(struct qed_hwfn *p_hwfn, + u8 max_ports_per_engine, + u8 max_phys_tcs_per_port, + struct init_qm_port_params port_params[MAX_NUM_PORTS]) { u32 usable_blocks, pure_lb_blocks, phys_blocks; u8 tc, ext_voq, port_id, num_tcs_in_port; @@ -428,7 +451,7 @@ static void qed_btb_blocks_rt_init( */ static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn) { - u32 upper_bound = QM_VP_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) | + u32 upper_bound = QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) | (u32)QM_RL_CRD_REG_SIGN_BIT; u32 inc_val; u16 rl_id; @@ -450,11 +473,73 @@ static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn) return 0; } +/* Returns the upper bound for the specified Vport RL parameters. + * link_speed is in Mbps. + * Returns 0 in case of error. + */ +static u32 qed_get_vport_rl_upper_bound(enum init_qm_rl_type vport_rl_type, + u32 link_speed) +{ + switch (vport_rl_type) { + case QM_RL_TYPE_NORMAL: + return QM_INITIAL_VOQ_BYTE_CRD; + case QM_RL_TYPE_QCN: + return QM_GLOBAL_RL_UPPER_BOUND(link_speed); + default: + return 0; + } +} + +/* Prepare VPORT RL runtime init values. + * Return -1 on error. 
+ */ +static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn, + u16 start_rl, + u16 num_rls, + u32 link_speed, + struct init_qm_rl_params *rl_params) +{ + u16 i, rl_id; + + if (num_rls && start_rl + num_rls >= MAX_QM_GLOBAL_RLS) { + DP_NOTICE(p_hwfn, "Invalid rate limiter configuration\n"); + return -1; + } + + /* Go over all PF VPORTs */ + for (i = 0, rl_id = start_rl; i < num_rls; i++, rl_id++) { + u32 upper_bound, inc_val; + + upper_bound = + qed_get_vport_rl_upper_bound((enum init_qm_rl_type) + rl_params[i].vport_rl_type, + link_speed); + + inc_val = + QM_RL_INC_VAL(rl_params[i].vport_rl ? + rl_params[i].vport_rl : link_speed); + if (inc_val > upper_bound) { + DP_NOTICE(p_hwfn, + "Invalid RL rate - limit configuration\n"); + return -1; + } + + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + rl_id, + (u32)QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id, + upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, + inc_val); + } + + return 0; +} + /* Prepare Tx PQ mapping runtime init values for the specified PF */ -static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_qm_pf_rt_init_params *p_params, - u32 base_mem_addr_4kb) +static int qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_qm_pf_rt_init_params *p_params, + u32 base_mem_addr_4kb) { u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 }; struct init_qm_vport_params *vport_params = p_params->vport_params; @@ -487,7 +572,7 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, /* Go over all Tx PQs */ for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) { u16 *p_first_tx_pq_id, vport_id_in_pf; - struct qm_rf_pq_map_e4 tx_pq_map; + struct qm_rf_pq_map tx_pq_map; u8 tc_id = pq_params[i].tc_id; bool is_vf_pq; u8 ext_voq; @@ -504,8 +589,8 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, &vport_params[vport_id_in_pf].first_tx_pq_id[tc_id]; if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) { u32 map_val = - (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | - (p_params->pf_id << QM_WFQ_VP_PQ_PF_E4_SHIFT); + (ext_voq << QM_VP_WFQ_PQ_VOQ_SHIFT) | + (p_params->pf_id << QM_VP_WFQ_PQ_PF_SHIFT); /* Create new VP PQ */ *p_first_tx_pq_id = pq_id; @@ -520,7 +605,6 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, /* Prepare PQ map entry */ QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, - E4, pq_id, *p_first_tx_pq_id, pq_params[i].rl_valid, @@ -570,6 +654,8 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i, tx_pq_vf_mask[i]); + + return 0; } /* Prepare Other PQ mapping runtime init values for the specified PF */ @@ -620,7 +706,6 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn, * Return -1 on error. 
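qed_get_vport_rl_upper_bound() and qed_vport_rl_rt_init() above pick a credit upper bound per rate-limiter type before programming the QM runtime registers. The sketch below restates that selection outside the driver; the demo_* names are invented, the constants are copied from the hunk earlier in this file, and QM_RL_PERIOD is again assumed to be 5 us.

#include <stdint.h>

#define QM_RL_PERIOD            5       /* assumed: RL period in us */
#define QM_INITIAL_VOQ_BYTE_CRD 98304
#define QM_VP_RL_CRD_TASK_COST  9700

enum demo_rl_type { DEMO_RL_TYPE_NORMAL, DEMO_RL_TYPE_QCN };

/* QM_GLOBAL_RL_UPPER_BOUND(speed), speed in Mbps. */
static uint32_t demo_global_rl_upper_bound(uint32_t speed)
{
        uint32_t scaled = (uint32_t)(((uint64_t)speed * QM_RL_PERIOD * 101) /
                                     (8 * 100));
        uint32_t floor = QM_VP_RL_CRD_TASK_COST + 1000;

        return scaled > floor ? scaled : floor;
}

/*
 * Mirrors qed_get_vport_rl_upper_bound(): a plain VPORT RL is bounded by the
 * initial VOQ byte credit, a QCN RL scales with link speed, anything else is
 * rejected with 0.  E.g. at 10G the QCN bound is the 10700 floor, at 100G it
 * is 63125.
 */
static uint32_t demo_vport_rl_upper_bound(enum demo_rl_type type,
                                          uint32_t link_speed)
{
        switch (type) {
        case DEMO_RL_TYPE_NORMAL:
                return QM_INITIAL_VOQ_BYTE_CRD;
        case DEMO_RL_TYPE_QCN:
                return demo_global_rl_upper_bound(link_speed);
        default:
                return 0;
        }
}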
*/ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, - struct qed_qm_pf_rt_init_params *p_params) { u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs; @@ -629,8 +714,8 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, u8 ext_voq; u16 i; - inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq); - if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { + inc_val = QM_PF_WFQ_INC_VAL(p_params->pf_wfq); + if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n"); return -1; } @@ -652,7 +737,7 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id, - QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT); + QM_PF_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT); STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id, inc_val); @@ -689,34 +774,38 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, u16 num_vports, struct init_qm_vport_params *vport_params) { - u16 vport_pq_id, i; + u16 vport_pq_id, wfq, i; u32 inc_val; u8 tc; /* Go over all PF VPORTs */ for (i = 0; i < num_vports; i++) { - if (!vport_params[i].wfq) - continue; - - inc_val = QM_WFQ_INC_VAL(vport_params[i].wfq); - if (inc_val > QM_WFQ_MAX_INC_VAL) { - DP_NOTICE(p_hwfn, - "Invalid VPORT WFQ weight configuration\n"); - return -1; - } - /* Each VPORT can have several VPORT PQ IDs for various TCs */ for (tc = 0; tc < NUM_OF_TCS; tc++) { + /* Check if VPORT/TC is valid */ vport_pq_id = vport_params[i].first_tx_pq_id[tc]; - if (vport_pq_id != QM_INVALID_PQ_ID) { - STORE_RT_REG(p_hwfn, - QM_REG_WFQVPCRD_RT_OFFSET + - vport_pq_id, - (u32)QM_WFQ_CRD_REG_SIGN_BIT); - STORE_RT_REG(p_hwfn, - QM_REG_WFQVPWEIGHT_RT_OFFSET + - vport_pq_id, inc_val); + if (vport_pq_id == QM_INVALID_PQ_ID) + continue; + + /* Find WFQ weight (per VPORT or per VPORT+TC) */ + wfq = vport_params[i].wfq; + wfq = wfq ? wfq : vport_params[i].tc_wfq[tc]; + inc_val = QM_VP_WFQ_INC_VAL(wfq); + if (inc_val > QM_VP_WFQ_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, + "Invalid VPORT WFQ weight configuration\n"); + return -1; } + + /* Config registers */ + STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET + + vport_pq_id, + (u32)QM_WFQ_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_WFQVPUPPERBOUND_RT_OFFSET + + vport_pq_id, + inc_val | QM_WFQ_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET + + vport_pq_id, inc_val); } } @@ -780,11 +869,14 @@ int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn, SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ, QM_OPPOR_LINE_VOQ_DEF); SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN); - SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, p_params->pf_wfq_en); - SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, p_params->vport_wfq_en); - SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, p_params->pf_rl_en); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, + p_params->pf_wfq_en ? 1 : 0); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, + p_params->vport_wfq_en ? 1 : 0); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, + p_params->pf_rl_en ? 1 : 0); SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL, - p_params->global_rl_en); + p_params->global_rl_en ? 
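The reworked qed_vp_wfq_rt_init() above now derives the WFQ weight per VPORT/TC pair, falling back to the per-TC weight when the VPORT-wide one is zero, and bounds the resulting credit increment. Below is a small sketch of that selection and check, with the constants copied from the hunk earlier in this file; demo_vp_wfq_inc_val() is not a driver function.

#include <stdint.h>

#define QM_VP_WFQ_MIN_INC_VAL   10800
#define QM_VP_WFQ_MAX_INC_VAL   0x40000000
#define QM_VP_WFQ_INC_VAL(w)    ((w) * QM_VP_WFQ_MIN_INC_VAL)

/*
 * Effective weight for one VPORT/TC pair: the VPORT-wide weight wins when it
 * is set, otherwise the per-TC weight is used.  Returns the credit increment,
 * or 0 where the driver would reject the configuration with -1.
 */
static uint32_t demo_vp_wfq_inc_val(uint16_t vport_wfq, uint16_t tc_wfq)
{
        uint32_t wfq = vport_wfq ? vport_wfq : tc_wfq;
        uint32_t inc_val = QM_VP_WFQ_INC_VAL(wfq);

        if (!inc_val || inc_val > QM_VP_WFQ_MAX_INC_VAL)
                return 0;

        return inc_val;
}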
1 : 0); SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF); SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF); @@ -830,7 +922,6 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, u16 i; u8 tc; - /* Clear first Tx PQ ID array for each VPORT */ for (i = 0; i < p_params->num_vports; i++) for (tc = 0; tc < NUM_OF_TCS; tc++) @@ -843,7 +934,8 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, p_params->num_tids, 0); /* Map Tx PQs */ - qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb); + if (qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb)) + return -1; /* Init PF WFQ */ if (p_params->pf_wfq) @@ -858,15 +950,21 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params)) return -1; + /* Set VPORT RL */ + if (qed_vport_rl_rt_init(p_hwfn, p_params->start_rl, + p_params->num_rls, p_params->link_speed, + p_params->rl_params)) + return -1; + return 0; } int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq) { - u32 inc_val = QM_WFQ_INC_VAL(pf_wfq); + u32 inc_val = QM_PF_WFQ_INC_VAL(pf_wfq); - if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { + if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n"); return -1; } @@ -897,41 +995,66 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq) { + int result = 0; u16 vport_pq_id; - u32 inc_val; u8 tc; - inc_val = QM_WFQ_INC_VAL(wfq); - if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { + for (tc = 0; tc < NUM_OF_TCS && !result; tc++) { + vport_pq_id = first_tx_pq_id[tc]; + if (vport_pq_id != QM_INVALID_PQ_ID) + result = qed_init_vport_tc_wfq(p_hwfn, p_ptt, + vport_pq_id, wfq); + } + + return result; +} + +int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 first_tx_pq_id, u16 wfq) +{ + u32 inc_val; + + if (first_tx_pq_id == QM_INVALID_PQ_ID) + return -1; + + inc_val = QM_VP_WFQ_INC_VAL(wfq); + if (!inc_val || inc_val > QM_VP_WFQ_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n"); return -1; } - /* A VPORT can have several VPORT PQ IDs for various TCs */ - for (tc = 0; tc < NUM_OF_TCS; tc++) { - vport_pq_id = first_tx_pq_id[tc]; - if (vport_pq_id != QM_INVALID_PQ_ID) - qed_wr(p_hwfn, - p_ptt, - QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val); - } + qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPCRD + first_tx_pq_id * 4, + (u32)QM_WFQ_CRD_REG_SIGN_BIT); + qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPUPPERBOUND + first_tx_pq_id * 4, + inc_val | QM_WFQ_CRD_REG_SIGN_BIT); + qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + first_tx_pq_id * 4, + inc_val); return 0; } int qed_init_global_rl(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit) + struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit, + enum init_qm_rl_type vport_rl_type) { - u32 inc_val; + u32 inc_val, upper_bound; + upper_bound = + (vport_rl_type == + QM_RL_TYPE_QCN) ? 
QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) : + QM_INITIAL_VOQ_BYTE_CRD; inc_val = QM_RL_INC_VAL(rate_limit); - if (inc_val > QM_VP_RL_MAX_INC_VAL(rate_limit)) { - DP_NOTICE(p_hwfn, "Invalid rate limit configuration.\n"); + if (inc_val > upper_bound) { + DP_NOTICE(p_hwfn, "Invalid VPORT rate limit configuration.\n"); return -1; } qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT); + qed_wr(p_hwfn, + p_ptt, + QM_REG_RLGLBLUPPERBOUND + rl_id * 4, + upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT); qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val); return 0; @@ -1013,7 +1136,7 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, __le32 *p_data, u32 addr, u32 len_in_dwords) { - struct qed_dmae_params params = {}; + struct qed_dmae_params params = { 0 }; u32 *data_cpu; int rc; @@ -1066,16 +1189,16 @@ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, /* Update PRS register */ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); - shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT; - SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable); + SET_FIELD(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE, vxlan_enable); qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); if (reg_val) { reg_val = - qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2); + qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0); /* Update output only if tunnel blocks not included. */ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) - qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); } @@ -1099,18 +1222,20 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn, /* Update PRS register */ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); - shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT; - SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable); - shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT; - SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable); + SET_FIELD(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE, + eth_gre_enable); + SET_FIELD(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE, + ip_gre_enable); qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); if (reg_val) { reg_val = - qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2); + qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0); /* Update output only if tunnel blocks not included. 
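The tunnel-enable helpers in these hunks move from open-coded shifts to SET_FIELD() with named PRS_REG_ENCAPSULATION_TYPE_EN_* fields. The sketch below is not part of the patch: it uses stand-in DEMO_* macros that assume the usual unshifted-MASK plus SHIFT convention behind SET_FIELD/GET_FIELD, and the single-bit field and values are invented.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the driver helpers, assuming the MASK/SHIFT convention. */
#define DEMO_GET_FIELD(value, name) \
        (((value) >> name##_SHIFT) & name##_MASK)
#define DEMO_SET_FIELD(value, name, flag)                               \
        do {                                                            \
                (value) &= ~((uint32_t)name##_MASK << name##_SHIFT);    \
                (value) |= ((uint32_t)(flag) & name##_MASK) << name##_SHIFT; \
        } while (0)

/* Hypothetical single-bit field, standing in for the VXLAN enable bit. */
#define DEMO_ENCAP_VXLAN_ENABLE_MASK    0x1
#define DEMO_ENCAP_VXLAN_ENABLE_SHIFT   0

int main(void)
{
        uint32_t reg_val = 0;

        DEMO_SET_FIELD(reg_val, DEMO_ENCAP_VXLAN_ENABLE, 1);
        printf("reg=0x%x vxlan=%u\n", reg_val,
               DEMO_GET_FIELD(reg_val, DEMO_ENCAP_VXLAN_ENABLE));
        return 0;
}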
*/ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) - qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); } @@ -1148,22 +1273,23 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, bool eth_geneve_enable, bool ip_geneve_enable) { u32 reg_val; - u8 shift; /* Update PRS register */ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); - shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT; - SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_geneve_enable); - shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT; - SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_geneve_enable); + SET_FIELD(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE, + eth_geneve_enable); + SET_FIELD(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE, + ip_geneve_enable); qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); if (reg_val) { reg_val = - qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2); + qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0); /* Update output only if tunnel blocks not included. */ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) - qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); } @@ -1179,16 +1305,16 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, /* Update DORQ registers */ qed_wr(p_hwfn, p_ptt, - DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5, + DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2, eth_geneve_enable ? 1 : 0); qed_wr(p_hwfn, p_ptt, - DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5, + DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2, ip_geneve_enable ? 1 : 0); } #define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 3 -#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -925189872 +#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT 0xC8DAB910 void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool enable) @@ -1208,7 +1334,7 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn, /* update PRS FIC register */ qed_wr(p_hwfn, p_ptt, - PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + PRS_REG_OUTPUT_FORMAT_4_0, (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT); } else { /* clear VXLAN_NO_L2_ENABLE flag */ @@ -1229,7 +1355,7 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn, void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id) { - struct regpair ram_line = { }; + struct regpair ram_line = { 0 }; /* Disable gft search for PF */ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0); @@ -1621,6 +1747,8 @@ struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn, storm_buf_size = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_BUF_SIZE); storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID); + if (storm_id >= NUM_STORMS) + break; storm_mem_desc = allocated_mem + storm_id; storm_mem_desc->size = storm_buf_size * sizeof(u32); @@ -1645,7 +1773,7 @@ struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn, /* If memory allocation has failed, free all allocated memory */ if (buf_offset < buf_size) { - qed_fw_overlay_mem_free(p_hwfn, allocated_mem); + qed_fw_overlay_mem_free(p_hwfn, &allocated_mem); return NULL; } @@ -1679,16 +1807,16 @@ void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn, } void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, - struct phys_mem_desc *fw_overlay_mem) + struct phys_mem_desc **fw_overlay_mem) { u8 storm_id; - if (!fw_overlay_mem) + if (!fw_overlay_mem || !(*fw_overlay_mem)) return; for (storm_id = 0; storm_id < NUM_STORMS; 
storm_id++) { struct phys_mem_desc *storm_mem_desc = - (struct phys_mem_desc *)fw_overlay_mem + storm_id; + (struct phys_mem_desc *)*fw_overlay_mem + storm_id; /* Free Storm's physical memory */ if (storm_mem_desc->virt_addr) @@ -1699,5 +1827,6 @@ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, } /* Free allocated virtual memory */ - kfree(fw_overlay_mem); + kfree(*fw_overlay_mem); + *fw_overlay_mem = NULL; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index 7e6c6389523b..b3bf9899c1a1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -15,6 +15,7 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" +#include "qed_iro_hsi.h" #include "qed_reg_addr.h" #include "qed_sriov.h" @@ -46,30 +47,32 @@ static u32 pxp_global_win[] = { /* IRO Array */ static const u32 iro_arr[] = { 0x00000000, 0x00000000, 0x00080000, + 0x00004478, 0x00000008, 0x00080000, 0x00003288, 0x00000088, 0x00880000, - 0x000058e8, 0x00000020, 0x00200000, + 0x000058a8, 0x00000020, 0x00200000, + 0x00003188, 0x00000008, 0x00080000, 0x00000b00, 0x00000008, 0x00040000, 0x00000a80, 0x00000008, 0x00040000, 0x00000000, 0x00000008, 0x00020000, 0x00000080, 0x00000008, 0x00040000, 0x00000084, 0x00000008, 0x00020000, - 0x00005718, 0x00000004, 0x00040000, - 0x00004dd0, 0x00000000, 0x00780000, + 0x00005798, 0x00000004, 0x00040000, + 0x00004e50, 0x00000000, 0x00780000, 0x00003e40, 0x00000000, 0x00780000, - 0x00004480, 0x00000000, 0x00780000, + 0x00004500, 0x00000000, 0x00780000, 0x00003210, 0x00000000, 0x00780000, 0x00003b50, 0x00000000, 0x00780000, 0x00007f58, 0x00000000, 0x00780000, - 0x00005f58, 0x00000000, 0x00080000, + 0x00005fd8, 0x00000000, 0x00080000, 0x00007100, 0x00000000, 0x00080000, - 0x0000aea0, 0x00000000, 0x00080000, + 0x0000af20, 0x00000000, 0x00080000, 0x00004398, 0x00000000, 0x00080000, 0x0000a5a0, 0x00000000, 0x00080000, 0x0000bde8, 0x00000000, 0x00080000, 0x00000020, 0x00000004, 0x00040000, - 0x000056c8, 0x00000010, 0x00100000, + 0x00005688, 0x00000010, 0x00100000, 0x0000c210, 0x00000030, 0x00300000, - 0x0000b088, 0x00000038, 0x00380000, + 0x0000b108, 0x00000038, 0x00380000, 0x00003d20, 0x00000080, 0x00400000, 0x0000bf60, 0x00000000, 0x00040000, 0x00004560, 0x00040080, 0x00040000, @@ -77,11 +80,11 @@ static const u32 iro_arr[] = { 0x00003d60, 0x00000080, 0x00200000, 0x00008960, 0x00000040, 0x00300000, 0x0000e840, 0x00000060, 0x00600000, - 0x00004618, 0x00000080, 0x00380000, - 0x00010738, 0x000000c0, 0x00c00000, + 0x00004698, 0x00000080, 0x00380000, + 0x000107b8, 0x000000c0, 0x00c00000, 0x000001f8, 0x00000002, 0x00020000, - 0x0000a2a0, 0x00000000, 0x01080000, - 0x0000a3a8, 0x00000008, 0x00080000, + 0x0000a260, 0x00000000, 0x01080000, + 0x0000a368, 0x00000008, 0x00080000, 0x000001c0, 0x00000008, 0x00080000, 0x000001f8, 0x00000008, 0x00080000, 0x00000ac0, 0x00000008, 0x00080000, @@ -90,39 +93,46 @@ static const u32 iro_arr[] = { 0x00000280, 0x00000008, 0x00080000, 0x00000680, 0x00080018, 0x00080000, 0x00000b78, 0x00080018, 0x00020000, - 0x0000c640, 0x00000050, 0x003c0000, - 0x00012038, 0x00000018, 0x00100000, - 0x00011b00, 0x00000040, 0x00180000, - 0x000095d0, 0x00000050, 0x00200000, + 0x0000c600, 0x00000058, 0x003c0000, + 0x00012038, 0x00000020, 0x00100000, + 0x00011b00, 0x00000048, 0x00180000, + 0x00009650, 0x00000050, 0x00200000, 0x00008b10, 0x00000040, 0x00280000, - 0x00011640, 0x00000018, 0x00100000, - 0x0000c828, 0x00000048, 0x00380000, - 0x00011710, 0x00000020, 
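The qed_fw_overlay_mem_free() change above switches the parameter to a pointer-to-pointer so the function can clear the caller's reference after freeing. Below is a generic userspace sketch of that pattern, not the driver code; the demo_* names are made up, and the point is simply that a repeated call becomes a no-op instead of a double free.

#include <stdlib.h>

struct demo_mem_desc {
        void *virt_addr;
        size_t size;
};

static void demo_mem_free(struct demo_mem_desc **desc)
{
        if (!desc || !*desc)
                return;         /* already freed or never allocated */

        free((*desc)->virt_addr);
        free(*desc);
        *desc = NULL;           /* caller's pointer is cleared as well */
}

/*
 * Usage: demo_mem_free(&desc); demo_mem_free(&desc);
 * The second call sees a NULL pointer and returns immediately.
 */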
0x00200000, - 0x00004650, 0x00000080, 0x00100000, + 0x000116c0, 0x00000018, 0x00100000, + 0x0000c808, 0x00000048, 0x00380000, + 0x00011790, 0x00000020, 0x00200000, + 0x000046d0, 0x00000080, 0x00100000, 0x00003618, 0x00000010, 0x00100000, - 0x0000a968, 0x00000008, 0x00010000, + 0x0000a9e8, 0x00000008, 0x00010000, 0x000097a0, 0x00000008, 0x00010000, - 0x00011990, 0x00000008, 0x00010000, - 0x0000f018, 0x00000008, 0x00010000, - 0x00012628, 0x00000008, 0x00010000, - 0x00011da8, 0x00000008, 0x00010000, - 0x0000aa78, 0x00000030, 0x00100000, - 0x0000d768, 0x00000028, 0x00280000, - 0x00009a58, 0x00000018, 0x00180000, - 0x00009bd8, 0x00000008, 0x00080000, - 0x00013a18, 0x00000008, 0x00080000, - 0x000126e8, 0x00000018, 0x00180000, - 0x0000e608, 0x00500288, 0x00100000, - 0x00012970, 0x00000138, 0x00280000, + 0x00011a10, 0x00000008, 0x00010000, + 0x0000e9f8, 0x00000008, 0x00010000, + 0x00012648, 0x00000008, 0x00010000, + 0x000121c8, 0x00000008, 0x00010000, + 0x0000af08, 0x00000030, 0x00100000, + 0x0000d748, 0x00000028, 0x00280000, + 0x00009e68, 0x00000018, 0x00180000, + 0x00009fe8, 0x00000008, 0x00080000, + 0x00013ea8, 0x00000008, 0x00080000, + 0x00012f18, 0x00000018, 0x00180000, + 0x0000dfe8, 0x00500288, 0x00100000, + 0x000131a0, 0x00000138, 0x00280000, }; void qed_init_iro_array(struct qed_dev *cdev) { - cdev->iro_arr = iro_arr; + cdev->iro_arr = iro_arr + E4_IRO_ARR_OFFSET; } void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val) { + if (rt_offset >= RUNTIME_ARRAY_SIZE) { + DP_ERR(p_hwfn, + "Avoid storing %u in rt_data at index %u!\n", + val, rt_offset); + return; + } + p_hwfn->rt_data.init_val[rt_offset] = val; p_hwfn->rt_data.b_valid[rt_offset] = true; } @@ -132,6 +142,14 @@ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, { size_t i; + if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) { + DP_ERR(p_hwfn, + "Avoid storing values in rt_data at indices %u-%u!\n", + rt_offset, + (u32)(rt_offset + size - 1)); + return; + } + for (i = 0; i < size / sizeof(u32); i++) { p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i]; p_hwfn->rt_data.b_valid[rt_offset + i] = true; @@ -175,7 +193,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn, return rc; /* invalidate after writing */ - for (j = i; j < i + segment; j++) + for (j = i; j < (u32)(i + segment); j++) p_valid[j] = false; /* Jump over the entire segment, including invalid entry */ @@ -245,7 +263,7 @@ static int qed_init_array_dmae(struct qed_hwfn *p_hwfn, static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 addr, u32 fill, u32 fill_count) + u32 addr, u32 fill_count) { static u32 zero_buffer[DMAE_MAX_RW_SIZE]; struct qed_dmae_params params = {}; @@ -372,7 +390,7 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn, case INIT_SRC_ZEROS: data = le32_to_cpu(p_cmd->args.zeros_count); if (b_must_dmae || (b_can_dmae && (data >= 64))) - rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data); + rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, data); else qed_init_fill(p_hwfn, p_ptt, addr, 0, data); break; @@ -419,7 +437,6 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn, addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2; poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE); - val = qed_rd(p_hwfn, p_ptt, addr); if (poll == INIT_POLL_NONE) @@ -515,8 +532,7 @@ static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn, INIT_IF_MODE_OP_CMD_OFFSET); } -static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn, - struct init_if_phase_op *p_cmd, +static u32 qed_init_cmd_phase(struct init_if_phase_op *p_cmd, u32 phase, u32 
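qed_init_store_rt_reg() and qed_init_store_rt_agg() above gain bounds checks against RUNTIME_ARRAY_SIZE before touching the rt_data arrays. The sketch below shows the same idea with invented sizes and names; it checks the element count directly and does not reproduce the patch's exact index arithmetic.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_RT_ARRAY_SIZE 64   /* stands in for RUNTIME_ARRAY_SIZE */

struct demo_rt_data {
        uint32_t init_val[DEMO_RT_ARRAY_SIZE];
        bool b_valid[DEMO_RT_ARRAY_SIZE];
};

/* Refuse any store that would run past the end of the runtime arrays. */
static void demo_store_rt_agg(struct demo_rt_data *rt, uint32_t rt_offset,
                              const uint32_t *val, size_t size_bytes)
{
        size_t i, n = size_bytes / sizeof(uint32_t);

        if (rt_offset + n > DEMO_RT_ARRAY_SIZE) {
                fprintf(stderr, "refusing to store at indices %u-%u\n",
                        (unsigned int)rt_offset,
                        (unsigned int)(rt_offset + n - 1));
                return;
        }

        for (i = 0; i < n; i++) {
                rt->init_val[rt_offset + i] = val[i];
                rt->b_valid[rt_offset + i] = true;
        }
}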
phase_id) { u32 data = le32_to_cpu(p_cmd->phase_data); @@ -563,7 +579,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn, modes); break; case INIT_OP_IF_PHASE: - cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase, + cmd_num += qed_init_cmd_phase(&cmd->if_phase, phase, phase_id); break; case INIT_OP_DELAY: diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h index a573c8921982..12e5c4e370d4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h @@ -12,23 +12,24 @@ #include "qed.h" /** - * @brief qed_init_iro_array - init iro_arr. + * qed_init_iro_array(): init iro_arr. * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_init_iro_array(struct qed_dev *cdev); /** - * @brief qed_init_run - Run the init-sequence. + * qed_init_run(): Run the init-sequence. * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @phase: Phase. + * @phase_id: Phase ID. + * @modes: Mode. * - * @param p_hwfn - * @param p_ptt - * @param phase - * @param phase_id - * @param modes - * @return _qed_status_t + * Return: _qed_status_t */ int qed_init_run(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -37,30 +38,31 @@ int qed_init_run(struct qed_hwfn *p_hwfn, int modes); /** - * @brief qed_init_hwfn_allocate - Allocate RT array, Store 'values' ptrs. + * qed_init_alloc(): Allocate RT array, Store 'values' ptrs. * + * @p_hwfn: HW device data. * - * @param p_hwfn - * - * @return _qed_status_t + * Return: _qed_status_t. */ int qed_init_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_init_hwfn_deallocate + * qed_init_free(): Init HW function deallocate. * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_init_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_init_store_rt_reg - Store a configuration value in the RT array. + * qed_init_store_rt_reg(): Store a configuration value in the RT array. * + * @p_hwfn: HW device data. + * @rt_offset: RT offset. + * @val: Val. * - * @param p_hwfn - * @param rt_offset - * @param val + * Return: Void. */ void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, @@ -72,29 +74,21 @@ void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, #define OVERWRITE_RT_REG(hwfn, offset, val) \ qed_init_store_rt_reg(hwfn, offset, val) -/** - * @brief - * - * - * @param p_hwfn - * @param rt_offset - * @param val - * @param size - */ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 *val, size_t size); #define STORE_RT_REG_AGG(hwfn, offset, val) \ - qed_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val)) + qed_init_store_rt_agg(hwfn, offset, (u32 *)&(val), sizeof(val)) /** - * @brief - * Initialize GTT global windows and set admin window - * related params of GTT/PTT to default values. + * qed_gtt_init(): Initialize GTT global windows and set admin window + * related params of GTT/PTT to default values. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return Void. 
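The STORE_RT_REG_AGG() tweak above adds parentheses around the macro argument. A short illustration, separate from the patch, of why unparenthesized macro arguments misbehave when the argument is an expression; the DEMO_* macros exist only for this example.

#include <stdio.h>

#define DEMO_DOUBLE_BAD(x)      (x * 2)         /* breaks for "1 + 2" */
#define DEMO_DOUBLE_GOOD(x)     ((x) * 2)

int main(void)
{
        printf("bad:  %d\n", DEMO_DOUBLE_BAD(1 + 2));   /* 1 + 2*2 = 5 */
        printf("good: %d\n", DEMO_DOUBLE_GOOD(1 + 2));  /* (1+2)*2 = 6 */
        return 0;
}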
*/ void qed_gtt_init(struct qed_hwfn *p_hwfn); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index f78e6055f654..a97f691839e0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -36,7 +36,7 @@ struct qed_sb_sp_info { struct qed_sb_info sb_info; /* per protocol index data */ - struct qed_pi_info pi_info_arr[PIS_PER_SB_E4]; + struct qed_pi_info pi_info_arr[PIS_PER_SB]; }; enum qed_attention_type { @@ -1507,7 +1507,7 @@ static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, else SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 1); - sb_offset = igu_sb_id * PIS_PER_SB_E4; + sb_offset = igu_sb_id * PIS_PER_SB; pi_offset = sb_offset + pi_index; if (p_hwfn->hw_init_done) diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index c5550e96bbe1..84c17e97f569 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -53,51 +53,54 @@ enum qed_coalescing_fsm { }; /** - * @brief qed_int_igu_enable_int - enable device interrupts + * qed_int_igu_enable_int(): Enable device interrupts. * - * @param p_hwfn - * @param p_ptt - * @param int_mode - interrupt mode to use + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @int_mode: Interrupt mode to use. + * + * Return: Void. */ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_int_mode int_mode); /** - * @brief qed_int_igu_disable_int - disable device interrupts + * qed_int_igu_disable_int(): Disable device interrupts. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Void. */ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_int_igu_read_sisr_reg - Reads the single isr multiple dpc - * register from igu. + * qed_int_igu_read_sisr_reg(): Reads the single isr multiple dpc + * register from igu. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return u64 + * Return: u64. */ u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn); #define QED_SP_SB_ID 0xffff /** - * @brief qed_int_sb_init - Initializes the sb_info structure. + * qed_int_sb_init(): Initializes the sb_info structure. * - * once the structure is initialized it can be passed to sb related functions. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @sb_info: points to an uninitialized (but allocated) sb_info structure + * @sb_virt_addr: SB Virtual address. + * @sb_phy_addr: SB Physial address. + * @sb_id: the sb_id to be used (zero based in driver) + * should use QED_SP_SB_ID for SP Status block * - * @param p_hwfn - * @param p_ptt - * @param sb_info points to an uninitialized (but - * allocated) sb_info structure - * @param sb_virt_addr - * @param sb_phy_addr - * @param sb_id the sb_id to be used (zero based in driver) - * should use QED_SP_SB_ID for SP Status block + * Return: int. * - * @return int + * Once the structure is initialized it can be passed to sb related functions. */ int qed_int_sb_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -106,82 +109,91 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn, dma_addr_t sb_phy_addr, u16 sb_id); /** - * @brief qed_int_sb_setup - Setup the sb. + * qed_int_sb_setup(): Setup the sb. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @sb_info: Initialized sb_info structure. * - * @param p_hwfn - * @param p_ptt - * @param sb_info initialized sb_info structure + * Return: Void. 
*/ void qed_int_sb_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_sb_info *sb_info); /** - * @brief qed_int_sb_release - releases the sb_info structure. + * qed_int_sb_release(): Releases the sb_info structure. * - * once the structure is released, it's memory can be freed + * @p_hwfn: HW device data. + * @sb_info: Points to an allocated sb_info structure. + * @sb_id: The sb_id to be used (zero based in driver) + * should never be equal to QED_SP_SB_ID + * (SP Status block). * - * @param p_hwfn - * @param sb_info points to an allocated sb_info structure - * @param sb_id the sb_id to be used (zero based in driver) - * should never be equal to QED_SP_SB_ID - * (SP Status block) + * Return: int. * - * @return int + * Once the structure is released, it's memory can be freed. */ int qed_int_sb_release(struct qed_hwfn *p_hwfn, struct qed_sb_info *sb_info, u16 sb_id); /** - * @brief qed_int_sp_dpc - To be called when an interrupt is received on the - * default status block. + * qed_int_sp_dpc(): To be called when an interrupt is received on the + * default status block. * - * @param p_hwfn - pointer to hwfn + * @t: Tasklet. + * + * Return: Void. * */ void qed_int_sp_dpc(struct tasklet_struct *t); /** - * @brief qed_int_get_num_sbs - get the number of status - * blocks configured for this funciton in the igu. + * qed_int_get_num_sbs(): Get the number of status blocks configured + * for this funciton in the igu. * - * @param p_hwfn - * @param p_sb_cnt_info + * @p_hwfn: HW device data. + * @p_sb_cnt_info: Pointer to SB count info. * - * @return int - number of status blocks configured + * Return: Void. */ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, struct qed_sb_cnt_info *p_sb_cnt_info); /** - * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR + * qed_int_disable_post_isr_release(): Performs the cleanup post ISR * release. The API need to be called after releasing all slowpath IRQs * of the device. * - * @param cdev + * @cdev: Qed dev pointer. * + * Return: Void. */ void qed_int_disable_post_isr_release(struct qed_dev *cdev); /** - * @brief qed_int_attn_clr_enable - sets whether the general behavior is + * qed_int_attn_clr_enable: Sets whether the general behavior is * preventing attentions from being reasserted, or following the * attributes of the specific attention. * - * @param cdev - * @param clr_enable + * @cdev: Qed dev pointer. + * @clr_enable: Clear enable + * + * Return: Void. * */ void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable); /** - * @brief - Doorbell Recovery handler. + * qed_db_rec_handler(): Doorbell Recovery handler. * Run doorbell recovery in case of PF overflow (and flush DORQ if * needed). * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. */ int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -192,7 +204,7 @@ int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); #define QED_SB_EVENT_MASK 0x0003 #define SB_ALIGNED_SIZE(p_hwfn) \ - ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn) + ALIGNED_TYPE_SIZE(struct status_block, p_hwfn) #define QED_SB_INVALID_IDX 0xffff @@ -223,30 +235,34 @@ struct qed_igu_info { }; /** - * @brief - Make sure the IGU CAM reflects the resources provided by MFW + * qed_int_igu_reset_cam(): Make sure the IGU CAM reflects the resources + * provided by MFW. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Void. 
*/ int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Translate the weakly-defined client sb-id into an IGU sb-id + * qed_get_igu_sb_id(): Translate the weakly-defined client sb-id into + * an IGU sb-id * - * @param p_hwfn - * @param sb_id - user provided sb_id + * @p_hwfn: HW device data. + * @sb_id: user provided sb_id. * - * @return an index inside IGU CAM where the SB resides + * Return: An index inside IGU CAM where the SB resides. */ u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); /** - * @brief return a pointer to an unused valid SB + * qed_get_igu_free_sb(): Return a pointer to an unused valid SB * - * @param p_hwfn - * @param b_is_pf - true iff we want a SB belonging to a PF + * @p_hwfn: HW device data. + * @b_is_pf: True iff we want a SB belonging to a PF. * - * @return point to an igu_block, NULL if none is available + * Return: Point to an igu_block, NULL if none is available. */ struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf); @@ -259,15 +275,15 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn); /** - * @brief qed_int_igu_read_cam - Reads the IGU CAM. + * qed_int_igu_read_cam(): Reads the IGU CAM. * This function needs to be called during hardware * prepare. It reads the info from igu cam to know which * status block is the default / base status block etc. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int + * Return: Int. */ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -275,24 +291,22 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, typedef int (*qed_int_comp_cb_t)(struct qed_hwfn *p_hwfn, void *cookie); /** - * @brief qed_int_register_cb - Register callback func for - * slowhwfn statusblock. - * - * Every protocol that uses the slowhwfn status block - * should register a callback function that will be called - * once there is an update of the sp status block. - * - * @param p_hwfn - * @param comp_cb - function to be called when there is an - * interrupt on the sp sb - * - * @param cookie - passed to the callback function - * @param sb_idx - OUT parameter which gives the chosen index - * for this protocol. - * @param p_fw_cons - pointer to the actual address of the - * consumer for this protocol. - * - * @return int + * qed_int_register_cb(): Register callback func for slowhwfn statusblock. + * + * @p_hwfn: HW device data. + * @comp_cb: Function to be called when there is an + * interrupt on the sp sb + * @cookie: Passed to the callback function + * @sb_idx: (OUT) parameter which gives the chosen index + * for this protocol. + * @p_fw_cons: Pointer to the actual address of the + * consumer for this protocol. + * + * Return: Int. + * + * Every protocol that uses the slowhwfn status block + * should register a callback function that will be called + * once there is an update of the sp status block. */ int qed_int_register_cb(struct qed_hwfn *p_hwfn, qed_int_comp_cb_t comp_cb, @@ -301,37 +315,40 @@ int qed_int_register_cb(struct qed_hwfn *p_hwfn, __le16 **p_fw_cons); /** - * @brief qed_int_unregister_cb - Unregisters callback - * function from sp sb. - * Partner of qed_int_register_cb -> should be called - * when no longer required. + * qed_int_unregister_cb(): Unregisters callback function from sp sb. + * + * @p_hwfn: HW device data. + * @pi: Producer Index. * - * @param p_hwfn - * @param pi + * Return: Int. 
* - * @return int + * Partner of qed_int_register_cb -> should be called + * when no longer required. */ int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi); /** - * @brief qed_int_get_sp_sb_id - Get the slowhwfn sb id. + * qed_int_get_sp_sb_id(): Get the slowhwfn sb id. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return u16 + * Return: u16. */ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn); /** - * @brief Status block cleanup. Should be called for each status - * block that will be used -> both PF / VF - * - * @param p_hwfn - * @param p_ptt - * @param igu_sb_id - igu status block id - * @param opaque - opaque fid of the sb owner. - * @param b_set - set(1) / clear(0) + * qed_int_igu_init_pure_rt_single(): Status block cleanup. + * Should be called for each status + * block that will be used -> both PF / VF. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @igu_sb_id: IGU status block id. + * @opaque: Opaque fid of the sb owner. + * @b_set: Set(1) / Clear(0). + * + * Return: Void. */ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -340,15 +357,16 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, bool b_set); /** - * @brief qed_int_cau_conf - configure cau for a given status - * block - * - * @param p_hwfn - * @param ptt - * @param sb_phys - * @param igu_sb_id - * @param vf_number - * @param vf_valid + * qed_int_cau_conf_sb(): Configure cau for a given status block. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @sb_phys: SB Physical. + * @igu_sb_id: IGU status block id. + * @vf_number: VF number + * @vf_valid: VF valid or not. + * + * Return: Void. */ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -358,52 +376,58 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, u8 vf_valid); /** - * @brief qed_int_alloc + * qed_int_alloc(): QED interrupt alloc. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int + * Return: Int. */ int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_int_free + * qed_int_free(): QED interrupt free. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_int_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_int_setup + * qed_int_setup(): QED interrupt setup. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. */ void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief - Enable Interrupt & Attention for hw function + * qed_int_igu_enable(): Enable Interrupt & Attention for hw function. * - * @param p_hwfn - * @param p_ptt - * @param int_mode + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @int_mode: Interrut mode * - * @return int + * Return: Int. */ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_int_mode int_mode); /** - * @brief - Initialize CAU status block entry + * qed_init_cau_sb_entry(): Initialize CAU status block entry. + * + * @p_hwfn: HW device data. + * @p_sb_entry: Pointer SB entry. + * @pf_id: PF number + * @vf_number: VF number + * @vf_valid: VF valid or not. * - * @param p_hwfn - * @param p_sb_entry - * @param pf_id - * @param vf_number - * @param vf_valid + * Return: Void. 
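The qed_int_register_cb() contract spelled out above (hand in a completion function plus an opaque cookie, get back the chosen sb index) boils down to a small slot registry. The sketch below mirrors that contract with illustrative stand-in types and a fixed slot count; it is not the driver's real structure, only the registration pattern.

#define MAX_SP_CALLBACKS 16

typedef int (*sp_comp_cb_t)(void *cookie);

struct sp_cb_slot {
        sp_comp_cb_t cb;
        void *cookie;
};

static struct sp_cb_slot sp_slots[MAX_SP_CALLBACKS];

/* Store the callback and cookie, report the chosen index back to the caller. */
static int sp_register_cb_sketch(sp_comp_cb_t cb, void *cookie, unsigned int *sb_idx)
{
        unsigned int i;

        for (i = 0; i < MAX_SP_CALLBACKS; i++) {
                if (!sp_slots[i].cb) {
                        sp_slots[i].cb = cb;
                        sp_slots[i].cookie = cookie;
                        *sb_idx = i; /* index reported back to the protocol */
                        return 0;
                }
        }

        return -1; /* no free slot */
}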
*/ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, struct cau_sb_entry *p_sb_entry, diff --git a/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h new file mode 100644 index 000000000000..3ccdd3b1d8cb --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h @@ -0,0 +1,500 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2019-2021 Marvell International Ltd. + */ + +#ifndef _QED_IRO_HSI_H +#define _QED_IRO_HSI_H + +#include <linux/types.h> + +enum { + IRO_YSTORM_FLOW_CONTROL_MODE_GTT, + IRO_PSTORM_PKT_DUPLICATION_CFG, + IRO_TSTORM_PORT_STAT, + IRO_TSTORM_LL2_PORT_STAT, + IRO_TSTORM_PKT_DUPLICATION_CFG, + IRO_USTORM_VF_PF_CHANNEL_READY_GTT, + IRO_USTORM_FLR_FINAL_ACK_GTT, + IRO_USTORM_EQE_CONS_GTT, + IRO_USTORM_ETH_QUEUE_ZONE_GTT, + IRO_USTORM_COMMON_QUEUE_CONS_GTT, + IRO_XSTORM_PQ_INFO, + IRO_XSTORM_INTEG_TEST_DATA, + IRO_YSTORM_INTEG_TEST_DATA, + IRO_PSTORM_INTEG_TEST_DATA, + IRO_TSTORM_INTEG_TEST_DATA, + IRO_MSTORM_INTEG_TEST_DATA, + IRO_USTORM_INTEG_TEST_DATA, + IRO_XSTORM_OVERLAY_BUF_ADDR, + IRO_YSTORM_OVERLAY_BUF_ADDR, + IRO_PSTORM_OVERLAY_BUF_ADDR, + IRO_TSTORM_OVERLAY_BUF_ADDR, + IRO_MSTORM_OVERLAY_BUF_ADDR, + IRO_USTORM_OVERLAY_BUF_ADDR, + IRO_TSTORM_LL2_RX_PRODS_GTT, + IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT, + IRO_CORE_LL2_USTORM_PER_QUEUE_STAT, + IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT, + IRO_MSTORM_QUEUE_STAT, + IRO_MSTORM_TPA_TIMEOUT_US, + IRO_MSTORM_ETH_VF_PRODS, + IRO_MSTORM_ETH_PF_PRODS_GTT, + IRO_MSTORM_ETH_PF_STAT, + IRO_USTORM_QUEUE_STAT, + IRO_USTORM_ETH_PF_STAT, + IRO_PSTORM_QUEUE_STAT, + IRO_PSTORM_ETH_PF_STAT, + IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT, + IRO_TSTORM_ETH_PRS_INPUT, + IRO_ETH_RX_RATE_LIMIT, + IRO_TSTORM_ETH_RSS_UPDATE_GTT, + IRO_XSTORM_ETH_QUEUE_ZONE_GTT, + IRO_YSTORM_TOE_CQ_PROD, + IRO_USTORM_TOE_CQ_PROD, + IRO_USTORM_TOE_GRQ_PROD, + IRO_TSTORM_SCSI_CMDQ_CONS_GTT, + IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT, + IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT, + IRO_TSTORM_ISCSI_RX_STATS, + IRO_MSTORM_ISCSI_RX_STATS, + IRO_USTORM_ISCSI_RX_STATS, + IRO_XSTORM_ISCSI_TX_STATS, + IRO_YSTORM_ISCSI_TX_STATS, + IRO_PSTORM_ISCSI_TX_STATS, + IRO_TSTORM_FCOE_RX_STATS, + IRO_PSTORM_FCOE_TX_STATS, + IRO_PSTORM_RDMA_QUEUE_STAT, + IRO_TSTORM_RDMA_QUEUE_STAT, + IRO_XSTORM_RDMA_ASSERT_LEVEL, + IRO_YSTORM_RDMA_ASSERT_LEVEL, + IRO_PSTORM_RDMA_ASSERT_LEVEL, + IRO_TSTORM_RDMA_ASSERT_LEVEL, + IRO_MSTORM_RDMA_ASSERT_LEVEL, + IRO_USTORM_RDMA_ASSERT_LEVEL, + IRO_XSTORM_IWARP_RXMIT_STATS, + IRO_TSTORM_ROCE_EVENTS_STAT, + IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS, + IRO_YSTORM_ROCE_ERROR_STATS, + IRO_PSTORM_ROCE_DCQCN_SENT_STATS, + IRO_USTORM_ROCE_CQE_STATS, +}; + +/* Pstorm LiteL2 queue statistics */ + +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \ + (IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].base \ + + ((core_tx_stats_id) * IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].m1)) +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE \ + (IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].size) + +/* Tstorm LightL2 queue statistics */ +#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ + (IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].base \ + + ((core_rx_queue_id) * IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].m1)) +#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE \ + (IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].size) + +/* Ustorm LiteL2 queue statistics */ +#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ + (IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].base \ + + ((core_rx_queue_id) * 
IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].m1)) +#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE \ + (IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].size) + +/* Tstorm Eth limit Rx rate */ +#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \ + (IRO[IRO_ETH_RX_RATE_LIMIT].base \ + + ((pf_id) * IRO[IRO_ETH_RX_RATE_LIMIT].m1)) +#define ETH_RX_RATE_LIMIT_SIZE (IRO[IRO_ETH_RX_RATE_LIMIT].size) + +/* Mstorm ETH PF queues producers */ +#define MSTORM_ETH_PF_PRODS_GTT_OFFSET(queue_id) \ + (IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].base \ + + ((queue_id) * IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].m1)) +#define MSTORM_ETH_PF_PRODS_GTT_SIZE (IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].size) + +/* Mstorm pf statistics */ +#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \ + (IRO[IRO_MSTORM_ETH_PF_STAT].base \ + + ((pf_id) * IRO[IRO_MSTORM_ETH_PF_STAT].m1)) +#define MSTORM_ETH_PF_STAT_SIZE (IRO[IRO_MSTORM_ETH_PF_STAT].size) + +/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone + * size mode. + */ +#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \ + (IRO[IRO_MSTORM_ETH_VF_PRODS].base \ + + ((vf_id) * IRO[IRO_MSTORM_ETH_VF_PRODS].m1) \ + + ((vf_queue_id) * IRO[IRO_MSTORM_ETH_VF_PRODS].m2)) +#define MSTORM_ETH_VF_PRODS_SIZE (IRO[IRO_MSTORM_ETH_VF_PRODS].size) + +/* Mstorm Integration Test Data */ +#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_MSTORM_INTEG_TEST_DATA].base) +#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_MSTORM_INTEG_TEST_DATA].size) + +/* Mstorm iSCSI RX stats */ +#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_MSTORM_ISCSI_RX_STATS].base \ + + ((storage_func_id) * IRO[IRO_MSTORM_ISCSI_RX_STATS].m1)) +#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_MSTORM_ISCSI_RX_STATS].size) + +/* Mstorm overlay buffer host address */ +#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_MSTORM_OVERLAY_BUF_ADDR].base) +#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_MSTORM_OVERLAY_BUF_ADDR].size) + +/* Mstorm queue statistics */ +#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[IRO_MSTORM_QUEUE_STAT].base \ + + ((stat_counter_id) * IRO[IRO_MSTORM_QUEUE_STAT].m1)) +#define MSTORM_QUEUE_STAT_SIZ (IRO[IRO_MSTORM_QUEUE_STAT].size) + +/* Mstorm error level for assert */ +#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].m1)) +#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].size) + +/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */ +#define MSTORM_SCSI_BDQ_EXT_PROD_GTT_OFFSET(storage_func_id, bdq_id) \ + (IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].base \ + + ((storage_func_id) * IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].m1) \ + + ((bdq_id) * IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].m2)) +#define MSTORM_SCSI_BDQ_EXT_PROD_GTT_SIZE \ + (IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].size) + +/* TPA agregation timeout in us resolution (on ASIC) */ +#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[IRO_MSTORM_TPA_TIMEOUT_US].base) +#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[IRO_MSTORM_TPA_TIMEOUT_US].size) + +/* Control frame's EthType configuration for TX control frame security */ +#define PSTORM_CTL_FRAME_ETHTYPE_GTT_OFFSET(ethtype_id) \ + (IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].base \ + + ((ethtype_id) * IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].m1)) +#define PSTORM_CTL_FRAME_ETHTYPE_GTT_SIZE \ + (IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].size) + +/* Pstorm pf statistics */ +#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \ + (IRO[IRO_PSTORM_ETH_PF_STAT].base \ + + ((pf_id) * IRO[IRO_PSTORM_ETH_PF_STAT].m1)) +#define 
PSTORM_ETH_PF_STAT_SIZE (IRO[IRO_PSTORM_ETH_PF_STAT].size) + +/* Pstorm FCoE TX stats */ +#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ + (IRO[IRO_PSTORM_FCOE_TX_STATS].base \ + + ((pf_id) * IRO[IRO_PSTORM_FCOE_TX_STATS].m1)) +#define PSTORM_FCOE_TX_STATS_SIZE (IRO[IRO_PSTORM_FCOE_TX_STATS].size) + +/* Pstorm Integration Test Data */ +#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_PSTORM_INTEG_TEST_DATA].base) +#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_PSTORM_INTEG_TEST_DATA].size) + +/* Pstorm iSCSI TX stats */ +#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_PSTORM_ISCSI_TX_STATS].base \ + + ((storage_func_id) * IRO[IRO_PSTORM_ISCSI_TX_STATS].m1)) +#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_PSTORM_ISCSI_TX_STATS].size) + +/* Pstorm overlay buffer host address */ +#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_PSTORM_OVERLAY_BUF_ADDR].base) +#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_PSTORM_OVERLAY_BUF_ADDR].size) + +/* Pstorm LL2 packet duplication configuration. Use pstorm_pkt_dup_cfg + * data type. + */ +#define PSTORM_PKT_DUPLICATION_CFG_OFFSET(pf_id) \ + (IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].base \ + + ((pf_id) * IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].m1)) +#define PSTORM_PKT_DUPLICATION_CFG_SIZE \ + (IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].size) + +/* Pstorm queue statistics */ +#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[IRO_PSTORM_QUEUE_STAT].base \ + + ((stat_counter_id) * IRO[IRO_PSTORM_QUEUE_STAT].m1)) +#define PSTORM_QUEUE_STAT_SIZE (IRO[IRO_PSTORM_QUEUE_STAT].size) + +/* Pstorm error level for assert */ +#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].m1)) +#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].size) + +/* Pstorm RDMA queue statistics */ +#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ + (IRO[IRO_PSTORM_RDMA_QUEUE_STAT].base \ + + ((rdma_stat_counter_id) * IRO[IRO_PSTORM_RDMA_QUEUE_STAT].m1)) +#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[IRO_PSTORM_RDMA_QUEUE_STAT].size) + +/* DCQCN Sent Statistics */ +#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \ + (IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].base \ + + ((roce_pf_id) * IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].m1)) +#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE \ + (IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].size) + +/* Tstorm last parser message */ +#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[IRO_TSTORM_ETH_PRS_INPUT].base) +#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[IRO_TSTORM_ETH_PRS_INPUT].size) + +/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0. + * Use eth_tstorm_rss_update_data for update. 
+ */ +#define TSTORM_ETH_RSS_UPDATE_GTT_OFFSET(pf_id) \ + (IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].base \ + + ((pf_id) * IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].m1)) +#define TSTORM_ETH_RSS_UPDATE_GTT_SIZE\ + (IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].size) + +/* Tstorm FCoE RX stats */ +#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ + (IRO[IRO_TSTORM_FCOE_RX_STATS].base \ + + ((pf_id) * IRO[IRO_TSTORM_FCOE_RX_STATS].m1)) +#define TSTORM_FCOE_RX_STATS_SIZE (IRO[IRO_TSTORM_FCOE_RX_STATS].size) + +/* Tstorm Integration Test Data */ +#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_TSTORM_INTEG_TEST_DATA].base) +#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_TSTORM_INTEG_TEST_DATA].size) + +/* Tstorm iSCSI RX stats */ +#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_TSTORM_ISCSI_RX_STATS].base \ + + ((storage_func_id) * IRO[IRO_TSTORM_ISCSI_RX_STATS].m1)) +#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_TSTORM_ISCSI_RX_STATS].size) + +/* Tstorm ll2 port statistics */ +#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \ + (IRO[IRO_TSTORM_LL2_PORT_STAT].base \ + + ((port_id) * IRO[IRO_TSTORM_LL2_PORT_STAT].m1)) +#define TSTORM_LL2_PORT_STAT_SIZE (IRO[IRO_TSTORM_LL2_PORT_STAT].size) + +/* Tstorm producers */ +#define TSTORM_LL2_RX_PRODS_GTT_OFFSET(core_rx_queue_id) \ + (IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].base \ + + ((core_rx_queue_id) * IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].m1)) +#define TSTORM_LL2_RX_PRODS_GTT_SIZE (IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].size) + +/* Tstorm overlay buffer host address */ +#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_TSTORM_OVERLAY_BUF_ADDR].base) + +#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_TSTORM_OVERLAY_BUF_ADDR].size) + +/* Tstorm LL2 packet duplication configuration. + * Use tstorm_pkt_dup_cfg data type. + */ +#define TSTORM_PKT_DUPLICATION_CFG_OFFSET(pf_id) \ + (IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].base \ + + ((pf_id) * IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].m1)) +#define TSTORM_PKT_DUPLICATION_CFG_SIZE \ + (IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].size) + +/* Tstorm port statistics */ +#define TSTORM_PORT_STAT_OFFSET(port_id) \ + (IRO[IRO_TSTORM_PORT_STAT].base \ + + ((port_id) * IRO[IRO_TSTORM_PORT_STAT].m1)) +#define TSTORM_PORT_STAT_SIZE (IRO[IRO_TSTORM_PORT_STAT].size) + +/* Tstorm error level for assert */ +#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].m1)) +#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].size) + +/* Tstorm RDMA queue statistics */ +#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ + (IRO[IRO_TSTORM_RDMA_QUEUE_STAT].base \ + + ((rdma_stat_counter_id) * IRO[IRO_TSTORM_RDMA_QUEUE_STAT].m1)) +#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[IRO_TSTORM_RDMA_QUEUE_STAT].size) + +/* Tstorm RoCE Event Statistics */ +#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \ + (IRO[IRO_TSTORM_ROCE_EVENTS_STAT].base \ + + ((roce_pf_id) * IRO[IRO_TSTORM_ROCE_EVENTS_STAT].m1)) +#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[IRO_TSTORM_ROCE_EVENTS_STAT].size) + +/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID, + * BDqueue-id. 
+ */ +#define TSTORM_SCSI_BDQ_EXT_PROD_GTT_OFFSET(storage_func_id, bdq_id) \ + (IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].base \ + + ((storage_func_id) * IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].m1) \ + + ((bdq_id) * IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].m2)) +#define TSTORM_SCSI_BDQ_EXT_PROD_GTT_SIZE \ + (IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].size) + +/* Tstorm cmdq-cons of given command queue-id */ +#define TSTORM_SCSI_CMDQ_CONS_GTT_OFFSET(cmdq_queue_id) \ + (IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].base \ + + ((cmdq_queue_id) * IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].m1)) +#define TSTORM_SCSI_CMDQ_CONS_GTT_SIZE \ + (IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].size) + +/* Ustorm Common Queue ring consumer */ +#define USTORM_COMMON_QUEUE_CONS_GTT_OFFSET(queue_zone_id) \ + (IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].base \ + + ((queue_zone_id) * IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].m1)) +#define USTORM_COMMON_QUEUE_CONS_GTT_SIZE \ + (IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].size) + +/* Ustorm Event ring consumer */ +#define USTORM_EQE_CONS_GTT_OFFSET(pf_id) \ + (IRO[IRO_USTORM_EQE_CONS_GTT].base \ + + ((pf_id) * IRO[IRO_USTORM_EQE_CONS_GTT].m1)) +#define USTORM_EQE_CONS_GTT_SIZE (IRO[IRO_USTORM_EQE_CONS_GTT].size) + +/* Ustorm pf statistics */ +#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \ + (IRO[IRO_USTORM_ETH_PF_STAT].base \ + + ((pf_id) * IRO[IRO_USTORM_ETH_PF_STAT].m1)) +#define USTORM_ETH_PF_STAT_SIZE (IRO[IRO_USTORM_ETH_PF_STAT].size) + +/* Ustorm eth queue zone */ +#define USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(queue_zone_id) \ + (IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].base \ + + ((queue_zone_id) * IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].m1)) +#define USTORM_ETH_QUEUE_ZONE_GTT_SIZE (IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].size) + +/* Ustorm Final flr cleanup ack */ +#define USTORM_FLR_FINAL_ACK_GTT_OFFSET(pf_id) \ + (IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].base \ + + ((pf_id) * IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].m1)) +#define USTORM_FLR_FINAL_ACK_GTT_SIZE (IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].size) + +/* Ustorm Integration Test Data */ +#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_USTORM_INTEG_TEST_DATA].base) +#define USTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_USTORM_INTEG_TEST_DATA].size) + +/* Ustorm iSCSI RX stats */ +#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_USTORM_ISCSI_RX_STATS].base \ + + ((storage_func_id) * IRO[IRO_USTORM_ISCSI_RX_STATS].m1)) +#define USTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_USTORM_ISCSI_RX_STATS].size) + +/* Ustorm overlay buffer host address */ +#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_USTORM_OVERLAY_BUF_ADDR].base) +#define USTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_USTORM_OVERLAY_BUF_ADDR].size) + +/* Ustorm queue statistics */ +#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[IRO_USTORM_QUEUE_STAT].base \ + + ((stat_counter_id) * IRO[IRO_USTORM_QUEUE_STAT].m1)) +#define USTORM_QUEUE_STAT_SIZE (IRO[IRO_USTORM_QUEUE_STAT].size) + +/* Ustorm error level for assert */ +#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].m1)) +#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].size) + +/* RoCE CQEs Statistics */ +#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \ + (IRO[IRO_USTORM_ROCE_CQE_STATS].base \ + + ((roce_pf_id) * IRO[IRO_USTORM_ROCE_CQE_STATS].m1)) +#define USTORM_ROCE_CQE_STATS_SIZE (IRO[IRO_USTORM_ROCE_CQE_STATS].size) + +/* Ustorm cqe producer */ +#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ + (IRO[IRO_USTORM_TOE_CQ_PROD].base \ + + ((rss_id) * 
IRO[IRO_USTORM_TOE_CQ_PROD].m1)) +#define USTORM_TOE_CQ_PROD_SIZE (IRO[IRO_USTORM_TOE_CQ_PROD].size) + +/* Ustorm grq producer */ +#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ + (IRO[IRO_USTORM_TOE_GRQ_PROD].base \ + + ((pf_id) * IRO[IRO_USTORM_TOE_GRQ_PROD].m1)) +#define USTORM_TOE_GRQ_PROD_SIZE (IRO[IRO_USTORM_TOE_GRQ_PROD].size) + +/* Ustorm VF-PF Channel ready flag */ +#define USTORM_VF_PF_CHANNEL_READY_GTT_OFFSET(vf_id) \ + (IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].base \ + + ((vf_id) * IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].m1)) +#define USTORM_VF_PF_CHANNEL_READY_GTT_SIZE \ + (IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].size) + +/* Xstorm queue zone */ +#define XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(queue_id) \ + (IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].base \ + + ((queue_id) * IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].m1)) +#define XSTORM_ETH_QUEUE_ZONE_GTT_SIZE (IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].size) + +/* Xstorm Integration Test Data */ +#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_XSTORM_INTEG_TEST_DATA].base) +#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_XSTORM_INTEG_TEST_DATA].size) + +/* Xstorm iSCSI TX stats */ +#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_XSTORM_ISCSI_TX_STATS].base \ + + ((storage_func_id) * IRO[IRO_XSTORM_ISCSI_TX_STATS].m1)) +#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_XSTORM_ISCSI_TX_STATS].size) + +/* Xstorm iWARP rxmit stats */ +#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \ + (IRO[IRO_XSTORM_IWARP_RXMIT_STATS].base \ + + ((pf_id) * IRO[IRO_XSTORM_IWARP_RXMIT_STATS].m1)) +#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[IRO_XSTORM_IWARP_RXMIT_STATS].size) + +/* Xstorm overlay buffer host address */ +#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_XSTORM_OVERLAY_BUF_ADDR].base) +#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_XSTORM_OVERLAY_BUF_ADDR].size) + +/* Xstorm common PQ info */ +#define XSTORM_PQ_INFO_OFFSET(pq_id) \ + (IRO[IRO_XSTORM_PQ_INFO].base \ + + ((pq_id) * IRO[IRO_XSTORM_PQ_INFO].m1)) +#define XSTORM_PQ_INFO_SIZE (IRO[IRO_XSTORM_PQ_INFO].size) + +/* Xstorm error level for assert */ +#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].m1)) +#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].size) + +/* Ystorm flow control mode. 
Use enum fw_flow_ctrl_mode */ +#define YSTORM_FLOW_CONTROL_MODE_GTT_OFFSET \ + (IRO[IRO_YSTORM_FLOW_CONTROL_MODE_GTT].base) +#define YSTORM_FLOW_CONTROL_MODE_GTT_SIZE \ + (IRO[IRO_YSTORM_FLOW_CONTROL_MODE_GTT].size) + +/* Ystorm Integration Test Data */ +#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_YSTORM_INTEG_TEST_DATA].base) +#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_YSTORM_INTEG_TEST_DATA].size) + +/* Ystorm iSCSI TX stats */ +#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_YSTORM_ISCSI_TX_STATS].base \ + + ((storage_func_id) * IRO[IRO_YSTORM_ISCSI_TX_STATS].m1)) +#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_YSTORM_ISCSI_TX_STATS].size) + +/* Ystorm overlay buffer host address */ +#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_YSTORM_OVERLAY_BUF_ADDR].base) +#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_YSTORM_OVERLAY_BUF_ADDR].size) + +/* Ystorm error level for assert */ +#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].m1)) +#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].size) + +/* DCQCN Received Statistics */ +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \ + (IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].base \ + + ((roce_pf_id) * IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].m1)) +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE \ + (IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].size) + +/* RoCE Error Statistics */ +#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \ + (IRO[IRO_YSTORM_ROCE_ERROR_STATS].base \ + + ((roce_pf_id) * IRO[IRO_YSTORM_ROCE_ERROR_STATS].m1)) +#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[IRO_YSTORM_ROCE_ERROR_STATS].size) + +/* Ystorm cqe producer */ +#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ + (IRO[IRO_YSTORM_TOE_CQ_PROD].base \ + + ((rss_id) * IRO[IRO_YSTORM_TOE_CQ_PROD].m1)) +#define YSTORM_TOE_CQ_PROD_SIZE (IRO[IRO_YSTORM_TOE_CQ_PROD].size) + +/* Per-chip offsets in iro_arr in dwords */ +#define E4_IRO_ARR_OFFSET 0 +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index db926d8b3033..511ab214eb9c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -29,6 +29,7 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_int.h" +#include "qed_iro_hsi.h" #include "qed_iscsi.h" #include "qed_ll2.h" #include "qed_mcp.h" @@ -627,10 +628,9 @@ static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn, { if (RESC_NUM(p_hwfn, QED_BDQ)) { return (u8 __iomem *)p_hwfn->regview + - GTT_BAR0_MAP_REG_MSDM_RAM + - MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn, - QED_BDQ), - bdq_id); + GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM, + MSTORM_SCSI_BDQ_EXT_PROD, + RESC_START(p_hwfn, QED_BDQ), bdq_id); } else { DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); return NULL; @@ -642,10 +642,9 @@ static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn, { if (RESC_NUM(p_hwfn, QED_BDQ)) { return (u8 __iomem *)p_hwfn->regview + - GTT_BAR0_MAP_REG_TSDM_RAM + - TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn, - QED_BDQ), - bdq_id); + GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM, + TSTORM_SCSI_BDQ_EXT_PROD, + RESC_START(p_hwfn, QED_BDQ), bdq_id); } else { DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); return NULL; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h index dab7a5d09f87..dec2b00259d4 100644 --- 
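Every *_OFFSET()/*_SIZE() pair in the new qed_iro_hsi.h above follows one pattern: the enum value indexes the IRO table, .base gives the RAM offset of element zero, and .m1/.m2 are per-index strides. The entry layout sketched below is an illustrative assumption (the real struct iro comes from the driver's HSI headers and is not part of this hunk), but it shows how a two-index macro such as the BDQ external-producer offset resolves to an address.

#include <stdint.h>

/* Assumed, simplified layout of one IRO table entry. */
struct iro_entry_sketch {
        uint32_t base;  /* RAM offset of element 0 */
        uint16_t m1;    /* stride for the first index (e.g. storage_func_id) */
        uint16_t m2;    /* stride for the second index (e.g. bdq_id), 0 if unused */
        uint16_t size;  /* size of one element, what the *_SIZE macros return */
};

/* offset = base + id1 * m1 + id2 * m2; single-index users pass id2 == 0. */
static uint32_t iro_offset_sketch(const struct iro_entry_sketch *e,
                                  uint32_t id1, uint32_t id2)
{
        return e->base + id1 * e->m1 + id2 * e->m2;
}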
a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h @@ -34,10 +34,13 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn); void qed_iscsi_free(struct qed_hwfn *p_hwfn); /** - * @brief - Fills provided statistics struct with statistics. + * qed_get_protocol_stats_iscsi(): Fills provided statistics + * struct with statistics. * - * @param cdev - * @param stats - points to struct that will be filled with statistics. + * @cdev: Qed dev pointer. + * @stats: Points to struct that will be filled with statistics. + * + * Return: Void. */ void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, struct qed_mcp_iscsi_stats *stats); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 186d0048a9d1..1d1d4caad680 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -114,6 +114,8 @@ qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn, RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) + p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle; + p_ramrod->tcp.tx_sws_timer = cpu_to_le16(QED_TX_SWS_TIMER_DFLT); + p_ramrod->tcp.two_msl_timer = cpu_to_le32(QED_TWO_MSL_TIMER_DFLT); p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT; return; diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index dfaf10edfabf..2edd6bf64a3c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -28,6 +28,7 @@ #include "qed_dev_api.h" #include <linux/qed/qed_eth_if.h> #include "qed_hsi.h" +#include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_int.h" #include "qed_l2.h" @@ -37,7 +38,6 @@ #include "qed_sp.h" #include "qed_sriov.h" - #define QED_MAX_SGES_NUM 16 #define CRC32_POLY 0x1edc6f41 @@ -904,9 +904,10 @@ qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn, { u32 init_prod_val = 0; - *pp_prod = p_hwfn->regview + - GTT_BAR0_MAP_REG_MSDM_RAM + - MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id); + *pp_prod = (u8 __iomem *) + p_hwfn->regview + + GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM, + MSTORM_ETH_PF_PRODS, p_cid->abs.queue_id); /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), @@ -1111,7 +1112,6 @@ qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn, { int rc; - rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid, pbl_addr, pbl_size, qed_get_cm_pq_idx_mcos(p_hwfn, tc)); @@ -2010,7 +2010,7 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_spq_comp_cb *p_cb, struct qed_ntuple_filter_params *p_params) { - struct rx_update_gft_filter_data *p_ramrod = NULL; + struct rx_update_gft_filter_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; u16 abs_rx_q_id = 0; @@ -2031,7 +2031,7 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, } rc = qed_sp_init_request(p_hwfn, &p_ent, - ETH_RAMROD_GFT_UPDATE_FILTER, + ETH_RAMROD_RX_UPDATE_GFT_FILTER, PROTOCOLID_ETH, &init_data); if (rc) return rc; @@ -2100,7 +2100,7 @@ int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, CAU_SB_ENTRY_TIMER_RES0); address = BAR0_MAP_REG_USDM_RAM + - USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id); coalesce = qed_rd(p_hwfn, p_ptt, address); is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); @@ -2134,7 +2134,7 @@ int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn, CAU_SB_ENTRY_TIMER_RES1); address = BAR0_MAP_REG_XSDM_RAM + - 
XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id); coalesce = qed_rd(p_hwfn, p_ptt, address); is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); @@ -2763,25 +2763,6 @@ static int qed_configure_filter_mcast(struct qed_dev *cdev, return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL); } -static int qed_configure_filter(struct qed_dev *cdev, - struct qed_filter_params *params) -{ - enum qed_filter_rx_mode_type accept_flags; - - switch (params->type) { - case QED_FILTER_TYPE_UCAST: - return qed_configure_filter_ucast(cdev, ¶ms->filter.ucast); - case QED_FILTER_TYPE_MCAST: - return qed_configure_filter_mcast(cdev, ¶ms->filter.mcast); - case QED_FILTER_TYPE_RX_MODE: - accept_flags = params->filter.accept_flags; - return qed_configure_filter_rx_mode(cdev, accept_flags); - default: - DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type); - return -EINVAL; - } -} - static int qed_configure_arfs_searcher(struct qed_dev *cdev, enum qed_filter_config_mode mode) { @@ -2867,7 +2848,7 @@ static int qed_fp_cqe_completion(struct qed_dev *dev, cqe); } -static int qed_req_bulletin_update_mac(struct qed_dev *cdev, u8 *mac) +static int qed_req_bulletin_update_mac(struct qed_dev *cdev, const u8 *mac) { int i, ret; @@ -2904,7 +2885,9 @@ static const struct qed_eth_ops qed_eth_ops_pass = { .q_rx_stop = &qed_stop_rxq, .q_tx_start = &qed_start_txq, .q_tx_stop = &qed_stop_txq, - .filter_config = &qed_configure_filter, + .filter_config_rx_mode = &qed_configure_filter_rx_mode, + .filter_config_ucast = &qed_configure_filter_ucast, + .filter_config_mcast = &qed_configure_filter_mcast, .fastpath_stop = &qed_fastpath_stop, .eth_cqe_completion = &qed_fp_cqe_completion, .get_vport_stats = &qed_get_vport_stats, diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 8eceeebb1a7b..a538cf478c14 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -92,18 +92,18 @@ struct qed_filter_mcast { }; /** - * @brief qed_eth_rx_queue_stop - This ramrod closes an Rx queue + * qed_eth_rx_queue_stop(): This ramrod closes an Rx queue. * - * @param p_hwfn - * @param p_rxq Handler of queue to close - * @param eq_completion_only If True completion will be on - * EQe, if False completion will be - * on EQe if p_hwfn opaque - * different from the RXQ opaque - * otherwise on CQe. - * @param cqe_completion If True completion will be - * receive on CQe. - * @return int + * @p_hwfn: HW device data. + * @p_rxq: Handler of queue to close + * @eq_completion_only: If True completion will be on + * EQe, if False completion will be + * on EQe if p_hwfn opaque + * different from the RXQ opaque + * otherwise on CQe. + * @cqe_completion: If True completion will be receive on CQe. + * + * Return: Int. */ int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, @@ -111,12 +111,12 @@ qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, bool eq_completion_only, bool cqe_completion); /** - * @brief qed_eth_tx_queue_stop - closes a Tx queue + * qed_eth_tx_queue_stop(): Closes a Tx queue. * - * @param p_hwfn - * @param p_txq - handle to Tx queue needed to be closed + * @p_hwfn: HW device data. + * @p_txq: handle to Tx queue needed to be closed. * - * @return int + * Return: Int. 
*/ int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq); @@ -146,7 +146,6 @@ struct qed_sp_vport_start_params { int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, struct qed_sp_vport_start_params *p_params); - struct qed_filter_accept_flags { u8 update_rx_mode_config; u8 update_tx_mode_config; @@ -205,16 +204,15 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn, struct qed_spq_comp_cb *p_comp_data); /** - * @brief qed_sp_vport_stop - - * - * This ramrod closes a VPort after all its RX and TX queues are terminated. - * An Assert is generated if any queues are left open. + * qed_sp_vport_stop: This ramrod closes a VPort after all its + * RX and TX queues are terminated. + * An Assert is generated if any queues are left open. * - * @param p_hwfn - * @param opaque_fid - * @param vport_id VPort ID + * @p_hwfn: HW device data. + * @opaque_fid: Opaque FID + * @vport_id: VPort ID. * - * @return int + * Return: Int. */ int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id); @@ -225,22 +223,21 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, struct qed_spq_comp_cb *p_comp_data); /** - * @brief qed_sp_rx_eth_queues_update - + * qed_sp_eth_rx_queues_update(): This ramrod updates an RX queue. + * It is used for setting the active state + * of the queue and updating the TPA and + * SGE parameters. + * @p_hwfn: HW device data. + * @pp_rxq_handlers: An array of queue handlers to be updated. + * @num_rxqs: number of queues to update. + * @complete_cqe_flg: Post completion to the CQE Ring if set. + * @complete_event_flg: Post completion to the Event Ring if set. + * @comp_mode: Comp mode. + * @p_comp_data: Pointer Comp data. * - * This ramrod updates an RX queue. It is used for setting the active state - * of the queue and updating the TPA and SGE parameters. + * Return: Int. * - * @note At the moment - only used by non-linux VFs. - * - * @param p_hwfn - * @param pp_rxq_handlers An array of queue handlers to be updated. - * @param num_rxqs number of queues to update. - * @param complete_cqe_flg Post completion to the CQE Ring if set - * @param complete_event_flg Post completion to the Event Ring if set - * @param comp_mode - * @param p_comp_data - * - * @return int + * Note At the moment - only used by non-linux VFs. */ int @@ -257,30 +254,32 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats); void qed_reset_vport_stats(struct qed_dev *cdev); /** - * *@brief qed_arfs_mode_configure - - * - **Enable or disable rfs mode. It must accept atleast one of tcp or udp true - **and atleast one of ipv4 or ipv6 true to enable rfs mode. + * qed_arfs_mode_configure(): Enable or disable rfs mode. + * It must accept at least one of tcp or udp true + * and at least one of ipv4 or ipv6 true to enable + * rfs mode. * - **@param p_hwfn - **@param p_ptt - **@param p_cfg_params - arfs mode configuration parameters. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_cfg_params: arfs mode configuration parameters. * + * Return. Void. */ void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_arfs_config_params *p_cfg_params); /** - * @brief - qed_configure_rfs_ntuple_filter + * qed_configure_rfs_ntuple_filter(): This ramrod should be used to add + * or remove arfs hw filter * - * This ramrod should be used to add or remove arfs hw filter + * @p_hwfn: HW device data. 
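The qed_arfs_mode_configure() comment above states the enable rule exactly: at least one of tcp/udp and at least one of ipv4/ipv6 must be true. As a one-line sanity check (illustrative only, not a driver helper):

#include <stdbool.h>

/* RFS needs at least one L4 protocol and at least one IP version enabled. */
static bool arfs_cfg_valid(bool tcp, bool udp, bool ipv4, bool ipv6)
{
        return (tcp || udp) && (ipv4 || ipv6);
}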
+ * @p_cb: Used for QED_SPQ_MODE_CB,where client would initialize + * it with cookie and callback function address, if not + * using this mode then client must pass NULL. + * @p_params: Pointer to params. * - * @params p_hwfn - * @params p_cb - Used for QED_SPQ_MODE_CB,where client would initialize - * it with cookie and callback function address, if not - * using this mode then client must pass NULL. - * @params p_params + * Return: Void. */ int qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, @@ -374,16 +373,17 @@ qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, struct qed_sp_vport_start_params *p_params); /** - * @brief - Starts an Rx queue, when queue_cid is already prepared + * qed_eth_rxq_start_ramrod(): Starts an Rx queue, when queue_cid is + * already prepared * - * @param p_hwfn - * @param p_cid - * @param bd_max_bytes - * @param bd_chain_phys_addr - * @param cqe_pbl_addr - * @param cqe_pbl_size + * @p_hwfn: HW device data. + * @p_cid: Pointer CID. + * @bd_max_bytes: Max bytes. + * @bd_chain_phys_addr: Chain physcial address. + * @cqe_pbl_addr: PBL address. + * @cqe_pbl_size: PBL size. * - * @return int + * Return: Int. */ int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, @@ -393,15 +393,16 @@ qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size); /** - * @brief - Starts a Tx queue, where queue_cid is already prepared + * qed_eth_txq_start_ramrod(): Starts a Tx queue, where queue_cid is + * already prepared * - * @param p_hwfn - * @param p_cid - * @param pbl_addr - * @param pbl_size - * @param p_pq_params - parameters for choosing the PQ for this Tx queue + * @p_hwfn: HW device data. + * @p_cid: Pointer CID. + * @pbl_addr: PBL address. + * @pbl_size: PBL size. + * @pq_id: Parameters for choosing the PQ for this Tx queue. * - * @return int + * Return: Int. */ int qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index c46a7f756ed5..ed274f033626 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -28,6 +28,7 @@ #include "qed_cxt.h" #include "qed_dev_api.h" #include "qed_hsi.h" +#include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_int.h" #include "qed_ll2.h" @@ -43,6 +44,8 @@ #define QED_LL2_TX_SIZE (256) #define QED_LL2_RX_SIZE (4096) +#define QED_LL2_INVALID_STATS_ID 0xff + struct qed_cb_ll2_info { int rx_cnt; u32 rx_size; @@ -62,6 +65,29 @@ struct qed_ll2_buffer { dma_addr_t phys_addr; }; +static u8 qed_ll2_handle_to_stats_id(struct qed_hwfn *p_hwfn, + u8 ll2_queue_type, u8 qid) +{ + u8 stats_id; + + /* For legacy (RAM based) queues, the stats_id will be set as the + * queue_id. Otherwise (context based queue), it will be set to + * the "abs_pf_id" offset from the end of the RAM based queue IDs. + * If the final value exceeds the total counters amount, return + * INVALID value to indicate that the stats for this connection should + * be disabled. 
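The comment above describes the stats-counter assignment that qed_ll2_handle_to_stats_id() implements: legacy (RAM based) queues reuse the queue id, context based queues take a per-PF slot after the RAM range, and anything beyond the counter pool means stats are disabled for that connection. A compact sketch of the same mapping, with placeholder limits standing in for the firmware constants MAX_NUM_LL2_RX_RAM_QUEUES and MAX_NUM_LL2_TX_STATS_COUNTERS:

#include <stdbool.h>
#include <stdint.h>

#define RAM_QUEUES_SKETCH        24   /* placeholder for MAX_NUM_LL2_RX_RAM_QUEUES */
#define TX_STATS_COUNTERS_SKETCH 48   /* placeholder for MAX_NUM_LL2_TX_STATS_COUNTERS */
#define INVALID_STATS_ID         0xff

static uint8_t ll2_stats_id_sketch(bool legacy_queue, uint8_t qid, uint8_t abs_pf_id)
{
        uint8_t stats_id = legacy_queue ? qid
                                        : (uint8_t)(RAM_QUEUES_SKETCH + abs_pf_id);

        return stats_id < TX_STATS_COUNTERS_SKETCH ? stats_id : INVALID_STATS_ID;
}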
+ */ + if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY) + stats_id = qid; + else + stats_id = MAX_NUM_LL2_RX_RAM_QUEUES + p_hwfn->abs_pf_id; + + if (stats_id < MAX_NUM_LL2_TX_STATS_COUNTERS) + return stats_id; + else + return QED_LL2_INVALID_STATS_ID; +} + static void qed_ll2b_complete_tx_packet(void *cxt, u8 connection_handle, void *cookie, @@ -106,7 +132,7 @@ static int qed_ll2_alloc_buffer(struct qed_dev *cdev, } static int qed_ll2_dealloc_buffer(struct qed_dev *cdev, - struct qed_ll2_buffer *buffer) + struct qed_ll2_buffer *buffer) { spin_lock_bh(&cdev->ll2->lock); @@ -352,7 +378,7 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) num_bds_in_packet = p_pkt->bd_used; list_del(&p_pkt->list_entry); - if (num_bds < num_bds_in_packet) { + if (unlikely(num_bds < num_bds_in_packet)) { DP_NOTICE(p_hwfn, "Rest of BDs does not cover whole packet\n"); goto out; @@ -462,7 +488,7 @@ qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn, if (!list_empty(&p_rx->active_descq)) p_pkt = list_first_entry(&p_rx->active_descq, struct qed_ll2_rx_packet, list_entry); - if (!p_pkt) { + if (unlikely(!p_pkt)) { DP_NOTICE(p_hwfn, "[%d] LL2 Rx completion but active_descq is empty\n", p_ll2_conn->input.conn_type); @@ -475,7 +501,7 @@ qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn, qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data); else qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data); - if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd) + if (unlikely(qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)) DP_NOTICE(p_hwfn, "Mismatch between active_descq and the LL2 Rx chain\n"); @@ -597,18 +623,18 @@ static bool qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn, struct core_rx_slow_path_cqe *p_cqe) { - struct ooo_opaque *iscsi_ooo; + struct ooo_opaque *ooo_opq; u32 cid; if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) return false; - iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data; - if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES) + ooo_opq = (struct ooo_opaque *)&p_cqe->opaque_data; + if (ooo_opq->ooo_opcode != TCP_EVENT_DELETE_ISLES) return false; /* Need to make a flush */ - cid = le32_to_cpu(iscsi_ooo->cid); + cid = le32_to_cpu(ooo_opq->cid); qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid); return true; @@ -624,7 +650,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, union core_rx_cqe_union *cqe = NULL; u16 cq_new_idx = 0, cq_old_idx = 0; struct qed_ooo_buffer *p_buffer; - struct ooo_opaque *iscsi_ooo; + struct ooo_opaque *ooo_opq; u8 placement_offset = 0; u8 cqe_type; @@ -645,7 +671,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, &cqe->rx_cqe_sp)) continue; - if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) { + if (unlikely(cqe_type != CORE_RX_CQE_TYPE_REGULAR)) { DP_NOTICE(p_hwfn, "Got a non-regular LB LL2 completion [type 0x%02x]\n", cqe_type); @@ -657,22 +683,21 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags); packet_length = le16_to_cpu(p_cqe_fp->packet_length); vlan = le16_to_cpu(p_cqe_fp->vlan); - iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data; - qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info, - iscsi_ooo); - cid = le32_to_cpu(iscsi_ooo->cid); + ooo_opq = (struct ooo_opaque *)&p_cqe_fp->opaque_data; + qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info, ooo_opq); + cid = le32_to_cpu(ooo_opq->cid); /* Process delete isle first */ - if (iscsi_ooo->drop_size) + if (ooo_opq->drop_size) qed_ooo_delete_isles(p_hwfn, 
p_hwfn->p_ooo_info, cid, - iscsi_ooo->drop_isle, - iscsi_ooo->drop_size); + ooo_opq->drop_isle, + ooo_opq->drop_size); - if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP) + if (ooo_opq->ooo_opcode == TCP_EVENT_NOP) continue; /* Now process create/add/join isles */ - if (list_empty(&p_rx->active_descq)) { + if (unlikely(list_empty(&p_rx->active_descq))) { DP_NOTICE(p_hwfn, "LL2 OOO RX chain has no submitted buffers\n" ); @@ -682,12 +707,12 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, p_pkt = list_first_entry(&p_rx->active_descq, struct qed_ll2_rx_packet, list_entry); - if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) || - (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) || - (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) || - (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) || - (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) { - if (!p_pkt) { + if (likely(ooo_opq->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE || + ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT || + ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT || + ooo_opq->ooo_opcode == TCP_EVENT_ADD_PEN || + ooo_opq->ooo_opcode == TCP_EVENT_JOIN)) { + if (unlikely(!p_pkt)) { DP_NOTICE(p_hwfn, "LL2 OOO RX packet is not valid\n"); return -EIO; @@ -701,19 +726,19 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, qed_chain_consume(&p_rx->rxq_chain); list_add_tail(&p_pkt->list_entry, &p_rx->free_descq); - switch (iscsi_ooo->ooo_opcode) { + switch (ooo_opq->ooo_opcode) { case TCP_EVENT_ADD_NEW_ISLE: qed_ooo_add_new_isle(p_hwfn, p_hwfn->p_ooo_info, cid, - iscsi_ooo->ooo_isle, + ooo_opq->ooo_isle, p_buffer); break; case TCP_EVENT_ADD_ISLE_RIGHT: qed_ooo_add_new_buffer(p_hwfn, p_hwfn->p_ooo_info, cid, - iscsi_ooo->ooo_isle, + ooo_opq->ooo_isle, p_buffer, QED_OOO_RIGHT_BUF); break; @@ -721,7 +746,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, qed_ooo_add_new_buffer(p_hwfn, p_hwfn->p_ooo_info, cid, - iscsi_ooo->ooo_isle, + ooo_opq->ooo_isle, p_buffer, QED_OOO_LEFT_BUF); break; @@ -729,13 +754,12 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, qed_ooo_add_new_buffer(p_hwfn, p_hwfn->p_ooo_info, cid, - iscsi_ooo->ooo_isle + - 1, + ooo_opq->ooo_isle + 1, p_buffer, QED_OOO_LEFT_BUF); qed_ooo_join_isles(p_hwfn, p_hwfn->p_ooo_info, - cid, iscsi_ooo->ooo_isle); + cid, ooo_opq->ooo_isle); break; case TCP_EVENT_ADD_PEN: num_ooo_add_to_peninsula++; @@ -747,7 +771,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, } else { DP_NOTICE(p_hwfn, "Unexpected event (%d) TX OOO completion\n", - iscsi_ooo->ooo_opcode); + ooo_opq->ooo_opcode); } } @@ -859,16 +883,16 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) u16 new_idx = 0, num_bds = 0; int rc; - if (!p_ll2_conn) + if (unlikely(!p_ll2_conn)) return 0; - if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) + if (unlikely(!QED_LL2_TX_REGISTERED(p_ll2_conn))) return 0; new_idx = le16_to_cpu(*p_tx->p_fw_cons); num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); - if (!num_bds) + if (unlikely(!num_bds)) return 0; while (num_bds) { @@ -877,10 +901,10 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) p_pkt = list_first_entry(&p_tx->active_descq, struct qed_ll2_tx_packet, list_entry); - if (!p_pkt) + if (unlikely(!p_pkt)) return -EINVAL; - if (p_pkt->bd_used != 1) { + if (unlikely(p_pkt->bd_used != 1)) { DP_NOTICE(p_hwfn, "Unexpectedly many BDs(%d) in TX OOO completion\n", p_pkt->bd_used); @@ -1008,7 +1032,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) 
return 0; - if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) + if (likely(p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)) p_ll2_conn->tx_stats_en = 0; else p_ll2_conn->tx_stats_en = 1; @@ -1124,6 +1148,7 @@ static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; + qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg); /* Get SPQ entry */ @@ -1533,7 +1558,7 @@ static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn, int qed_ll2_establish_connection(void *cxt, u8 connection_handle) { - struct e4_core_conn_context *p_cxt; + struct core_conn_context *p_cxt; struct qed_ll2_tx_packet *p_pkt; struct qed_ll2_info *p_ll2_conn; struct qed_hwfn *p_hwfn = cxt; @@ -1544,7 +1569,7 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle) int rc = -EINVAL; u32 i, capacity; size_t desc_size; - u8 qid; + u8 qid, stats_id; p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) @@ -1610,16 +1635,32 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle) qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle, p_ll2_conn->input.rx_conn_type); + stats_id = qed_ll2_handle_to_stats_id(p_hwfn, + p_ll2_conn->input.rx_conn_type, + qid); p_ll2_conn->queue_id = qid; - p_ll2_conn->tx_stats_id = qid; + p_ll2_conn->tx_stats_id = stats_id; - DP_VERBOSE(p_hwfn, QED_MSG_LL2, - "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d\n", - p_hwfn->rel_pf_id, p_ll2_conn->input.rx_conn_type, qid); + /* If there is no valid stats id for this connection, disable stats */ + if (p_ll2_conn->tx_stats_id == QED_LL2_INVALID_STATS_ID) { + p_ll2_conn->tx_stats_en = 0; + DP_VERBOSE(p_hwfn, + QED_MSG_LL2, + "Disabling stats for queue %d - not enough counters\n", + qid); + } + + DP_VERBOSE(p_hwfn, + QED_MSG_LL2, + "Establishing ll2 queue. 
PF %d ctx_based=%d abs qid=%d stats_id=%d\n", + p_hwfn->rel_pf_id, + p_ll2_conn->input.rx_conn_type, qid, stats_id); if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) { - p_rx->set_prod_addr = p_hwfn->regview + - GTT_BAR0_MAP_REG_TSDM_RAM + TSTORM_LL2_RX_PRODS_OFFSET(qid); + p_rx->set_prod_addr = + (u8 __iomem *)p_hwfn->regview + + GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM, + TSTORM_LL2_RX_PRODS, qid); } else { /* QED_LL2_RX_TYPE_CTX - using doorbell */ p_rx->ctx_based = 1; @@ -1762,7 +1803,7 @@ int qed_ll2_post_rx_buffer(void *cxt, } } - /* If we're lacking entires, let's try to flush buffers to FW */ + /* If we're lacking entries, let's try to flush buffers to FW */ if (!p_curp || !p_curb) { rc = -EBUSY; p_curp = NULL; @@ -1842,8 +1883,8 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, } start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); - if (QED_IS_IWARP_PERSONALITY(p_hwfn) && - p_ll2->input.conn_type == QED_LL2_TYPE_OOO) { + if (likely(QED_IS_IWARP_PERSONALITY(p_hwfn) && + p_ll2->input.conn_type == QED_LL2_TYPE_OOO)) { start_bd->nw_vlan_or_lb_echo = cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE); } else { @@ -1964,28 +2005,29 @@ int qed_ll2_prepare_tx_packet(void *cxt, int rc = 0; p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); - if (!p_ll2_conn) + if (unlikely(!p_ll2_conn)) return -EINVAL; p_tx = &p_ll2_conn->tx_queue; p_tx_chain = &p_tx->txq_chain; - if (pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet) + if (unlikely(pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet)) return -EIO; spin_lock_irqsave(&p_tx->lock, flags); - if (p_tx->cur_send_packet) { + if (unlikely(p_tx->cur_send_packet)) { rc = -EEXIST; goto out; } /* Get entry, but only if we have tx elements for it */ - if (!list_empty(&p_tx->free_descq)) + if (unlikely(!list_empty(&p_tx->free_descq))) p_curp = list_first_entry(&p_tx->free_descq, struct qed_ll2_tx_packet, list_entry); - if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds) + if (unlikely(p_curp && + qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)) p_curp = NULL; - if (!p_curp) { + if (unlikely(!p_curp)) { rc = -EBUSY; goto out; } @@ -2014,16 +2056,16 @@ int qed_ll2_set_fragment_of_tx_packet(void *cxt, unsigned long flags; p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); - if (!p_ll2_conn) + if (unlikely(!p_ll2_conn)) return -EINVAL; - if (!p_ll2_conn->tx_queue.cur_send_packet) + if (unlikely(!p_ll2_conn->tx_queue.cur_send_packet)) return -EINVAL; p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet; cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num; - if (cur_send_frag_num >= p_cur_send_packet->bd_used) + if (unlikely(cur_send_frag_num >= p_cur_send_packet->bd_used)) return -EINVAL; /* Fill the BD information, and possibly notify FW */ @@ -2609,7 +2651,6 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) DP_NOTICE(cdev, "Failed to add an LLH filter\n"); goto err3; } - } ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address); @@ -2651,7 +2692,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, */ nr_frags = skb_shinfo(skb)->nr_frags; - if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) { + if (unlikely(1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET)) { DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n", 1 + nr_frags); return -EINVAL; @@ -2693,7 +2734,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, */ rc = qed_ll2_prepare_tx_packet(p_hwfn, 
cdev->ll2->handle, &pkt, 1); - if (rc) + if (unlikely(rc)) goto err; for (i = 0; i < nr_frags; i++) { @@ -2717,7 +2758,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, /* if failed not much to do here, partial packet has been posted * we can't free memory, will need to wait for completion */ - if (rc) + if (unlikely(rc)) goto err2; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index df88d00053a2..0bfc375161ed 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -32,7 +32,6 @@ #define QED_LL2_LEGACY_CONN_BASE_PF 0 #define QED_LL2_CTX_CONN_BASE_PF QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF - struct qed_ll2_rx_packet { struct list_head list_entry; struct core_rx_bd_with_buff_len *rxq_bd; @@ -119,41 +118,41 @@ struct qed_ll2_info { extern const struct qed_ll2_ops qed_ll2_ops_pass; /** - * @brief qed_ll2_acquire_connection - allocate resources, - * starts rx & tx (if relevant) queues pair. Provides - * connecion handler as output parameter. + * qed_ll2_acquire_connection(): Allocate resources, + * starts rx & tx (if relevant) queues pair. + * Provides connecion handler as output + * parameter. * + * @cxt: Pointer to the hw-function [opaque to some]. + * @data: Describes connection parameters. * - * @param cxt - pointer to the hw-function [opaque to some] - * @param data - describes connection parameters - * @return int + * Return: Int. */ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data); /** - * @brief qed_ll2_establish_connection - start previously - * allocated LL2 queues pair + * qed_ll2_establish_connection(): start previously allocated LL2 queues pair * - * @param cxt - pointer to the hw-function [opaque to some] - * @param p_ptt - * @param connection_handle LL2 connection's handle obtained from - * qed_ll2_require_connection + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_establish_connection(void *cxt, u8 connection_handle); /** - * @brief qed_ll2_post_rx_buffers - submit buffers to LL2 Rx queue. + * qed_ll2_post_rx_buffer(): Submit buffers to LL2 Rx queue. * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle obtained from - * qed_ll2_require_connection - * @param addr rx (physical address) buffers to submit - * @param cookie - * @param notify_fw produce corresponding Rx BD immediately + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. + * @addr: RX (physical address) buffers to submit. + * @buf_len: Buffer Len. + * @cookie: Cookie. + * @notify_fw: Produce corresponding Rx BD immediately. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_post_rx_buffer(void *cxt, u8 connection_handle, @@ -161,15 +160,15 @@ int qed_ll2_post_rx_buffer(void *cxt, u16 buf_len, void *cookie, u8 notify_fw); /** - * @brief qed_ll2_prepare_tx_packet - request for start Tx BD - * to prepare Tx packet submission to FW. + * qed_ll2_prepare_tx_packet(): Request for start Tx BD + * to prepare Tx packet submission to FW. 
* - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle - * @param pkt - info regarding the tx packet - * @param notify_fw - issue doorbell to fw for this packet + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: Connection handle. + * @pkt: Info regarding the tx packet. + * @notify_fw: Issue doorbell to fw for this packet. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_prepare_tx_packet(void *cxt, u8 connection_handle, @@ -177,81 +176,83 @@ int qed_ll2_prepare_tx_packet(void *cxt, bool notify_fw); /** - * @brief qed_ll2_release_connection - releases resources - * allocated for LL2 connection + * qed_ll2_release_connection(): Releases resources allocated for LL2 + * connection. + * + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle obtained from - * qed_ll2_require_connection + * Return: Void. */ void qed_ll2_release_connection(void *cxt, u8 connection_handle); /** - * @brief qed_ll2_set_fragment_of_tx_packet - provides fragments to fill - * Tx BD of BDs requested by - * qed_ll2_prepare_tx_packet + * qed_ll2_set_fragment_of_tx_packet(): Provides fragments to fill + * Tx BD of BDs requested by + * qed_ll2_prepare_tx_packet * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle - * obtained from - * qed_ll2_require_connection - * @param addr - * @param nbytes + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. + * @addr: Address. + * @nbytes: Number of bytes. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_set_fragment_of_tx_packet(void *cxt, u8 connection_handle, dma_addr_t addr, u16 nbytes); /** - * @brief qed_ll2_terminate_connection - stops Tx/Rx queues - * + * qed_ll2_terminate_connection(): Stops Tx/Rx queues * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle - * obtained from - * qed_ll2_require_connection + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle); /** - * @brief qed_ll2_get_stats - get LL2 queue's statistics + * qed_ll2_get_stats(): Get LL2 queue's statistics * + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. + * @p_stats: Pointer Status. * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle obtained from - * qed_ll2_require_connection - * @param p_stats - * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_get_stats(void *cxt, u8 connection_handle, struct qed_ll2_stats *p_stats); /** - * @brief qed_ll2_alloc - Allocates LL2 connections set + * qed_ll2_alloc(): Allocates LL2 connections set. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. 
*/ int qed_ll2_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_ll2_setup - Inits LL2 connections set + * qed_ll2_setup(): Inits LL2 connections set. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. * */ void qed_ll2_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_ll2_free - Releases LL2 connections set + * qed_ll2_free(): Releases LL2 connections set + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. * */ void qed_ll2_free(struct qed_hwfn *p_hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index d10e1cd6d2ba..7673b3e07736 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -99,10 +99,6 @@ static const u32 qed_mfw_ext_10g[] __initconst = { ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, }; -static const u32 qed_mfw_ext_20g[] __initconst = { - ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, -}; - static const u32 qed_mfw_ext_25g[] __initconst = { ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, @@ -148,7 +144,6 @@ static const u32 qed_mfw_ext_100g_base_r4[] __initconst = { static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = { QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g), QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g), - QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_20G, qed_mfw_ext_20g), QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g), QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g), QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R, @@ -262,7 +257,7 @@ module_exit(qed_exit); /* Check if the DMA controller on the machine can properly handle the DMA * addressing required by the device. -*/ + */ static int qed_set_coherency_mask(struct qed_dev *cdev) { struct device *dev = &cdev->pdev->dev; @@ -547,7 +542,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev, goto err2; } - DP_INFO(cdev, "qed_probe completed successfully\n"); + DP_INFO(cdev, "%s completed successfully\n", __func__); return cdev; @@ -980,7 +975,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, rc = qed_set_int_mode(cdev, false); if (rc) { - DP_ERR(cdev, "qed_slowpath_setup_int ERR\n"); + DP_ERR(cdev, "%s ERR\n", __func__); return rc; } @@ -1161,6 +1156,7 @@ static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn, /* Memory barrier for setting atomic bit */ smp_mb__before_atomic(); set_bit(wq_flag, &hwfn->slowpath_task_flags); + /* Memory barrier after setting atomic bit */ smp_mb__after_atomic(); queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay); @@ -1382,7 +1378,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, (params->drv_minor << 16) | (params->drv_rev << 8) | (params->drv_eng); - strlcpy(drv_version.name, params->name, + strscpy(drv_version.name, params->name, MCP_DRV_VER_STR_SIZE - 4); rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, &drv_version); @@ -2892,7 +2888,7 @@ static int qed_update_drv_state(struct qed_dev *cdev, bool active) return status; } -static int qed_update_mac(struct qed_dev *cdev, u8 *mac) +static int qed_update_mac(struct qed_dev *cdev, const u8 *mac) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *ptt; @@ -3079,8 +3075,10 @@ int qed_mfw_tlv_req(struct qed_hwfn *hwfn) DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, "Scheduling slowpath task [Flag: %d]\n", QED_SLOWPATH_MFW_TLV_REQ); + /* Memory barrier for setting atomic bit */ smp_mb__before_atomic(); set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags); + /* Memory 
barrier after setting atomic bit */ smp_mb__after_atomic(); queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0); @@ -3159,3 +3157,8 @@ int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type, return 0; } + +unsigned long qed_get_epoch_time(void) +{ + return ktime_get_real_seconds(); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 24cd41567775..64678a256f3b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -17,6 +17,7 @@ #include "qed_cxt.h" #include "qed_dcbx.h" #include "qed_hsi.h" +#include "qed_mfw_hsi.h" #include "qed_hw.h" #include "qed_mcp.h" #include "qed_reg_addr.h" @@ -30,11 +31,11 @@ #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \ - qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \ + qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset)), \ _val) #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \ - qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset)) + qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset))) #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \ DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \ @@ -384,7 +385,7 @@ qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param); /* Get the union data */ - if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) { + if (p_mb_params->p_data_dst && p_mb_params->data_dst_size) { u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr + offsetof(struct public_drv_mb, union_data); @@ -410,7 +411,7 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, union_data_addr = p_hwfn->mcp_info->drv_mb_addr + offsetof(struct public_drv_mb, union_data); memset(&union_data, 0, sizeof(union_data)); - if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size) + if (p_mb_params->p_data_src && p_mb_params->data_src_size) memcpy(&union_data, p_mb_params->p_data_src, p_mb_params->data_src_size); qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data, @@ -671,7 +672,8 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, u32 cmd, u32 param, u32 *o_mcp_resp, - u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf) + u32 *o_mcp_param, + u32 *o_txn_size, u32 *o_buf, bool b_can_sleep) { struct qed_mcp_mb_params mb_params; u8 raw_data[MCP_DRV_NVM_BUF_LEN]; @@ -684,6 +686,8 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, /* Use the maximal value since the actual one is part of the response */ mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN; + if (b_can_sleep) + mb_params.flags = QED_MB_FLAG_CAN_SLEEP; rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) @@ -916,7 +920,6 @@ enum qed_load_req_force { }; static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn, - enum qed_load_req_force force_cmd, u8 *p_mfw_force_cmd) { @@ -1526,15 +1529,13 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) { ext_speed = 0; if (params->ext_speed.autoneg) - ext_speed |= ETH_EXT_SPEED_AN; + ext_speed |= ETH_EXT_SPEED_NONE; val = params->ext_speed.forced_speed; if (val & QED_EXT_SPEED_1G) ext_speed |= ETH_EXT_SPEED_1G; if (val & QED_EXT_SPEED_10G) ext_speed |= ETH_EXT_SPEED_10G; - if (val & QED_EXT_SPEED_20G) - ext_speed |= ETH_EXT_SPEED_20G; if (val & QED_EXT_SPEED_25G) ext_speed |= ETH_EXT_SPEED_25G; if (val & QED_EXT_SPEED_40G) @@ 
-1560,8 +1561,6 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) ext_speed |= ETH_EXT_ADV_SPEED_1G; if (val & QED_EXT_SPEED_MASK_10G) ext_speed |= ETH_EXT_ADV_SPEED_10G; - if (val & QED_EXT_SPEED_MASK_20G) - ext_speed |= ETH_EXT_ADV_SPEED_20G; if (val & QED_EXT_SPEED_MASK_25G) ext_speed |= ETH_EXT_ADV_SPEED_25G; if (val & QED_EXT_SPEED_MASK_40G) @@ -2081,7 +2080,7 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_mfw_ver, u32 *p_running_bundle_id) { - u32 global_offsize; + u32 global_offsize, public_base; if (IS_VF(p_hwfn->cdev)) { if (p_hwfn->vf_iov_info) { @@ -2098,16 +2097,16 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, } } + public_base = p_hwfn->mcp_info->public_base; global_offsize = qed_rd(p_hwfn, p_ptt, - SECTION_OFFSIZE_ADDR(p_hwfn-> - mcp_info->public_base, + SECTION_OFFSIZE_ADDR(public_base, PUBLIC_GLOBAL)); *p_mfw_ver = qed_rd(p_hwfn, p_ptt, SECTION_ADDR(global_offsize, 0) + offsetof(struct public_global, mfw_ver)); - if (p_running_bundle_id != NULL) { + if (p_running_bundle_id) { *p_running_bundle_id = qed_rd(p_hwfn, p_ptt, SECTION_ADDR(global_offsize, 0) + offsetof(struct public_global, @@ -2209,6 +2208,7 @@ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn, return 0; } + static bool qed_is_transceiver_ready(u32 transceiver_state, u32 transceiver_type) { @@ -2378,7 +2378,7 @@ qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "According to Legacy capabilities, L2 personality is %08x\n", - (u32) *p_proto); + (u32)*p_proto); } static int @@ -2423,7 +2423,7 @@ qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n", - (u32) *p_proto, resp, param); + (u32)*p_proto, resp, param); return 0; } @@ -2445,9 +2445,6 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, case FUNC_MF_CFG_PROTOCOL_ISCSI: *p_proto = QED_PCI_ISCSI; break; - case FUNC_MF_CFG_PROTOCOL_NVMETCP: - *p_proto = QED_PCI_NVMETCP; - break; case FUNC_MF_CFG_PROTOCOL_FCOE: *p_proto = QED_PCI_FCOE; break; @@ -2854,7 +2851,7 @@ int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn, } int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u8 *mac) + struct qed_ptt *p_ptt, const u8 *mac) { struct qed_mcp_mb_params mb_params; u32 mfw_mac[2]; @@ -3026,7 +3023,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len) DRV_MB_PARAM_NVM_LEN_OFFSET), &resp, &resp_param, &read_len, - (u32 *)(p_buf + offset)); + (u32 *)(p_buf + offset), false); if (rc || (resp != FW_MSG_CODE_NVM_OK)) { DP_NOTICE(cdev, "MCP command rc = %d\n", rc); @@ -3034,7 +3031,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len) } /* This can be a lengthy process, and it's possible scheduler - * isn't preemptable. Sleep a bit to prevent CPU hogging. + * isn't preemptible. Sleep a bit to prevent CPU hogging. */ if (bytes_left % 0x1000 < (bytes_left - read_len) % 0x1000) @@ -3129,10 +3126,12 @@ int qed_mcp_nvm_write(struct qed_dev *cdev, * to be delivered to MFW. 
*/ if (param && cmd == QED_PUT_FILE_DATA) { - buf_idx = QED_MFW_GET_FIELD(param, - FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET); - buf_size = QED_MFW_GET_FIELD(param, - FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE); + buf_idx = + QED_MFW_GET_FIELD(param, + FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET); + buf_size = + QED_MFW_GET_FIELD(param, + FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE); } else { buf_idx += buf_size; buf_size = min_t(u32, (len - buf_idx), @@ -3176,7 +3175,7 @@ int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_TRANSCEIVER_READ, nvm_offset, &resp, ¶m, &buf_size, - (u32 *)(p_buf + offset)); + (u32 *)(p_buf + offset), true); if (rc) { DP_NOTICE(p_hwfn, "Failed to send a transceiver read command to the MFW. rc = %d.\n", @@ -3275,7 +3274,7 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, DRV_MSG_CODE_BIST_TEST, param, &resp, &resp_param, &buf_size, - (u32 *)p_image_att); + (u32 *)p_image_att, false); if (rc) return rc; @@ -3388,7 +3387,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, type = NVM_TYPE_DEFAULT_CFG; break; case QED_NVM_IMAGE_NVM_META: - type = NVM_TYPE_META; + type = NVM_TYPE_NVM_META; break; default: DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n", @@ -3905,10 +3904,6 @@ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK | DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL; - if (QED_IS_E5(p_hwfn->cdev)) - features |= - DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL; - return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT, features, &mcp_resp, &mcp_param); } @@ -4002,7 +3997,8 @@ int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_NVM_CFG_OPTION, - mb_param, &resp, ¶m, p_len, (u32 *)p_buf); + mb_param, &resp, ¶m, p_len, + (u32 *)p_buf, false); return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 8edb450d0abf..564723800d15 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -266,97 +266,97 @@ union qed_mfw_tlv_data { #define QED_NVM_CFG_OPTION_ENTITY_SEL BIT(4) /** - * @brief - returns the link params of the hw function + * qed_mcp_get_link_params(): Returns the link params of the hw function. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @returns pointer to link params + * Returns: Pointer to link params. */ -struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *); +struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn); /** - * @brief - return the link state of the hw function + * qed_mcp_get_link_state(): Return the link state of the hw function. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @returns pointer to link state + * Returns: Pointer to link state. */ -struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *); +struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn); /** - * @brief - return the link capabilities of the hw function + * qed_mcp_get_link_capabilities(): Return the link capabilities of the + * hw function. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @returns pointer to link capabilities + * Returns: Pointer to link capabilities. */ struct qed_mcp_link_capabilities *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn); /** - * @brief Request the MFW to set the the link according to 'link_input'. 
+ * qed_mcp_set_link(): Request the MFW to set the link according + * to 'link_input'. * - * @param p_hwfn - * @param p_ptt - * @param b_up - raise link if `true'. Reset link if `false'. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @b_up: Raise link if `true'. Reset link if `false'. * - * @return int + * Return: Int. */ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up); /** - * @brief Get the management firmware version value + * qed_mcp_get_mfw_ver(): Get the management firmware version value. * - * @param p_hwfn - * @param p_ptt - * @param p_mfw_ver - mfw version value - * @param p_running_bundle_id - image id in nvram; Optional. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_mfw_ver: MFW version value. + * @p_running_bundle_id: Image id in nvram; Optional. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - operation was successful. */ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_mfw_ver, u32 *p_running_bundle_id); /** - * @brief Get the MBI version value + * qed_mcp_get_mbi_ver(): Get the MBI version value. * - * @param p_hwfn - * @param p_ptt - * @param p_mbi_ver - A pointer to a variable to be filled with the MBI version. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_mbi_ver: A pointer to a variable to be filled with the MBI version. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - operation was successful. */ int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_mbi_ver); /** - * @brief Get media type value of the port. + * qed_mcp_get_media_type(): Get media type value of the port. * - * @param cdev - qed dev pointer - * @param p_ptt - * @param mfw_ver - media type value + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @media_type: Media type value * - * @return int - - * 0 - Operation was successul. - * -EBUSY - Operation failed + * Return: Int - 0 - Operation was successul. + * -EBUSY - Operation failed */ int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *media_type); /** - * @brief Get transceiver data of the port. + * qed_mcp_get_transceiver_data(): Get transceiver data of the port. * - * @param cdev - qed dev pointer - * @param p_ptt - * @param p_transceiver_state - transceiver state. - * @param p_transceiver_type - media type value + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_transceiver_state: Transceiver state. + * @p_tranceiver_type: Media type value. * - * @return int - - * 0 - Operation was successful. - * -EBUSY - Operation failed + * Return: Int - 0 - Operation was successul. + * -EBUSY - Operation failed */ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -364,50 +364,48 @@ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn, u32 *p_tranceiver_type); /** - * @brief Get transceiver supported speed mask. + * qed_mcp_trans_speed_mask(): Get transceiver supported speed mask. * - * @param cdev - qed dev pointer - * @param p_ptt - * @param p_speed_mask - Bit mask of all supported speeds. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_speed_mask: Bit mask of all supported speeds. * - * @return int - - * 0 - Operation was successful. - * -EBUSY - Operation failed + * Return: Int - 0 - Operation was successul. + * -EBUSY - Operation failed */ int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_speed_mask); /** - * @brief Get board configuration. + * qed_mcp_get_board_config(): Get board configuration. 
* - * @param cdev - qed dev pointer - * @param p_ptt - * @param p_board_config - Board config. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_board_config: Board config. * - * @return int - - * 0 - Operation was successful. - * -EBUSY - Operation failed + * Return: Int - 0 - Operation was successul. + * -EBUSY - Operation failed */ int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_board_config); /** - * @brief General function for sending commands to the MCP - * mailbox. It acquire mutex lock for the entire - * operation, from sending the request until the MCP - * response. Waiting for MCP response will be checked up - * to 5 seconds every 5ms. + * qed_mcp_cmd(): General function for sending commands to the MCP + * mailbox. It acquire mutex lock for the entire + * operation, from sending the request until the MCP + * response. Waiting for MCP response will be checked up + * to 5 seconds every 5ms. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access - * @param cmd - command to be sent to the MCP. - * @param param - Optional param - * @param o_mcp_resp - The MCP response code (exclude sequence). - * @param o_mcp_param- Optional parameter provided by the MCP + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @cmd: command to be sent to the MCP. + * @param: Optional param + * @o_mcp_resp: The MCP response code (exclude sequence). + * @o_mcp_param: Optional parameter provided by the MCP * response - * @return int - 0 - operation - * was successul. + * + * Return: Int - 0 - Operation was successul. */ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -417,37 +415,39 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, u32 *o_mcp_param); /** - * @brief - drains the nig, allowing completion to pass in case of pauses. - * (Should be called only from sleepable context) + * qed_mcp_drain(): drains the nig, allowing completion to pass in + * case of pauses. + * (Should be called only from sleepable context) * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * + * Return: Int. */ int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Get the flash size value + * qed_mcp_get_flash_size(): Get the flash size value. * - * @param p_hwfn - * @param p_ptt - * @param p_flash_size - flash size in bytes to be filled. + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @p_flash_size: Flash size in bytes to be filled. * - * @return int - 0 - operation was successul. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_flash_size); /** - * @brief Send driver version to MFW + * qed_mcp_send_drv_version(): Send driver version to MFW. * - * @param p_hwfn - * @param p_ptt - * @param version - Version value - * @param name - Protocol driver name + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @p_ver: Version value. * - * @return int - 0 - operation was successul. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, @@ -455,146 +455,148 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, struct qed_mcp_drv_version *p_ver); /** - * @brief Read the MFW process kill counter + * qed_get_process_kill_counter(): Read the MFW process kill counter. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. 
+ * @p_ptt: PTT required for register access. * - * @return u32 + * Return: u32. */ u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Trigger a recovery process + * qed_start_recovery_process(): Trigger a recovery process. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. * - * @return int + * Return: Int. */ int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief A recovery handler must call this function as its first step. - * It is assumed that the handler is not run from an interrupt context. + * qed_recovery_prolog(): A recovery handler must call this function + * as its first step. + * It is assumed that the handler is not run from + * an interrupt context. * - * @param cdev - * @param p_ptt + * @cdev: Qed dev pointer. * - * @return int + * Return: int. */ int qed_recovery_prolog(struct qed_dev *cdev); /** - * @brief Notify MFW about the change in base device properties + * qed_mcp_ov_update_current_config(): Notify MFW about the change in base + * device properties * - * @param p_hwfn - * @param p_ptt - * @param client - qed client type + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @client: Qed client type. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_ov_client client); /** - * @brief Notify MFW about the driver state + * qed_mcp_ov_update_driver_state(): Notify MFW about the driver state. * - * @param p_hwfn - * @param p_ptt - * @param drv_state - Driver state + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @drv_state: Driver state. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_ov_driver_state drv_state); /** - * @brief Send MTU size to MFW + * qed_mcp_ov_update_mtu(): Send MTU size to MFW. * - * @param p_hwfn - * @param p_ptt - * @param mtu - MTU size + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @mtu: MTU size. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 mtu); /** - * @brief Send MAC address to MFW + * qed_mcp_ov_update_mac(): Send MAC address to MFW. * - * @param p_hwfn - * @param p_ptt - * @param mac - MAC address + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @mac: MAC address. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u8 *mac); + struct qed_ptt *p_ptt, const u8 *mac); /** - * @brief Send WOL mode to MFW + * qed_mcp_ov_update_wol(): Send WOL mode to MFW. * - * @param p_hwfn - * @param p_ptt - * @param wol - WOL mode + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @wol: WOL mode. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_ov_wol wol); /** - * @brief Set LED status + * qed_mcp_set_led(): Set LED status. * - * @param p_hwfn - * @param p_ptt - * @param mode - LED mode + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @mode: LED mode. * - * @return int - 0 - operation was successful. 
+ * Return: Int - 0 - Operation was successul. */ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_led_mode mode); /** - * @brief Read from nvm + * qed_mcp_nvm_read(): Read from NVM. * - * @param cdev - * @param addr - nvm offset - * @param p_buf - nvm read buffer - * @param len - buffer len + * @cdev: Qed dev pointer. + * @addr: NVM offset. + * @p_buf: NVM read buffer. + * @len: Buffer len. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len); /** - * @brief Write to nvm + * qed_mcp_nvm_write(): Write to NVM. * - * @param cdev - * @param addr - nvm offset - * @param cmd - nvm command - * @param p_buf - nvm write buffer - * @param len - buffer len + * @cdev: Qed dev pointer. + * @addr: NVM offset. + * @cmd: NVM command. + * @p_buf: NVM write buffer. + * @len: Buffer len. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_nvm_write(struct qed_dev *cdev, u32 cmd, u32 addr, u8 *p_buf, u32 len); /** - * @brief Check latest response + * qed_mcp_nvm_resp(): Check latest response. * - * @param cdev - * @param p_buf - nvm write buffer + * @cdev: Qed dev pointer. + * @p_buf: NVM write buffer. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf); @@ -604,13 +606,13 @@ struct qed_nvm_image_att { }; /** - * @brief Allows reading a whole nvram image + * qed_mcp_get_nvm_image_att(): Allows reading a whole nvram image. * - * @param p_hwfn - * @param image_id - image to get attributes for - * @param p_image_att - image attributes structure into which to fill data + * @p_hwfn: HW device data. + * @image_id: Image to get attributes for. + * @p_image_att: Image attributes structure into which to fill data. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, @@ -618,64 +620,65 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, struct qed_nvm_image_att *p_image_att); /** - * @brief Allows reading a whole nvram image + * qed_mcp_get_nvm_image(): Allows reading a whole nvram image. * - * @param p_hwfn - * @param image_id - image requested for reading - * @param p_buffer - allocated buffer into which to fill data - * @param buffer_len - length of the allocated buffer. + * @p_hwfn: HW device data. + * @image_id: image requested for reading. + * @p_buffer: allocated buffer into which to fill data. + * @buffer_len: length of the allocated buffer. * - * @return 0 iff p_buffer now contains the nvram image. + * Return: 0 if p_buffer now contains the nvram image. */ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, enum qed_nvm_images image_id, u8 *p_buffer, u32 buffer_len); /** - * @brief Bist register test + * qed_mcp_bist_register_test(): Bist register test. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Bist clock test + * qed_mcp_bist_clock_test(): Bist clock test. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access + * @p_hwfn: HW device data. 
+ * @p_ptt: PTT required for register access. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Bist nvm test - get number of images + * qed_mcp_bist_nvm_get_num_images(): Bist nvm test - get number of images. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access - * @param num_images - number of images if operation was + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @num_images: number of images if operation was * successful. 0 if not. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *num_images); /** - * @brief Bist nvm test - get image attributes by index + * qed_mcp_bist_nvm_get_image_att(): Bist nvm test - get image attributes + * by index. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access - * @param p_image_att - Attributes of image - * @param image_index - Index of image to get information for + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @p_image_att: Attributes of image. + * @image_index: Index of image to get information for. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -683,23 +686,26 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, u32 image_index); /** - * @brief - Processes the TLV request from MFW i.e., get the required TLV info - * from the qed client and send it to the MFW. + * qed_mfw_process_tlv_req(): Processes the TLV request from MFW i.e., + * get the required TLV info + * from the qed client and send it to the MFW. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Send raw debug data to the MFW + * qed_mcp_send_raw_debug_data(): Send raw debug data to the MFW + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_buf: raw debug data buffer. + * @size: Buffer size. * - * @param p_hwfn - * @param p_ptt - * @param p_buf - raw debug data buffer - * @param size - buffer size + * Return : Int. */ int qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn, @@ -796,47 +802,49 @@ qed_mcp_is_ext_speed_supported(const struct qed_hwfn *p_hwfn) } /** - * @brief Initialize the interface with the MCP + * qed_mcp_cmd_init(): Initialize the interface with the MCP. * - * @param p_hwfn - HW func - * @param p_ptt - PTT required for register access + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. * - * @return int + * Return: Int. */ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Initialize the port interface with the MCP + * qed_mcp_cmd_port_init(): Initialize the port interface with the MCP + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. * - * @param p_hwfn - * @param p_ptt * Can only be called after `num_ports_in_engines' is set */ void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Releases resources allocated during the init process. + * qed_mcp_free(): Releases resources allocated during the init process. 
* - * @param p_hwfn - HW func - * @param p_ptt - PTT required for register access + * @p_hwfn: HW function. * - * @return int + * Return: Int. */ int qed_mcp_free(struct qed_hwfn *p_hwfn); /** - * @brief This function is called from the DPC context. After - * pointing PTT to the mfw mb, check for events sent by the MCP - * to the driver and ack them. In case a critical event - * detected, it will be handled here, otherwise the work will be - * queued to a sleepable work-queue. + * qed_mcp_handle_events(): This function is called from the DPC context. + * After pointing PTT to the mfw mb, check for events sent by + * the MCP to the driver and ack them. In case a critical event + * detected, it will be handled here, otherwise the work will be + * queued to a sleepable work-queue. + * + * @p_hwfn: HW function. + * @p_ptt: PTT required for register access. * - * @param p_hwfn - HW function - * @param p_ptt - PTT required for register access - * @return int - 0 - operation - * was successul. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -858,169 +866,177 @@ struct qed_load_req_params { }; /** - * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds, - * returns whether this PF is the first on the engine/port or function. + * qed_mcp_load_req(): Sends a LOAD_REQ to the MFW, and in case the + * operation succeeds, returns whether this PF is + * the first on the engine/port or function. * - * @param p_hwfn - * @param p_ptt - * @param p_params + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_params: Params. * - * @return int - 0 - Operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_load_req_params *p_params); /** - * @brief Sends a LOAD_DONE message to the MFW + * qed_mcp_load_done(): Sends a LOAD_DONE message to the MFW. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int - 0 - Operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Sends a UNLOAD_REQ message to the MFW + * qed_mcp_unload_req(): Sends a UNLOAD_REQ message to the MFW. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int - 0 - Operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Sends a UNLOAD_DONE message to the MFW + * qed_mcp_unload_done(): Sends a UNLOAD_DONE message to the MFW * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int - 0 - Operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Read the MFW mailbox into Current buffer. + * qed_mcp_read_mb(): Read the MFW mailbox into Current buffer. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. */ void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Ack to mfw that driver finished FLR process for VFs + * qed_mcp_ack_vf_flr(): Ack to mfw that driver finished FLR process for VFs * - * @param p_hwfn - * @param p_ptt - * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks. + * @p_hwfn: HW device data. 
+ * @p_ptt: P_ptt. + * @vfs_to_ack: bit mask of all engine VFs for which the PF acks. * - * @param return int - 0 upon success. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *vfs_to_ack); /** - * @brief - calls during init to read shmem of all function-related info. + * qed_mcp_fill_shmem_func_info(): Calls during init to read shmem of + * all function-related info. * - * @param p_hwfn + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief - Reset the MCP using mailbox command. + * qed_mcp_reset(): Reset the MCP using mailbox command. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief - Sends an NVM read command request to the MFW to get - * a buffer. - * - * @param p_hwfn - * @param p_ptt - * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or - * DRV_MSG_CODE_NVM_READ_NVRAM commands - * @param param - [0:23] - Offset [24:31] - Size - * @param o_mcp_resp - MCP response - * @param o_mcp_param - MCP response param - * @param o_txn_size - Buffer size output - * @param o_buf - Pointer to the buffer returned by the MFW. + * qed_mcp_nvm_rd_cmd(): Sends an NVM read command request to the MFW to get + * a buffer. * - * @param return 0 upon success. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @cmd: (Command) DRV_MSG_CODE_NVM_GET_FILE_DATA or + * DRV_MSG_CODE_NVM_READ_NVRAM commands. + * @param: [0:23] - Offset [24:31] - Size. + * @o_mcp_resp: MCP response. + * @o_mcp_param: MCP response param. + * @o_txn_size: Buffer size output. + * @o_buf: Pointer to the buffer returned by the MFW. + * @b_can_sleep: Can sleep. + * + * Return: 0 upon success. */ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 cmd, u32 param, u32 *o_mcp_resp, - u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf); + u32 *o_mcp_param, + u32 *o_txn_size, u32 *o_buf, bool b_can_sleep); /** - * @brief Read from sfp + * qed_mcp_phy_sfp_read(): Read from sfp. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access - * @param port - transceiver port - * @param addr - I2C address - * @param offset - offset in sfp - * @param len - buffer length - * @param p_buf - buffer to read into + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @port: transceiver port. + * @addr: I2C address. + * @offset: offset in sfp. + * @len: buffer length. + * @p_buf: buffer to read into. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf); /** - * @brief indicates whether the MFW objects [under mcp_info] are accessible + * qed_mcp_is_init(): indicates whether the MFW objects [under mcp_info] + * are accessible * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return true iff MFW is running and mcp_info is initialized + * Return: true if MFW is running and mcp_info is initialized. */ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn); /** - * @brief request MFW to configure MSI-X for a VF + * qed_mcp_config_vf_msix(): Request MFW to configure MSI-X for a VF. 
* - * @param p_hwfn - * @param p_ptt - * @param vf_id - absolute inside engine - * @param num_sbs - number of entries to request + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @vf_id: absolute inside engine. + * @num: number of entries to request. * - * @return int + * Return: Int. */ int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 vf_id, u8 num); /** - * @brief - Halt the MCP. + * qed_mcp_halt(): Halt the MCP. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief - Wake up the MCP. + * qed_mcp_resume: Wake up the MCP. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -1038,13 +1054,13 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn, int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 mask_parities); -/* @brief - Gets the mdump retained data from the MFW. +/* qed_mcp_mdump_get_retain(): Gets the mdump retained data from the MFW. * - * @param p_hwfn - * @param p_ptt - * @param p_mdump_retain + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_mdump_retain: mdump retain. * - * @param return 0 upon success. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn, @@ -1052,15 +1068,15 @@ qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn, struct mdump_retain_data_stc *p_mdump_retain); /** - * @brief - Sets the MFW's max value for the given resource + * qed_mcp_set_resc_max_val(): Sets the MFW's max value for the given resource. * - * @param p_hwfn - * @param p_ptt - * @param res_id - * @param resc_max_val - * @param p_mcp_resp + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @res_id: RES ID. + * @resc_max_val: Resec max val. + * @p_mcp_resp: MCP Resp * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn, @@ -1069,16 +1085,17 @@ qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn, u32 resc_max_val, u32 *p_mcp_resp); /** - * @brief - Gets the MFW allocation info for the given resource + * qed_mcp_get_resc_info(): Gets the MFW allocation info for the given + * resource. * - * @param p_hwfn - * @param p_ptt - * @param res_id - * @param p_mcp_resp - * @param p_resc_num - * @param p_resc_start + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @res_id: Res ID. + * @p_mcp_resp: MCP resp. + * @p_resc_num: Resc num. + * @p_resc_start: Resc start. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn, @@ -1087,13 +1104,13 @@ qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn, u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start); /** - * @brief Send eswitch mode to MFW + * qed_mcp_ov_update_eswitch(): Send eswitch mode to MFW. * - * @param p_hwfn - * @param p_ptt - * @param eswitch - eswitch mode + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @eswitch: eswitch mode. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. 
*/ int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -1113,12 +1130,12 @@ enum qed_resc_lock { }; /** - * @brief - Initiates PF FLR + * qed_mcp_initiate_pf_flr(): Initiates PF FLR. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); struct qed_resc_lock_params { @@ -1151,13 +1168,13 @@ struct qed_resc_lock_params { }; /** - * @brief Acquires MFW generic resource lock + * qed_mcp_resc_lock(): Acquires MFW generic resource lock. * - * @param p_hwfn - * @param p_ptt - * @param p_params + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_params: Params. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_resc_lock(struct qed_hwfn *p_hwfn, @@ -1175,13 +1192,13 @@ struct qed_resc_unlock_params { }; /** - * @brief Releases MFW generic resource lock + * qed_mcp_resc_unlock(): Releases MFW generic resource lock. * - * @param p_hwfn - * @param p_ptt - * @param p_params + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_params: Params. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn, @@ -1189,12 +1206,15 @@ qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn, struct qed_resc_unlock_params *p_params); /** - * @brief - default initialization for lock/unlock resource structs + * qed_mcp_resc_lock_default_init(): Default initialization for + * lock/unlock resource structs. + * + * @p_lock: lock params struct to be initialized; Can be NULL. + * @p_unlock: unlock params struct to be initialized; Can be NULL. + * @resource: the requested resource. + * @b_is_permanent: disable retries & aging when set. * - * @param p_lock - lock params struct to be initialized; Can be NULL - * @param p_unlock - unlock params struct to be initialized; Can be NULL - * @param resource - the requested resource - * @paral b_is_permanent - disable retries & aging when set + * Return: Void. */ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, struct qed_resc_unlock_params *p_unlock, @@ -1202,94 +1222,117 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, resource, bool b_is_permanent); /** - * @brief - Return whether management firmware support smart AN + * qed_mcp_is_smart_an_supported(): Return whether management firmware + * support smart AN * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return bool - true if feature is supported. + * Return: bool true if feature is supported. */ bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn); /** - * @brief Learn of supported MFW features; To be done during early init + * qed_mcp_get_capabilities(): Learn of supported MFW features; + * To be done during early init. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Int. */ int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Inform MFW of set of features supported by driver. Should be done - * inside the content of the LOAD_REQ. + * qed_mcp_set_capabilities(): Inform MFW of set of features supported + * by driver. Should be done inside the content + * of the LOAD_REQ. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. 
*/ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Read ufp config from the shared memory. + * qed_mcp_read_ufp_config(): Read ufp config from the shared memory. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. */ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Populate the nvm info shadow in the given hardware function + * qed_mcp_nvm_info_populate(): Populate the nvm info shadow in the given + * hardware function. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Int. */ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn); /** - * @brief Delete nvm info shadow in the given hardware function + * qed_mcp_nvm_info_free(): Delete nvm info shadow in the given + * hardware function. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn); /** - * @brief Get the engine affinity configuration. + * qed_mcp_get_engine_config(): Get the engine affinity configuration. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Int. */ int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Get the PPFID bitmap. + * qed_mcp_get_ppfid_bitmap(): Get the PPFID bitmap. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. */ int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Get NVM config attribute value. + * qed_mcp_nvm_get_cfg(): Get NVM config attribute value. * - * @param p_hwfn - * @param p_ptt - * @param option_id - * @param entity_id - * @param flags - * @param p_buf - * @param p_len + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @option_id: Option ID. + * @entity_id: Entity ID. + * @flags: Flags. + * @p_buf: Buf. + * @p_len: Len. + * + * Return: Int. */ int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, u32 *p_len); /** - * @brief Set NVM config attribute value. + * qed_mcp_nvm_set_cfg(): Set NVM config attribute value. * - * @param p_hwfn - * @param p_ptt - * @param option_id - * @param entity_id - * @param flags - * @param p_buf - * @param len + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @option_id: Option ID. + * @entity_id: Entity ID. + * @flags: Flags. + * @p_buf: Buf. + * @len: Len. + * + * Return: Int. */ int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h new file mode 100644 index 000000000000..8a0e3c5d4bda --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h @@ -0,0 +1,2474 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2019-2021 Marvell International Ltd. 
+ */ + +#ifndef _QED_MFW_HSI_H +#define _QED_MFW_HSI_H + +#define MFW_TRACE_SIGNATURE 0x25071946 + +/* The trace in the buffer */ +#define MFW_TRACE_EVENTID_MASK 0x00ffff +#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000 +#define MFW_TRACE_PRM_SIZE_OFFSET 16 +#define MFW_TRACE_ENTRY_SIZE 3 + +struct mcp_trace { + u32 signature; /* Help to identify that the trace is valid */ + u32 size; /* the size of the trace buffer in bytes */ + u32 curr_level; /* 2 - all will be written to the buffer + * 1 - debug trace will not be written + * 0 - just errors will be written to the buffer + */ + u32 modules_mask[2]; /* a bit per module, 1 means write it, 0 means + * mask it. + */ + + /* Warning: the following pointers are assumed to be 32bits as they are + * used only in the MFW. + */ + u32 trace_prod; /* The next trace will be written to this offset */ + u32 trace_oldest; /* The oldest valid trace starts at this offset + * (usually very close after the current producer). + */ +}; + +#define VF_MAX_STATIC 192 +#define VF_BITMAP_SIZE_IN_DWORDS (VF_MAX_STATIC / 32) +#define VF_BITMAP_SIZE_IN_BYTES (VF_BITMAP_SIZE_IN_DWORDS * sizeof(u32)) + +#define EXT_VF_MAX_STATIC 240 +#define EXT_VF_BITMAP_SIZE_IN_DWORDS (((EXT_VF_MAX_STATIC - 1) / 32) + 1) +#define EXT_VF_BITMAP_SIZE_IN_BYTES (EXT_VF_BITMAP_SIZE_IN_DWORDS * sizeof(u32)) +#define ADDED_VF_BITMAP_SIZE 2 + +#define MCP_GLOB_PATH_MAX 2 +#define MCP_PORT_MAX 2 +#define MCP_GLOB_PORT_MAX 4 +#define MCP_GLOB_FUNC_MAX 16 + +typedef u32 offsize_t; /* In DWORDS !!! */ +/* Offset from the beginning of the MCP scratchpad */ +#define OFFSIZE_OFFSET_SHIFT 0 +#define OFFSIZE_OFFSET_MASK 0x0000ffff +/* Size of specific element (not the whole array if any) */ +#define OFFSIZE_SIZE_SHIFT 16 +#define OFFSIZE_SIZE_MASK 0xffff0000 + +#define SECTION_OFFSET(_offsize) (((((_offsize) & \ + OFFSIZE_OFFSET_MASK) >> \ + OFFSIZE_OFFSET_SHIFT) << 2)) + +#define QED_SECTION_SIZE(_offsize) ((((_offsize) & \ + OFFSIZE_SIZE_MASK) >> \ + OFFSIZE_SIZE_SHIFT) << 2) + +#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \ + SECTION_OFFSET((_offsize)) + \ + (QED_SECTION_SIZE((_offsize)) * (idx))) + +#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \ + ((_pub_base) + offsetof(struct mcp_public_data, sections[_section])) + +/* PHY configuration */ +struct eth_phy_cfg { + u32 speed; +#define ETH_SPEED_AUTONEG 0x0 +#define ETH_SPEED_SMARTLINQ 0x8 + + u32 pause; +#define ETH_PAUSE_NONE 0x0 +#define ETH_PAUSE_AUTONEG 0x1 +#define ETH_PAUSE_RX 0x2 +#define ETH_PAUSE_TX 0x4 + + u32 adv_speed; + + u32 loopback_mode; +#define ETH_LOOPBACK_NONE 0x0 +#define ETH_LOOPBACK_INT_PHY 0x1 +#define ETH_LOOPBACK_EXT_PHY 0x2 +#define ETH_LOOPBACK_EXT 0x3 +#define ETH_LOOPBACK_MAC 0x4 +#define ETH_LOOPBACK_CNIG_AH_ONLY_0123 0x5 +#define ETH_LOOPBACK_CNIG_AH_ONLY_2301 0x6 +#define ETH_LOOPBACK_PCS_AH_ONLY 0x7 +#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY 0x8 +#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY 0x9 + + u32 eee_cfg; +#define EEE_CFG_EEE_ENABLED BIT(0) +#define EEE_CFG_TX_LPI BIT(1) +#define EEE_CFG_ADV_SPEED_1G BIT(2) +#define EEE_CFG_ADV_SPEED_10G BIT(3) +#define EEE_TX_TIMER_USEC_MASK 0xfffffff0 +#define EEE_TX_TIMER_USEC_OFFSET 4 +#define EEE_TX_TIMER_USEC_BALANCED_TIME 0xa00 +#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME 0x100 +#define EEE_TX_TIMER_USEC_LATENCY_TIME 0x6000 + + u32 link_modes; + + u32 fec_mode; +#define FEC_FORCE_MODE_MASK 0x000000ff +#define FEC_FORCE_MODE_OFFSET 0 +#define FEC_FORCE_MODE_NONE 0x00 +#define FEC_FORCE_MODE_FIRECODE 0x01 +#define FEC_FORCE_MODE_RS 0x02 +#define FEC_FORCE_MODE_AUTO 
0x07 +#define FEC_EXTENDED_MODE_MASK 0xffffff00 +#define FEC_EXTENDED_MODE_OFFSET 8 +#define ETH_EXT_FEC_NONE 0x00000000 +#define ETH_EXT_FEC_10G_NONE 0x00000100 +#define ETH_EXT_FEC_10G_BASE_R 0x00000200 +#define ETH_EXT_FEC_25G_NONE 0x00000400 +#define ETH_EXT_FEC_25G_BASE_R 0x00000800 +#define ETH_EXT_FEC_25G_RS528 0x00001000 +#define ETH_EXT_FEC_40G_NONE 0x00002000 +#define ETH_EXT_FEC_40G_BASE_R 0x00004000 +#define ETH_EXT_FEC_50G_NONE 0x00008000 +#define ETH_EXT_FEC_50G_BASE_R 0x00010000 +#define ETH_EXT_FEC_50G_RS528 0x00020000 +#define ETH_EXT_FEC_50G_RS544 0x00040000 +#define ETH_EXT_FEC_100G_NONE 0x00080000 +#define ETH_EXT_FEC_100G_BASE_R 0x00100000 +#define ETH_EXT_FEC_100G_RS528 0x00200000 +#define ETH_EXT_FEC_100G_RS544 0x00400000 + + u32 extended_speed; +#define ETH_EXT_SPEED_MASK 0x0000ffff +#define ETH_EXT_SPEED_OFFSET 0 +#define ETH_EXT_SPEED_NONE 0x00000001 +#define ETH_EXT_SPEED_1G 0x00000002 +#define ETH_EXT_SPEED_10G 0x00000004 +#define ETH_EXT_SPEED_25G 0x00000008 +#define ETH_EXT_SPEED_40G 0x00000010 +#define ETH_EXT_SPEED_50G_BASE_R 0x00000020 +#define ETH_EXT_SPEED_50G_BASE_R2 0x00000040 +#define ETH_EXT_SPEED_100G_BASE_R2 0x00000080 +#define ETH_EXT_SPEED_100G_BASE_R4 0x00000100 +#define ETH_EXT_SPEED_100G_BASE_P4 0x00000200 +#define ETH_EXT_ADV_SPEED_MASK 0xFFFF0000 +#define ETH_EXT_ADV_SPEED_OFFSET 16 +#define ETH_EXT_ADV_SPEED_1G 0x00010000 +#define ETH_EXT_ADV_SPEED_10G 0x00020000 +#define ETH_EXT_ADV_SPEED_25G 0x00040000 +#define ETH_EXT_ADV_SPEED_40G 0x00080000 +#define ETH_EXT_ADV_SPEED_50G_BASE_R 0x00100000 +#define ETH_EXT_ADV_SPEED_50G_BASE_R2 0x00200000 +#define ETH_EXT_ADV_SPEED_100G_BASE_R2 0x00400000 +#define ETH_EXT_ADV_SPEED_100G_BASE_R4 0x00800000 +#define ETH_EXT_ADV_SPEED_100G_BASE_P4 0x01000000 +}; + +struct port_mf_cfg { + u32 dynamic_cfg; +#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff +#define PORT_MF_CFG_OV_TAG_SHIFT 0 +#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK + + u32 reserved[1]; +}; + +struct eth_stats { + u64 r64; + u64 r127; + u64 r255; + u64 r511; + u64 r1023; + u64 r1518; + + union { + struct { + u64 r1522; + u64 r2047; + u64 r4095; + u64 r9216; + u64 r16383; + } bb0; + struct { + u64 unused1; + u64 r1519_to_max; + u64 unused2; + u64 unused3; + u64 unused4; + } ah0; + } u0; + + u64 rfcs; + u64 rxcf; + u64 rxpf; + u64 rxpp; + u64 raln; + u64 rfcr; + u64 rovr; + u64 rjbr; + u64 rund; + u64 rfrg; + u64 t64; + u64 t127; + u64 t255; + u64 t511; + u64 t1023; + u64 t1518; + + union { + struct { + u64 t2047; + u64 t4095; + u64 t9216; + u64 t16383; + } bb1; + struct { + u64 t1519_to_max; + u64 unused6; + u64 unused7; + u64 unused8; + } ah1; + } u1; + + u64 txpf; + u64 txpp; + + union { + struct { + u64 tlpiec; + u64 tncl; + } bb2; + struct { + u64 unused9; + u64 unused10; + } ah2; + } u2; + + u64 rbyte; + u64 rxuca; + u64 rxmca; + u64 rxbca; + u64 rxpok; + u64 tbyte; + u64 txuca; + u64 txmca; + u64 txbca; + u64 txcf; +}; + +struct pkt_type_cnt { + u64 tc_tx_pkt_cnt[8]; + u64 tc_tx_oct_cnt[8]; + u64 priority_rx_pkt_cnt[8]; + u64 priority_rx_oct_cnt[8]; +}; + +struct brb_stats { + u64 brb_truncate[8]; + u64 brb_discard[8]; +}; + +struct port_stats { + struct brb_stats brb; + struct eth_stats eth; +}; + +struct couple_mode_teaming { + u8 port_cmt[MCP_GLOB_PORT_MAX]; +#define PORT_CMT_IN_TEAM BIT(0) + +#define PORT_CMT_PORT_ROLE BIT(1) +#define PORT_CMT_PORT_INACTIVE (0 << 1) +#define PORT_CMT_PORT_ACTIVE BIT(1) + +#define PORT_CMT_TEAM_MASK BIT(2) +#define PORT_CMT_TEAM0 (0 << 2) +#define PORT_CMT_TEAM1 BIT(2) +}; + +#define 
LLDP_CHASSIS_ID_STAT_LEN 4 +#define LLDP_PORT_ID_STAT_LEN 4 +#define DCBX_MAX_APP_PROTOCOL 32 +#define MAX_SYSTEM_LLDP_TLV_DATA 32 +#define MAX_TLV_BUFFER 128 + +enum _lldp_agent { + LLDP_NEAREST_BRIDGE = 0, + LLDP_NEAREST_NON_TPMR_BRIDGE, + LLDP_NEAREST_CUSTOMER_BRIDGE, + LLDP_MAX_LLDP_AGENTS +}; + +struct lldp_config_params_s { + u32 config; +#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff +#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0 +#define LLDP_CONFIG_HOLD_MASK 0x00000f00 +#define LLDP_CONFIG_HOLD_SHIFT 8 +#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000 +#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12 +#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000 +#define LLDP_CONFIG_ENABLE_RX_SHIFT 30 +#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000 +#define LLDP_CONFIG_ENABLE_TX_SHIFT 31 + u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; + u32 local_port_id[LLDP_PORT_ID_STAT_LEN]; +}; + +struct lldp_status_params_s { + u32 prefix_seq_num; + u32 status; + u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; + u32 peer_port_id[LLDP_PORT_ID_STAT_LEN]; + u32 suffix_seq_num; +}; + +struct dcbx_ets_feature { + u32 flags; +#define DCBX_ETS_ENABLED_MASK 0x00000001 +#define DCBX_ETS_ENABLED_SHIFT 0 +#define DCBX_ETS_WILLING_MASK 0x00000002 +#define DCBX_ETS_WILLING_SHIFT 1 +#define DCBX_ETS_ERROR_MASK 0x00000004 +#define DCBX_ETS_ERROR_SHIFT 2 +#define DCBX_ETS_CBS_MASK 0x00000008 +#define DCBX_ETS_CBS_SHIFT 3 +#define DCBX_ETS_MAX_TCS_MASK 0x000000f0 +#define DCBX_ETS_MAX_TCS_SHIFT 4 +#define DCBX_OOO_TC_MASK 0x00000f00 +#define DCBX_OOO_TC_SHIFT 8 + u32 pri_tc_tbl[1]; +#define DCBX_TCP_OOO_TC (4) +#define DCBX_TCP_OOO_K2_4PORT_TC (3) + +#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1) +#define DCBX_CEE_STRICT_PRIORITY 0xf + u32 tc_bw_tbl[2]; + u32 tc_tsa_tbl[2]; +#define DCBX_ETS_TSA_STRICT 0 +#define DCBX_ETS_TSA_CBS 1 +#define DCBX_ETS_TSA_ETS 2 +}; + +#define DCBX_TCP_OOO_TC (4) +#define DCBX_TCP_OOO_K2_4PORT_TC (3) + +struct dcbx_app_priority_entry { + u32 entry; +#define DCBX_APP_PRI_MAP_MASK 0x000000ff +#define DCBX_APP_PRI_MAP_SHIFT 0 +#define DCBX_APP_PRI_0 0x01 +#define DCBX_APP_PRI_1 0x02 +#define DCBX_APP_PRI_2 0x04 +#define DCBX_APP_PRI_3 0x08 +#define DCBX_APP_PRI_4 0x10 +#define DCBX_APP_PRI_5 0x20 +#define DCBX_APP_PRI_6 0x40 +#define DCBX_APP_PRI_7 0x80 +#define DCBX_APP_SF_MASK 0x00000300 +#define DCBX_APP_SF_SHIFT 8 +#define DCBX_APP_SF_ETHTYPE 0 +#define DCBX_APP_SF_PORT 1 +#define DCBX_APP_SF_IEEE_MASK 0x0000f000 +#define DCBX_APP_SF_IEEE_SHIFT 12 +#define DCBX_APP_SF_IEEE_RESERVED 0 +#define DCBX_APP_SF_IEEE_ETHTYPE 1 +#define DCBX_APP_SF_IEEE_TCP_PORT 2 +#define DCBX_APP_SF_IEEE_UDP_PORT 3 +#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4 + +#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 +#define DCBX_APP_PROTOCOL_ID_SHIFT 16 +}; + +struct dcbx_app_priority_feature { + u32 flags; +#define DCBX_APP_ENABLED_MASK 0x00000001 +#define DCBX_APP_ENABLED_SHIFT 0 +#define DCBX_APP_WILLING_MASK 0x00000002 +#define DCBX_APP_WILLING_SHIFT 1 +#define DCBX_APP_ERROR_MASK 0x00000004 +#define DCBX_APP_ERROR_SHIFT 2 +#define DCBX_APP_MAX_TCS_MASK 0x0000f000 +#define DCBX_APP_MAX_TCS_SHIFT 12 +#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000 +#define DCBX_APP_NUM_ENTRIES_SHIFT 16 + struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL]; +}; + +struct dcbx_features { + struct dcbx_ets_feature ets; + u32 pfc; +#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff +#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02 +#define 
DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80 + +#define DCBX_PFC_FLAGS_MASK 0x0000ff00 +#define DCBX_PFC_FLAGS_SHIFT 8 +#define DCBX_PFC_CAPS_MASK 0x00000f00 +#define DCBX_PFC_CAPS_SHIFT 8 +#define DCBX_PFC_MBC_MASK 0x00004000 +#define DCBX_PFC_MBC_SHIFT 14 +#define DCBX_PFC_WILLING_MASK 0x00008000 +#define DCBX_PFC_WILLING_SHIFT 15 +#define DCBX_PFC_ENABLED_MASK 0x00010000 +#define DCBX_PFC_ENABLED_SHIFT 16 +#define DCBX_PFC_ERROR_MASK 0x00020000 +#define DCBX_PFC_ERROR_SHIFT 17 + + struct dcbx_app_priority_feature app; +}; + +struct dcbx_local_params { + u32 config; +#define DCBX_CONFIG_VERSION_MASK 0x00000007 +#define DCBX_CONFIG_VERSION_SHIFT 0 +#define DCBX_CONFIG_VERSION_DISABLED 0 +#define DCBX_CONFIG_VERSION_IEEE 1 +#define DCBX_CONFIG_VERSION_CEE 2 +#define DCBX_CONFIG_VERSION_STATIC 4 + + u32 flags; + struct dcbx_features features; +}; + +struct dcbx_mib { + u32 prefix_seq_num; + u32 flags; + struct dcbx_features features; + u32 suffix_seq_num; +}; + +struct lldp_system_tlvs_buffer_s { + u32 flags; +#define LLDP_SYSTEM_TLV_VALID_MASK 0x1 +#define LLDP_SYSTEM_TLV_VALID_OFFSET 0 +#define LLDP_SYSTEM_TLV_MANDATORY_MASK 0x2 +#define LLDP_SYSTEM_TLV_MANDATORY_SHIFT 1 +#define LLDP_SYSTEM_TLV_LENGTH_MASK 0xffff0000 +#define LLDP_SYSTEM_TLV_LENGTH_SHIFT 16 + u32 data[MAX_SYSTEM_LLDP_TLV_DATA]; +}; + +struct lldp_received_tlvs_s { + u32 prefix_seq_num; + u32 length; + u32 tlvs_buffer[MAX_TLV_BUFFER]; + u32 suffix_seq_num; +}; + +struct dcb_dscp_map { + u32 flags; +#define DCB_DSCP_ENABLE_MASK 0x1 +#define DCB_DSCP_ENABLE_SHIFT 0 +#define DCB_DSCP_ENABLE 1 + u32 dscp_pri_map[8]; +}; + +struct mcp_val64 { + u32 lo; + u32 hi; +}; + +struct generic_idc_msg_s { + u32 source_pf; + struct mcp_val64 msg; +}; + +struct pcie_stats_stc { + u32 sr_cnt_wr_byte_msb; + u32 sr_cnt_wr_byte_lsb; + u32 sr_cnt_wr_cnt; + u32 sr_cnt_rd_byte_msb; + u32 sr_cnt_rd_byte_lsb; + u32 sr_cnt_rd_cnt; +}; + +enum _attribute_commands_e { + ATTRIBUTE_CMD_READ = 0, + ATTRIBUTE_CMD_WRITE, + ATTRIBUTE_CMD_READ_CLEAR, + ATTRIBUTE_CMD_CLEAR, + ATTRIBUTE_NUM_OF_COMMANDS +}; + +struct public_global { + u32 max_path; + u32 max_ports; +#define MODE_1P 1 +#define MODE_2P 2 +#define MODE_3P 3 +#define MODE_4P 4 + u32 debug_mb_offset; + u32 phymod_dbg_mb_offset; + struct couple_mode_teaming cmt; + s32 internal_temperature; + u32 mfw_ver; + u32 running_bundle_id; + s32 external_temperature; + u32 mdump_reason; + u32 ext_phy_upgrade_fw; + u8 runtime_port_swap_map[MODE_4P]; + u32 data_ptr; + u32 data_size; + u32 bmb_error_status_cnt; + u32 bmb_jumbo_frame_cnt; + u32 sent_to_bmc_cnt; + u32 handled_by_mfw; + u32 sent_to_nw_cnt; + u32 to_bmc_kb_per_second; + u32 bcast_dropped_to_bmc_cnt; + u32 mcast_dropped_to_bmc_cnt; + u32 ucast_dropped_to_bmc_cnt; + u32 ncsi_response_failure_cnt; + u32 device_attr; + u32 vpd_warning; +}; + +struct fw_flr_mb { + u32 aggint; + u32 opgen_addr; + u32 accum_ack; +}; + +struct public_path { + struct fw_flr_mb flr_mb; + u32 mcp_vf_disabled[VF_MAX_STATIC / 32]; + + u32 process_kill; +#define PROCESS_KILL_COUNTER_MASK 0x0000ffff +#define PROCESS_KILL_COUNTER_SHIFT 0 +#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000 +#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16 +#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) ((aeu_reg_id) * 32 + (aeu_bit)) +}; + +#define FC_NPIV_WWPN_SIZE 8 +#define FC_NPIV_WWNN_SIZE 8 +struct 
dci_npiv_settings { + u8 npiv_wwpn[FC_NPIV_WWPN_SIZE]; + u8 npiv_wwnn[FC_NPIV_WWNN_SIZE]; +}; + +struct dci_fc_npiv_cfg { + /* hdr used internally by the MFW */ + u32 hdr; + u32 num_of_npiv; +}; + +#define MAX_NUMBER_NPIV 64 +struct dci_fc_npiv_tbl { + struct dci_fc_npiv_cfg fc_npiv_cfg; + struct dci_npiv_settings settings[MAX_NUMBER_NPIV]; +}; + +struct pause_flood_monitor { + u8 period_cnt; + u8 any_brb_prs_packet_hist; + u8 any_brb_block_is_full_hist; + u8 flags; + u32 num_of_state_changes; +}; + +struct public_port { + u32 validity_map; + + u32 link_status; +#define LINK_STATUS_LINK_UP 0x00000001 +#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e +#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD BIT(1) +#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1) +#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 +#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 +#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 +#define LINK_STATUS_PFC_ENABLED 0x00000100 +#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 +#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 +#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800 +#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000 +#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000 +#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000 +#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000 +#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000 +#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000c0000 +#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18) +#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE BIT(18) +#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18) +#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18) +#define LINK_STATUS_SFP_TX_FAULT 0x00100000 +#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000 +#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000 +#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000 +#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000 +#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000 +#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000 + +#define LINK_STATUS_FEC_MODE_MASK 0x38000000 +#define LINK_STATUS_FEC_MODE_NONE (0 << 27) +#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 BIT(27) +#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27) +#define LINK_STATUS_EXT_PHY_LINK_UP BIT(30) + + u32 link_status1; + u32 ext_phy_fw_version; + u32 drv_phy_cfg_addr; + + u32 port_stx; + + u32 stat_nig_timer; + + struct port_mf_cfg port_mf_config; + struct port_stats stats; + + u32 media_type; +#define MEDIA_UNSPECIFIED 0x0 +#define MEDIA_SFPP_10G_FIBER 0x1 +#define MEDIA_XFP_FIBER 0x2 +#define MEDIA_DA_TWINAX 0x3 +#define MEDIA_BASE_T 0x4 +#define MEDIA_SFP_1G_FIBER 0x5 +#define MEDIA_MODULE_FIBER 0x6 +#define MEDIA_KR 0xf0 +#define MEDIA_NOT_PRESENT 0xff + + u32 lfa_status; + u32 link_change_count; + + struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS]; + struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS]; + struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf; + + /* DCBX related MIB */ + struct dcbx_local_params local_admin_dcbx_mib; + struct dcbx_mib remote_dcbx_mib; + struct dcbx_mib operational_dcbx_mib; + + u32 fc_npiv_nvram_tbl_addr; + u32 
fc_npiv_nvram_tbl_size; + + u32 transceiver_data; +#define ETH_TRANSCEIVER_STATE_MASK 0x000000ff +#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000 +#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000 +#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000 +#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001 +#define ETH_TRANSCEIVER_STATE_VALID 0x00000003 +#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008 +#define ETH_TRANSCEIVER_STATE_IN_SETUP 0x10 +#define ETH_TRANSCEIVER_TYPE_MASK 0x0000ff00 +#define ETH_TRANSCEIVER_TYPE_OFFSET 0x8 +#define ETH_TRANSCEIVER_TYPE_NONE 0x00 +#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0xff +#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01 +#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02 +#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03 +#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04 +#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05 +#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06 +#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07 +#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08 +#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09 +#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a +#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b +#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c +#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d +#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e +#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f +#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10 +#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11 +#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12 +#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13 +#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14 +#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15 +#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16 +#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17 +#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18 +#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19 +#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a +#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b +#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c +#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d +#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e +#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f +#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20 +#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21 +#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR 0x37 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR 0x38 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR 0x39 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR 0x3a + + u32 wol_info; + u32 wol_pkt_len; + u32 wol_pkt_details; + struct dcb_dscp_map dcb_dscp_map; + + u32 eee_status; +#define EEE_ACTIVE_BIT BIT(0) +#define EEE_LD_ADV_STATUS_MASK 0x000000f0 +#define EEE_LD_ADV_STATUS_OFFSET 4 +#define EEE_1G_ADV BIT(1) +#define EEE_10G_ADV BIT(2) +#define EEE_LP_ADV_STATUS_MASK 0x00000f00 +#define EEE_LP_ADV_STATUS_OFFSET 8 +#define EEE_SUPPORTED_SPEED_MASK 0x0000f000 +#define EEE_SUPPORTED_SPEED_OFFSET 12 +#define EEE_1G_SUPPORTED BIT(1) +#define EEE_10G_SUPPORTED BIT(2) + + u32 eee_remote; +#define EEE_REMOTE_TW_TX_MASK 0x0000ffff +#define EEE_REMOTE_TW_TX_OFFSET 0 +#define EEE_REMOTE_TW_RX_MASK 0xffff0000 +#define EEE_REMOTE_TW_RX_OFFSET 16 + + u32 module_info; + + u32 oem_cfg_port; +#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003 +#define OEM_CFG_CHANNEL_TYPE_OFFSET 0 +#define 
OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1 +#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2 +#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C +#define OEM_CFG_SCHED_TYPE_OFFSET 2 +#define OEM_CFG_SCHED_TYPE_ETS 0x1 +#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2 + + struct lldp_received_tlvs_s lldp_received_tlvs[LLDP_MAX_LLDP_AGENTS]; + u32 system_lldp_tlvs_buf2[MAX_SYSTEM_LLDP_TLV_DATA]; + u32 phy_module_temperature; + u32 nig_reg_stat_rx_bmb_packet; + u32 nig_reg_rx_llh_ncsi_mcp_mask; + u32 nig_reg_rx_llh_ncsi_mcp_mask_2; + struct pause_flood_monitor pause_flood_monitor; + u32 nig_drain_cnt; + struct pkt_type_cnt pkt_tc_priority_cnt; +}; + +#define MCP_DRV_VER_STR_SIZE 16 +#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32)) +#define MCP_DRV_NVM_BUF_LEN 32 +struct drv_version_stc { + u32 version; + u8 name[MCP_DRV_VER_STR_SIZE - 4]; +}; + +struct public_func { + u32 iscsi_boot_signature; + u32 iscsi_boot_block_offset; + + u32 mtu_size; + + u32 c2s_pcp_map_lower; + u32 c2s_pcp_map_upper; + u32 c2s_pcp_map_default; + + struct generic_idc_msg_s generic_idc_msg; + + u32 num_of_msix; + + u32 config; +#define FUNC_MF_CFG_FUNC_HIDE 0x00000001 +#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002 +#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001 + +#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0 +#define FUNC_MF_CFG_PROTOCOL_SHIFT 4 +#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000 +#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010 +#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020 +#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030 +#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030 + +#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00 +#define FUNC_MF_CFG_MIN_BW_SHIFT 8 +#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 +#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000 +#define FUNC_MF_CFG_MAX_BW_SHIFT 16 +#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000 + + u32 status; +#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001 + + u32 mac_upper; +#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff +#define FUNC_MF_CFG_UPPERMAC_SHIFT 0 +#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK + u32 mac_lower; +#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff + + u32 fcoe_wwn_port_name_upper; + u32 fcoe_wwn_port_name_lower; + + u32 fcoe_wwn_node_name_upper; + u32 fcoe_wwn_node_name_lower; + + u32 ovlan_stag; +#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff +#define FUNC_MF_CFG_OV_STAG_SHIFT 0 +#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK + + u32 pf_allocation; + + u32 preserve_data; + + u32 driver_last_activity_ts; + + u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; + + u32 drv_id; +#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff +#define DRV_ID_PDA_COMP_VER_SHIFT 0 + +#define LOAD_REQ_HSI_VERSION 2 +#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000 +#define DRV_ID_MCP_HSI_VER_SHIFT 16 +#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \ + DRV_ID_MCP_HSI_VER_SHIFT) + +#define DRV_ID_DRV_TYPE_MASK 0x7f000000 +#define DRV_ID_DRV_TYPE_SHIFT 24 +#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_LINUX BIT(DRV_ID_DRV_TYPE_SHIFT) + +#define DRV_ID_DRV_INIT_HW_MASK 0x80000000 +#define DRV_ID_DRV_INIT_HW_SHIFT 31 +#define DRV_ID_DRV_INIT_HW_FLAG BIT(DRV_ID_DRV_INIT_HW_SHIFT) + + u32 oem_cfg_func; +#define OEM_CFG_FUNC_TC_MASK 0x0000000F +#define OEM_CFG_FUNC_TC_OFFSET 0 +#define OEM_CFG_FUNC_TC_0 0x0 +#define OEM_CFG_FUNC_TC_1 0x1 +#define OEM_CFG_FUNC_TC_2 0x2 +#define OEM_CFG_FUNC_TC_3 0x3 +#define OEM_CFG_FUNC_TC_4 0x4 +#define OEM_CFG_FUNC_TC_5 0x5 +#define OEM_CFG_FUNC_TC_6 0x6 +#define OEM_CFG_FUNC_TC_7 0x7 + 
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2 + + struct drv_version_stc drv_ver; +}; + +struct mcp_mac { + u32 mac_upper; + u32 mac_lower; +}; + +struct mcp_file_att { + u32 nvm_start_addr; + u32 len; +}; + +struct bist_nvm_image_att { + u32 return_code; + u32 image_type; + u32 nvm_start_addr; + u32 len; +}; + +struct lan_stats_stc { + u64 ucast_rx_pkts; + u64 ucast_tx_pkts; + u32 fcs_err; + u32 rserved; +}; + +struct fcoe_stats_stc { + u64 rx_pkts; + u64 tx_pkts; + u32 fcs_err; + u32 login_failure; +}; + +struct iscsi_stats_stc { + u64 rx_pdus; + u64 tx_pdus; + u64 rx_bytes; + u64 tx_bytes; +}; + +struct rdma_stats_stc { + u64 rx_pkts; + u64 tx_pkts; + u64 rx_bytes; + u64 tx_bytes; +}; + +struct ocbb_data_stc { + u32 ocbb_host_addr; + u32 ocsd_host_addr; + u32 ocsd_req_update_interval; +}; + +struct fcoe_cap_stc { + u32 max_ios; + u32 max_log; + u32 max_exch; + u32 max_npiv; + u32 max_tgt; + u32 max_outstnd; +}; + +#define MAX_NUM_OF_SENSORS 7 +struct temperature_status_stc { + u32 num_of_sensors; + u32 sensor[MAX_NUM_OF_SENSORS]; +}; + +/* crash dump configuration header */ +struct mdump_config_stc { + u32 version; + u32 config; + u32 epoc; + u32 num_of_logs; + u32 valid_logs; +}; + +enum resource_id_enum { + RESOURCE_NUM_SB_E = 0, + RESOURCE_NUM_L2_QUEUE_E = 1, + RESOURCE_NUM_VPORT_E = 2, + RESOURCE_NUM_VMQ_E = 3, + RESOURCE_FACTOR_NUM_RSS_PF_E = 4, + RESOURCE_FACTOR_RSS_PER_VF_E = 5, + RESOURCE_NUM_RL_E = 6, + RESOURCE_NUM_PQ_E = 7, + RESOURCE_NUM_VF_E = 8, + RESOURCE_VFC_FILTER_E = 9, + RESOURCE_ILT_E = 10, + RESOURCE_CQS_E = 11, + RESOURCE_GFT_PROFILES_E = 12, + RESOURCE_NUM_TC_E = 13, + RESOURCE_NUM_RSS_ENGINES_E = 14, + RESOURCE_LL2_QUEUE_E = 15, + RESOURCE_RDMA_STATS_QUEUE_E = 16, + RESOURCE_BDQ_E = 17, + RESOURCE_QCN_E = 18, + RESOURCE_LLH_FILTER_E = 19, + RESOURCE_VF_MAC_ADDR = 20, + RESOURCE_LL2_CQS_E = 21, + RESOURCE_VF_CNQS = 22, + RESOURCE_MAX_NUM, + RESOURCE_NUM_INVALID = 0xFFFFFFFF +}; + +/* Resource ID is to be filled by the driver in the MB request + * Size, offset & flags to be filled by the MFW in the MB response + */ +struct resource_info { + enum resource_id_enum res_id; + u32 size; /* number of allocated resources */ + u32 offset; /* Offset of the 1st resource */ + u32 vf_size; + u32 vf_offset; + u32 flags; +#define RESOURCE_ELEMENT_STRICT BIT(0) +}; + +struct mcp_wwn { + u32 wwn_upper; + u32 wwn_lower; +}; + +#define DRV_ROLE_NONE 0 +#define DRV_ROLE_PREBOOT 1 +#define DRV_ROLE_OS 2 +#define DRV_ROLE_KDUMP 3 + +struct load_req_stc { + u32 drv_ver_0; + u32 drv_ver_1; + u32 fw_ver; + u32 misc0; +#define LOAD_REQ_ROLE_MASK 0x000000FF +#define LOAD_REQ_ROLE_SHIFT 0 +#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00 +#define LOAD_REQ_LOCK_TO_SHIFT 8 +#define LOAD_REQ_LOCK_TO_DEFAULT 0 +#define LOAD_REQ_LOCK_TO_NONE 255 +#define LOAD_REQ_FORCE_MASK 0x000F0000 +#define LOAD_REQ_FORCE_SHIFT 16 +#define LOAD_REQ_FORCE_NONE 0 +#define LOAD_REQ_FORCE_PF 1 +#define LOAD_REQ_FORCE_ALL 2 +#define LOAD_REQ_FLAGS0_MASK 0x00F00000 +#define LOAD_REQ_FLAGS0_SHIFT 20 +#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0) +}; + +struct load_rsp_stc { + u32 drv_ver_0; + u32 drv_ver_1; + u32 fw_ver; + u32 misc0; +#define LOAD_RSP_ROLE_MASK 0x000000FF +#define LOAD_RSP_ROLE_SHIFT 0 +#define LOAD_RSP_HSI_MASK 0x0000FF00 +#define LOAD_RSP_HSI_SHIFT 8 +#define LOAD_RSP_FLAGS0_MASK 0x000F0000 +#define LOAD_RSP_FLAGS0_SHIFT 16 +#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 
0) +}; + +struct mdump_retain_data_stc { + u32 valid; + u32 epoch; + u32 pf; + u32 status; +}; + +struct attribute_cmd_write_stc { + u32 val; + u32 mask; + u32 offset; +}; + +struct lldp_stats_stc { + u32 tx_frames_total; + u32 rx_frames_total; + u32 rx_frames_discarded; + u32 rx_age_outs; +}; + +struct get_att_ctrl_stc { + u32 disabled_attns; + u32 controllable_attns; +}; + +struct trace_filter_stc { + u32 level; + u32 modules; +}; + +union drv_union_data { + struct mcp_mac wol_mac; + + struct eth_phy_cfg drv_phy_cfg; + + struct mcp_val64 val64; + + u8 raw_data[MCP_DRV_NVM_BUF_LEN]; + + struct mcp_file_att file_att; + + u32 ack_vf_disabled[EXT_VF_BITMAP_SIZE_IN_DWORDS]; + + struct drv_version_stc drv_version; + + struct lan_stats_stc lan_stats; + struct fcoe_stats_stc fcoe_stats; + struct iscsi_stats_stc iscsi_stats; + struct rdma_stats_stc rdma_stats; + struct ocbb_data_stc ocbb_info; + struct temperature_status_stc temp_info; + struct resource_info resource; + struct bist_nvm_image_att nvm_image_att; + struct mdump_config_stc mdump_config; + struct mcp_mac lldp_mac; + struct mcp_wwn fcoe_fabric_name; + u32 dword; + + struct load_req_stc load_req; + struct load_rsp_stc load_rsp; + struct mdump_retain_data_stc mdump_retain; + struct attribute_cmd_write_stc attribute_cmd_write; + struct lldp_stats_stc lldp_stats; + struct pcie_stats_stc pcie_stats; + + struct get_att_ctrl_stc get_att_ctrl; + struct fcoe_cap_stc fcoe_cap; + struct trace_filter_stc trace_filter; +}; + +struct public_drv_mb { + u32 drv_mb_header; +#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff +#define DRV_MSG_SEQ_NUMBER_OFFSET 0 +#define DRV_MSG_CODE_MASK 0xffff0000 +#define DRV_MSG_CODE_OFFSET 16 + + u32 drv_mb_param; + + u32 fw_mb_header; +#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff +#define FW_MSG_SEQ_NUMBER_OFFSET 0 +#define FW_MSG_CODE_MASK 0xffff0000 +#define FW_MSG_CODE_OFFSET 16 + + u32 fw_mb_param; + + u32 drv_pulse_mb; +#define DRV_PULSE_SEQ_MASK 0x00007fff +#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 +#define DRV_PULSE_ALWAYS_ALIVE 0x00008000 + + u32 mcp_pulse_mb; +#define MCP_PULSE_SEQ_MASK 0x00007fff +#define MCP_PULSE_ALWAYS_ALIVE 0x00008000 +#define MCP_EVENT_MASK 0xffff0000 +#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 + + union drv_union_data union_data; +}; + +#define DRV_MSG_CODE(_code_) ((_code_) << DRV_MSG_CODE_OFFSET) +enum drv_msg_code_enum { + DRV_MSG_CODE_NVM_PUT_FILE_BEGIN = DRV_MSG_CODE(0x0001), + DRV_MSG_CODE_NVM_PUT_FILE_DATA = DRV_MSG_CODE(0x0002), + DRV_MSG_CODE_NVM_GET_FILE_ATT = DRV_MSG_CODE(0x0003), + DRV_MSG_CODE_NVM_READ_NVRAM = DRV_MSG_CODE(0x0005), + DRV_MSG_CODE_NVM_WRITE_NVRAM = DRV_MSG_CODE(0x0006), + DRV_MSG_CODE_MCP_RESET = DRV_MSG_CODE(0x0009), + DRV_MSG_CODE_SET_VERSION = DRV_MSG_CODE(0x000f), + DRV_MSG_CODE_MCP_HALT = DRV_MSG_CODE(0x0010), + DRV_MSG_CODE_SET_VMAC = DRV_MSG_CODE(0x0011), + DRV_MSG_CODE_GET_VMAC = DRV_MSG_CODE(0x0012), + DRV_MSG_CODE_GET_STATS = DRV_MSG_CODE(0x0013), + DRV_MSG_CODE_TRANSCEIVER_READ = DRV_MSG_CODE(0x0016), + DRV_MSG_CODE_MASK_PARITIES = DRV_MSG_CODE(0x001a), + DRV_MSG_CODE_BIST_TEST = DRV_MSG_CODE(0x001e), + DRV_MSG_CODE_SET_LED_MODE = DRV_MSG_CODE(0x0020), + DRV_MSG_CODE_RESOURCE_CMD = DRV_MSG_CODE(0x0023), + DRV_MSG_CODE_MDUMP_CMD = DRV_MSG_CODE(0x0025), + DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL = DRV_MSG_CODE(0x002b), + DRV_MSG_CODE_OS_WOL = DRV_MSG_CODE(0x002e), + DRV_MSG_CODE_GET_TLV_DONE = DRV_MSG_CODE(0x002f), + DRV_MSG_CODE_FEATURE_SUPPORT = DRV_MSG_CODE(0x0030), + DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT = DRV_MSG_CODE(0x0031), + 
DRV_MSG_CODE_GET_ENGINE_CONFIG = DRV_MSG_CODE(0x0037), + DRV_MSG_CODE_GET_NVM_CFG_OPTION = DRV_MSG_CODE(0x003e), + DRV_MSG_CODE_SET_NVM_CFG_OPTION = DRV_MSG_CODE(0x003f), + DRV_MSG_CODE_INITIATE_PF_FLR = DRV_MSG_CODE(0x0201), + DRV_MSG_CODE_LOAD_REQ = DRV_MSG_CODE(0x1000), + DRV_MSG_CODE_LOAD_DONE = DRV_MSG_CODE(0x1100), + DRV_MSG_CODE_INIT_HW = DRV_MSG_CODE(0x1200), + DRV_MSG_CODE_CANCEL_LOAD_REQ = DRV_MSG_CODE(0x1300), + DRV_MSG_CODE_UNLOAD_REQ = DRV_MSG_CODE(0x2000), + DRV_MSG_CODE_UNLOAD_DONE = DRV_MSG_CODE(0x2100), + DRV_MSG_CODE_INIT_PHY = DRV_MSG_CODE(0x2200), + DRV_MSG_CODE_LINK_RESET = DRV_MSG_CODE(0x2300), + DRV_MSG_CODE_SET_DCBX = DRV_MSG_CODE(0x2500), + DRV_MSG_CODE_OV_UPDATE_CURR_CFG = DRV_MSG_CODE(0x2600), + DRV_MSG_CODE_OV_UPDATE_BUS_NUM = DRV_MSG_CODE(0x2700), + DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS = DRV_MSG_CODE(0x2800), + DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER = DRV_MSG_CODE(0x2900), + DRV_MSG_CODE_NIG_DRAIN = DRV_MSG_CODE(0x3000), + DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE = DRV_MSG_CODE(0x3100), + DRV_MSG_CODE_BW_UPDATE_ACK = DRV_MSG_CODE(0x3200), + DRV_MSG_CODE_OV_UPDATE_MTU = DRV_MSG_CODE(0x3300), + DRV_MSG_GET_RESOURCE_ALLOC_MSG = DRV_MSG_CODE(0x3400), + DRV_MSG_SET_RESOURCE_VALUE_MSG = DRV_MSG_CODE(0x3500), + DRV_MSG_CODE_OV_UPDATE_WOL = DRV_MSG_CODE(0x3800), + DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE = DRV_MSG_CODE(0x3900), + DRV_MSG_CODE_S_TAG_UPDATE_ACK = DRV_MSG_CODE(0x3b00), + DRV_MSG_CODE_GET_OEM_UPDATES = DRV_MSG_CODE(0x4100), + DRV_MSG_CODE_GET_PPFID_BITMAP = DRV_MSG_CODE(0x4300), + DRV_MSG_CODE_VF_DISABLED_DONE = DRV_MSG_CODE(0xc000), + DRV_MSG_CODE_CFG_VF_MSIX = DRV_MSG_CODE(0xc001), + DRV_MSG_CODE_CFG_PF_VFS_MSIX = DRV_MSG_CODE(0xc002), + DRV_MSG_CODE_DEBUG_DATA_SEND = DRV_MSG_CODE(0xc004), +}; + +#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4 +#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30 +#define DRV_MSG_CODE_VMAC_TYPE_MAC 1 +#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2 +#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3 + +/* DRV_MSG_CODE_RETAIN_VMAC parameters */ +#define DRV_MSG_CODE_RETAIN_VMAC_FUNC_SHIFT 0 +#define DRV_MSG_CODE_RETAIN_VMAC_FUNC_MASK 0xf + +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_SHIFT 4 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_MASK 0x70 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_L2 0 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_ISCSI 1 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_FCOE 2 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_WWNN 3 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_WWPN 4 + +#define DRV_MSG_CODE_MCP_RESET_FORCE 0xf04ce + +#define DRV_MSG_CODE_STATS_TYPE_LAN 1 +#define DRV_MSG_CODE_STATS_TYPE_FCOE 2 +#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3 +#define DRV_MSG_CODE_STATS_TYPE_RDMA 4 + +#define BW_MAX_MASK 0x000000ff +#define BW_MAX_OFFSET 0 +#define BW_MIN_MASK 0x0000ff00 +#define BW_MIN_OFFSET 8 + +#define DRV_MSG_FAN_FAILURE_TYPE BIT(0) +#define DRV_MSG_TEMPERATURE_FAILURE_TYPE BIT(1) + +#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F +#define RESOURCE_CMD_REQ_RESC_SHIFT 0 +#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0 +#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5 +#define RESOURCE_OPCODE_REQ 1 +#define RESOURCE_OPCODE_REQ_WO_AGING 2 +#define RESOURCE_OPCODE_REQ_W_AGING 3 +#define RESOURCE_OPCODE_RELEASE 4 +#define RESOURCE_OPCODE_FORCE_RELEASE 5 +#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00 +#define RESOURCE_CMD_REQ_AGE_SHIFT 8 + +#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF +#define RESOURCE_CMD_RSP_OWNER_SHIFT 0 +#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700 +#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8 +#define RESOURCE_OPCODE_GNT 1 +#define RESOURCE_OPCODE_BUSY 2 +#define 
RESOURCE_OPCODE_RELEASED 3 +#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4 +#define RESOURCE_OPCODE_WRONG_OWNER 5 +#define RESOURCE_OPCODE_UNKNOWN_CMD 255 + +#define RESOURCE_DUMP 0 + +/* DRV_MSG_CODE_MDUMP_CMD parameters */ +#define MDUMP_DRV_PARAM_OPCODE_MASK 0x000000ff +#define DRV_MSG_CODE_MDUMP_ACK 0x01 +#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02 +#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03 +#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04 +#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05 +#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06 +#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07 +#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08 + +#define DRV_MSG_CODE_HW_DUMP_TRIGGER 0x0a + +#define DRV_MSG_CODE_MDUMP_FREE_DRIVER_BUF 0x0b +#define DRV_MSG_CODE_MDUMP_GEN_LINK_DUMP 0x0c +#define DRV_MSG_CODE_MDUMP_GEN_IDLE_CHK 0x0d + +/* DRV_MSG_CODE_MDUMP_CMD options */ +#define MDUMP_DRV_PARAM_OPTION_MASK 0x00000f00 +#define DRV_MSG_CODE_MDUMP_USE_DRIVER_BUF_OFFSET 8 +#define DRV_MSG_CODE_MDUMP_USE_DRIVER_BUF_MASK 0x100 + +/* DRV_MSG_CODE_EXT_PHY_READ/DRV_MSG_CODE_EXT_PHY_WRITE parameters */ +#define DRV_MB_PARAM_ADDR_SHIFT 0 +#define DRV_MB_PARAM_ADDR_MASK 0x0000FFFF +#define DRV_MB_PARAM_DEVAD_SHIFT 16 +#define DRV_MB_PARAM_DEVAD_MASK 0x001F0000 +#define DRV_MB_PARAM_PORT_SHIFT 21 +#define DRV_MB_PARAM_PORT_MASK 0x00600000 + +/* DRV_MSG_CODE_PMBUS_READ/DRV_MSG_CODE_PMBUS_WRITE parameters */ +#define DRV_MB_PARAM_PMBUS_CMD_SHIFT 0 +#define DRV_MB_PARAM_PMBUS_CMD_MASK 0xFF +#define DRV_MB_PARAM_PMBUS_LEN_SHIFT 8 +#define DRV_MB_PARAM_PMBUS_LEN_MASK 0x300 +#define DRV_MB_PARAM_PMBUS_DATA_SHIFT 16 +#define DRV_MB_PARAM_PMBUS_DATA_MASK 0xFFFF0000 + +/* UNLOAD_REQ params */ +#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000 +#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001 +#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002 +#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003 + +/* UNLOAD_DONE_params */ +#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001 + +/* INIT_PHY params */ +#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001 +#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002 + +/* LLDP / DCBX params*/ +#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001 +#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0 +#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006 +#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1 +#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_MASK 0x00000001 +#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_SHIFT 0 +#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_MASK 0x000007f0 +#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_SHIFT 4 +#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008 +#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3 +#define DRV_MB_PARAM_DCBX_ADMIN_CFG_NOTIFY_MASK 0x00000010 +#define DRV_MB_PARAM_DCBX_ADMIN_CFG_NOTIFY_SHIFT 4 + +#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF +#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0 + +#define DRV_MB_PARAM_NVM_PUT_FILE_TYPE_MASK 0x000000ff +#define DRV_MB_PARAM_NVM_PUT_FILE_TYPE_SHIFT 0 +#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1 +#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2 + +#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3 +#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0 +#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF +#define DRV_MB_PARAM_NVM_LEN_OFFSET 24 +#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000 + +#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0 +#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF +#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8 +#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00 + +#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0 +#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F +#define 
DRV_MB_PARAM_OV_CURR_CFG_NONE 0 +#define DRV_MB_PARAM_OV_CURR_CFG_OS 1 +#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2 +#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3 + +#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0 +#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF +#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000 +#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000 +#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00 +#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF + +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5 + +#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0 +#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF + +#define DRV_MB_PARAM_WOL_MASK (DRV_MB_PARAM_WOL_DEFAULT | \ + DRV_MB_PARAM_WOL_DISABLED | \ + DRV_MB_PARAM_WOL_ENABLED) +#define DRV_MB_PARAM_WOL_DEFAULT DRV_MB_PARAM_UNLOAD_WOL_MCP +#define DRV_MB_PARAM_WOL_DISABLED DRV_MB_PARAM_UNLOAD_WOL_DISABLED +#define DRV_MB_PARAM_WOL_ENABLED DRV_MB_PARAM_UNLOAD_WOL_ENABLED + +#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \ + DRV_MB_PARAM_ESWITCH_MODE_VEB | \ + DRV_MB_PARAM_ESWITCH_MODE_VEPA) +#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0 +#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1 +#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2 + +#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1 +#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0 + +#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 +#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 +#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 + +#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0 +#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003 +#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2 +#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000fc +#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8 +#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000ff00 +#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16 +#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xffff0000 + + /* Resource Allocation params - Driver version support */ +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000 +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0 + +#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0 +#define DRV_MB_PARAM_BIST_REGISTER_TEST 1 +#define DRV_MB_PARAM_BIST_CLOCK_TEST 2 +#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3 +#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4 + +#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0 +#define DRV_MB_PARAM_BIST_RC_PASSED 1 +#define DRV_MB_PARAM_BIST_RC_FAILED 2 +#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3 + +#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0 +#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000ff +#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8 +#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000ff00 + +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000ffff +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ 0x00000001 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL 0x00000004 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL 
0x00000008 +#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000 + +/* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */ +#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET 0 +#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK 0xff + +/* Driver attributes params */ +#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0 +#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00ffffff +#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24 +#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xff000000 + +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET 0 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000ffff +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_IGNORE 0x0000ffff +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT 17 +#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT 18 +#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT 19 +#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT 20 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK 0x00100000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_DEFAULT_RESTORE_ALL_SHIFT 21 +#define DRV_MB_PARAM_NVM_CFG_OPTION_DEFAULT_RESTORE_ALL_MASK 0x00200000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT 24 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK 0x0f000000 + +/*DRV_MSG_CODE_GET_PERM_MAC parametres*/ +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_SHIFT 0 +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_MASK 0xF +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_PF 0 +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_BMC 1 +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_VF 2 +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_LLDP 3 +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_MAX 4 +#define DRV_MSG_CODE_GET_PERM_MAC_INDEX_SHIFT 8 +#define DRV_MSG_CODE_GET_PERM_MAC_INDEX_MASK 0xFFFF00 + +#define FW_MSG_CODE(_code_) ((_code_) << FW_MSG_CODE_OFFSET) +enum fw_msg_code_enum { + FW_MSG_CODE_UNSUPPORTED = FW_MSG_CODE(0x0000), + FW_MSG_CODE_NVM_OK = FW_MSG_CODE(0x0001), + FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK = FW_MSG_CODE(0x0040), + FW_MSG_CODE_PHY_OK = FW_MSG_CODE(0x0011), + FW_MSG_CODE_OK = FW_MSG_CODE(0x0016), + FW_MSG_CODE_ERROR = FW_MSG_CODE(0x0017), + FW_MSG_CODE_TRANSCEIVER_DIAG_OK = FW_MSG_CODE(0x0016), + FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT = FW_MSG_CODE(0x0002), + FW_MSG_CODE_MDUMP_INVALID_CMD = FW_MSG_CODE(0x0003), + FW_MSG_CODE_OS_WOL_SUPPORTED = FW_MSG_CODE(0x0080), + FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE = FW_MSG_CODE(0x0087), + FW_MSG_CODE_DRV_LOAD_ENGINE = FW_MSG_CODE(0x1010), + FW_MSG_CODE_DRV_LOAD_PORT = FW_MSG_CODE(0x1011), + FW_MSG_CODE_DRV_LOAD_FUNCTION = FW_MSG_CODE(0x1012), + FW_MSG_CODE_DRV_LOAD_REFUSED_PDA = FW_MSG_CODE(0x1020), + FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 = FW_MSG_CODE(0x1021), + FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG = FW_MSG_CODE(0x1022), + FW_MSG_CODE_DRV_LOAD_REFUSED_HSI = FW_MSG_CODE(0x1023), + FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE = FW_MSG_CODE(0x1030), + FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT = FW_MSG_CODE(0x1031), + FW_MSG_CODE_DRV_LOAD_DONE = FW_MSG_CODE(0x1110), + FW_MSG_CODE_DRV_UNLOAD_ENGINE = FW_MSG_CODE(0x2011), + FW_MSG_CODE_DRV_UNLOAD_PORT = FW_MSG_CODE(0x2012), + FW_MSG_CODE_DRV_UNLOAD_FUNCTION = FW_MSG_CODE(0x2013), + FW_MSG_CODE_DRV_UNLOAD_DONE = FW_MSG_CODE(0x2110), + FW_MSG_CODE_RESOURCE_ALLOC_OK = FW_MSG_CODE(0x3400), + FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN = FW_MSG_CODE(0x3500), + 
FW_MSG_CODE_S_TAG_UPDATE_ACK_DONE = FW_MSG_CODE(0x3b00), + FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE = FW_MSG_CODE(0xb001), + FW_MSG_CODE_DEBUG_NOT_ENABLED = FW_MSG_CODE(0xb00a), + FW_MSG_CODE_DEBUG_DATA_SEND_OK = FW_MSG_CODE(0xb00b), +}; + +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000 +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0 + +/* Get PF RDMA protocol command response */ +#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0 +#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1 +#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2 +#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3 + +/* Get MFW feature support response */ +#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ BIT(0) +#define FW_MB_PARAM_FEATURE_SUPPORT_EEE BIT(1) +#define FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO BIT(2) +#define FW_MB_PARAM_FEATURE_SUPPORT_LP_PRES_DET BIT(3) +#define FW_MB_PARAM_FEATURE_SUPPORT_RELAXED_ORD BIT(4) +#define FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL BIT(5) +#define FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL BIT(6) +#define FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP BIT(7) +#define FW_MB_PARAM_FEATURE_SUPPORT_VF_DPM BIT(8) +#define FW_MB_PARAM_FEATURE_SUPPORT_IDLE_CHK BIT(9) +#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK BIT(16) +#define FW_MB_PARAM_FEATURE_SUPPORT_DISABLE_LLDP BIT(17) +#define FW_MB_PARAM_FEATURE_SUPPORT_ENHANCED_SYS_LCK BIT(18) +#define FW_MB_PARAM_FEATURE_SUPPORT_RESTORE_DEFAULT_CFG BIT(19) + +#define FW_MB_PARAM_MANAGEMENT_STATUS_LOCKDOWN_ENABLED 0x00000001 + +#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0) + +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3 + +#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xff +#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0 + +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0 +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000 +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24 + +enum MFW_DRV_MSG_TYPE { + MFW_DRV_MSG_LINK_CHANGE, + MFW_DRV_MSG_FLR_FW_ACK_FAILED, + MFW_DRV_MSG_VF_DISABLED, + MFW_DRV_MSG_LLDP_DATA_UPDATED, + MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED, + MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED, + MFW_DRV_MSG_ERROR_RECOVERY, + MFW_DRV_MSG_BW_UPDATE, + MFW_DRV_MSG_S_TAG_UPDATE, + MFW_DRV_MSG_GET_LAN_STATS, + MFW_DRV_MSG_GET_FCOE_STATS, + MFW_DRV_MSG_GET_ISCSI_STATS, + MFW_DRV_MSG_GET_RDMA_STATS, + MFW_DRV_MSG_FAILURE_DETECTED, + MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, + MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED, + MFW_DRV_MSG_EEE_NEGOTIATION_COMPLETE, + MFW_DRV_MSG_GET_TLV_REQ, + MFW_DRV_MSG_OEM_CFG_UPDATE, + MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED, + MFW_DRV_MSG_GENERIC_IDC, + MFW_DRV_MSG_XCVR_TX_FAULT, + MFW_DRV_MSG_XCVR_RX_LOS, + MFW_DRV_MSG_GET_FCOE_CAP, + MFW_DRV_MSG_GEN_LINK_DUMP, + MFW_DRV_MSG_GEN_IDLE_CHK, + MFW_DRV_MSG_DCBX_ADMIN_CFG_APPLIED, + MFW_DRV_MSG_MAX +}; + +#define MFW_DRV_MSG_MAX_DWORDS(msgs) ((((msgs) - 1) >> 2) + 1) +#define MFW_DRV_MSG_DWORD(msg_id) ((msg_id) >> 2) +#define MFW_DRV_MSG_OFFSET(msg_id) (((msg_id) & 0x3) << 3) +#define MFW_DRV_MSG_MASK(msg_id) (0xff << 
MFW_DRV_MSG_OFFSET(msg_id)) + +struct public_mfw_mb { + u32 sup_msgs; + u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; + u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; +}; + +enum public_sections { + PUBLIC_DRV_MB, + PUBLIC_MFW_MB, + PUBLIC_GLOBAL, + PUBLIC_PATH, + PUBLIC_PORT, + PUBLIC_FUNC, + PUBLIC_MAX_SECTIONS +}; + +struct drv_ver_info_stc { + u32 ver; + u8 name[32]; +}; + +/* Runtime data needs about 1/2K. We use 2K to be on the safe side. + * Please make sure data does not exceed this size. + */ +#define NUM_RUNTIME_DWORDS 16 +struct drv_init_hw_stc { + u32 init_hw_bitmask[NUM_RUNTIME_DWORDS]; + u32 init_hw_data[NUM_RUNTIME_DWORDS * 32]; +}; + +struct mcp_public_data { + u32 num_sections; + u32 sections[PUBLIC_MAX_SECTIONS]; + struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX]; + struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX]; + struct public_global global; + struct public_path path[MCP_GLOB_PATH_MAX]; + struct public_port port[MCP_GLOB_PORT_MAX]; + struct public_func func[MCP_GLOB_FUNC_MAX]; +}; + +#define I2C_TRANSCEIVER_ADDR 0xa0 +#define MAX_I2C_TRANSACTION_SIZE 16 +#define MAX_I2C_TRANSCEIVER_PAGE_SIZE 256 + +/* OCBB definitions */ +enum tlvs { + /* Category 1: Device Properties */ + DRV_TLV_CLP_STR, + DRV_TLV_CLP_STR_CTD, + /* Category 6: Device Configuration */ + DRV_TLV_SCSI_TO, + DRV_TLV_R_T_TOV, + DRV_TLV_R_A_TOV, + DRV_TLV_E_D_TOV, + DRV_TLV_CR_TOV, + DRV_TLV_BOOT_TYPE, + /* Category 8: Port Configuration */ + DRV_TLV_NPIV_ENABLED, + /* Category 10: Function Configuration */ + DRV_TLV_FEATURE_FLAGS, + DRV_TLV_LOCAL_ADMIN_ADDR, + DRV_TLV_ADDITIONAL_MAC_ADDR_1, + DRV_TLV_ADDITIONAL_MAC_ADDR_2, + DRV_TLV_LSO_MAX_OFFLOAD_SIZE, + DRV_TLV_LSO_MIN_SEGMENT_COUNT, + DRV_TLV_PROMISCUOUS_MODE, + DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG, + DRV_TLV_FLEX_NIC_OUTER_VLAN_ID, + DRV_TLV_OS_DRIVER_STATES, + DRV_TLV_PXE_BOOT_PROGRESS, + /* Category 12: FC/FCoE Configuration */ + DRV_TLV_NPIV_STATE, + DRV_TLV_NUM_OF_NPIV_IDS, + DRV_TLV_SWITCH_NAME, + DRV_TLV_SWITCH_PORT_NUM, + DRV_TLV_SWITCH_PORT_ID, + DRV_TLV_VENDOR_NAME, + DRV_TLV_SWITCH_MODEL, + DRV_TLV_SWITCH_FW_VER, + DRV_TLV_QOS_PRIORITY_PER_802_1P, + DRV_TLV_PORT_ALIAS, + DRV_TLV_PORT_STATE, + DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_LINK_FAILURE_COUNT, + DRV_TLV_FCOE_BOOT_PROGRESS, + /* Category 13: iSCSI Configuration */ + DRV_TLV_TARGET_LLMNR_ENABLED, + DRV_TLV_HEADER_DIGEST_FLAG_ENABLED, + DRV_TLV_DATA_DIGEST_FLAG_ENABLED, + DRV_TLV_AUTHENTICATION_METHOD, + DRV_TLV_ISCSI_BOOT_TARGET_PORTAL, + DRV_TLV_MAX_FRAME_SIZE, + DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_ISCSI_BOOT_PROGRESS, + /* Category 20: Device Data */ + DRV_TLV_PCIE_BUS_RX_UTILIZATION, + DRV_TLV_PCIE_BUS_TX_UTILIZATION, + DRV_TLV_DEVICE_CPU_CORES_UTILIZATION, + DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED, + DRV_TLV_NCSI_RX_BYTES_RECEIVED, + DRV_TLV_NCSI_TX_BYTES_SENT, + /* Category 22: Base Port Data */ + DRV_TLV_RX_DISCARDS, + DRV_TLV_RX_ERRORS, + DRV_TLV_TX_ERRORS, + DRV_TLV_TX_DISCARDS, + DRV_TLV_RX_FRAMES_RECEIVED, + DRV_TLV_TX_FRAMES_SENT, + /* Category 23: FC/FCoE Port Data */ + DRV_TLV_RX_BROADCAST_PACKETS, + DRV_TLV_TX_BROADCAST_PACKETS, + /* Category 28: Base Function Data */ + DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4, + DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6, + DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, + DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, + DRV_TLV_PF_RX_FRAMES_RECEIVED, + 
DRV_TLV_RX_BYTES_RECEIVED, + DRV_TLV_PF_TX_FRAMES_SENT, + DRV_TLV_TX_BYTES_SENT, + DRV_TLV_IOV_OFFLOAD, + DRV_TLV_PCI_ERRORS_CAP_ID, + DRV_TLV_UNCORRECTABLE_ERROR_STATUS, + DRV_TLV_UNCORRECTABLE_ERROR_MASK, + DRV_TLV_CORRECTABLE_ERROR_STATUS, + DRV_TLV_CORRECTABLE_ERROR_MASK, + DRV_TLV_PCI_ERRORS_AECC_REGISTER, + DRV_TLV_TX_QUEUES_EMPTY, + DRV_TLV_RX_QUEUES_EMPTY, + DRV_TLV_TX_QUEUES_FULL, + DRV_TLV_RX_QUEUES_FULL, + /* Category 29: FC/FCoE Function Data */ + DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, + DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, + DRV_TLV_FCOE_RX_FRAMES_RECEIVED, + DRV_TLV_FCOE_RX_BYTES_RECEIVED, + DRV_TLV_FCOE_TX_FRAMES_SENT, + DRV_TLV_FCOE_TX_BYTES_SENT, + DRV_TLV_CRC_ERROR_COUNT, + DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_1_TIMESTAMP, + DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_2_TIMESTAMP, + DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_3_TIMESTAMP, + DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_4_TIMESTAMP, + DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_5_TIMESTAMP, + DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT, + DRV_TLV_LOSS_OF_SIGNAL_ERRORS, + DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT, + DRV_TLV_DISPARITY_ERROR_COUNT, + DRV_TLV_CODE_VIOLATION_ERROR_COUNT, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4, + DRV_TLV_LAST_FLOGI_TIMESTAMP, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4, + DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP, + DRV_TLV_LAST_FLOGI_RJT, + DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP, + DRV_TLV_FDISCS_SENT_COUNT, + DRV_TLV_FDISC_ACCS_RECEIVED, + DRV_TLV_FDISC_RJTS_RECEIVED, + DRV_TLV_PLOGI_SENT_COUNT, + DRV_TLV_PLOGI_ACCS_RECEIVED, + DRV_TLV_PLOGI_RJTS_RECEIVED, + DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_1_TIMESTAMP, + DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_2_TIMESTAMP, + DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_3_TIMESTAMP, + DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_4_TIMESTAMP, + DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_5_TIMESTAMP, + DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_1_ACC_TIMESTAMP, + DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_2_ACC_TIMESTAMP, + DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_3_ACC_TIMESTAMP, + DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_4_ACC_TIMESTAMP, + DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_5_ACC_TIMESTAMP, + DRV_TLV_LOGOS_ISSUED, + DRV_TLV_LOGO_ACCS_RECEIVED, + DRV_TLV_LOGO_RJTS_RECEIVED, + DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_1_TIMESTAMP, + DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_2_TIMESTAMP, + DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_3_TIMESTAMP, + DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_4_TIMESTAMP, + DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_5_TIMESTAMP, + DRV_TLV_LOGOS_RECEIVED, + DRV_TLV_ACCS_ISSUED, + DRV_TLV_PRLIS_ISSUED, + DRV_TLV_ACCS_RECEIVED, + DRV_TLV_ABTS_SENT_COUNT, + DRV_TLV_ABTS_ACCS_RECEIVED, + DRV_TLV_ABTS_RJTS_RECEIVED, + DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_1_TIMESTAMP, + DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_2_TIMESTAMP, + 
DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_3_TIMESTAMP, + DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_4_TIMESTAMP, + DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_5_TIMESTAMP, + DRV_TLV_RSCNS_RECEIVED, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4, + DRV_TLV_LUN_RESETS_ISSUED, + DRV_TLV_ABORT_TASK_SETS_ISSUED, + DRV_TLV_TPRLOS_SENT, + DRV_TLV_NOS_SENT_COUNT, + DRV_TLV_NOS_RECEIVED_COUNT, + DRV_TLV_OLS_COUNT, + DRV_TLV_LR_COUNT, + DRV_TLV_LRR_COUNT, + DRV_TLV_LIP_SENT_COUNT, + DRV_TLV_LIP_RECEIVED_COUNT, + DRV_TLV_EOFA_COUNT, + DRV_TLV_EOFNI_COUNT, + DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT, + DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT, + DRV_TLV_SCSI_STATUS_BUSY_COUNT, + DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT, + DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT, + DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT, + DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT, + DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT, + DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT, + DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_1_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_2_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_3_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_4_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_5_TIMESTAMP, + /* Category 30: iSCSI Function Data */ + DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, + DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, + DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED, + DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED, + DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT, + DRV_TLV_ISCSI_PDU_TX_BYTES_SENT, + DRV_TLV_RDMA_DRV_VERSION +}; + +#define I2C_DEV_ADDR_A2 0xa2 +#define SFP_EEPROM_A2_TEMPERATURE_ADDR 0x60 +#define SFP_EEPROM_A2_TEMPERATURE_SIZE 2 +#define SFP_EEPROM_A2_VCC_ADDR 0x62 +#define SFP_EEPROM_A2_VCC_SIZE 2 +#define SFP_EEPROM_A2_TX_BIAS_ADDR 0x64 +#define SFP_EEPROM_A2_TX_BIAS_SIZE 2 +#define SFP_EEPROM_A2_TX_POWER_ADDR 0x66 +#define SFP_EEPROM_A2_TX_POWER_SIZE 2 +#define SFP_EEPROM_A2_RX_POWER_ADDR 0x68 +#define SFP_EEPROM_A2_RX_POWER_SIZE 2 + +#define I2C_DEV_ADDR_A0 0xa0 +#define QSFP_EEPROM_A0_TEMPERATURE_ADDR 0x16 +#define QSFP_EEPROM_A0_TEMPERATURE_SIZE 2 +#define QSFP_EEPROM_A0_VCC_ADDR 0x1a +#define QSFP_EEPROM_A0_VCC_SIZE 2 +#define QSFP_EEPROM_A0_TX1_BIAS_ADDR 0x2a +#define QSFP_EEPROM_A0_TX1_BIAS_SIZE 2 +#define QSFP_EEPROM_A0_TX1_POWER_ADDR 0x32 +#define QSFP_EEPROM_A0_TX1_POWER_SIZE 2 +#define QSFP_EEPROM_A0_RX1_POWER_ADDR 0x22 +#define QSFP_EEPROM_A0_RX1_POWER_SIZE 2 + +struct nvm_cfg_mac_address { + u32 mac_addr_hi; +#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000ffff +#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0 + + u32 mac_addr_lo; +}; + +struct nvm_cfg1_glob { + u32 generic_cont0; +#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000ff0 +#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4 +#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0 +#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1 +#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2 +#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3 +#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4 +#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5 +#define NVM_CFG1_GLOB_MF_MODE_BD 0x6 +#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7 + + u32 engineering_change[3]; + u32 manufacturing_id; + u32 serial_number[4]; + u32 pcie_cfg; + u32 mgmt_traffic; + + u32 core_cfg; +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000ff 
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xb +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xc +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xd +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xe +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xf +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1 0x11 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1 0x12 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2 0x13 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2 0x14 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4 0x15 + + u32 e_lane_cfg1; + u32 e_lane_cfg2; + u32 f_lane_cfg1; + u32 f_lane_cfg2; + u32 mps10_preemphasis; + u32 mps10_driver_current; + u32 mps25_preemphasis; + u32 mps25_driver_current; + u32 pci_id; + u32 pci_subsys_id; + u32 bar; + u32 mps10_txfir_main; + u32 mps10_txfir_post; + u32 mps25_txfir_main; + u32 mps25_txfir_post; + u32 manufacture_ver; + u32 manufacture_time; + u32 led_global_settings; + u32 generic_cont1; + + u32 mbi_version; +#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000ff +#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0 +#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000ff00 +#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8 +#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00ff0000 +#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16 + + u32 mbi_date; + u32 misc_sig; + + u32 device_capabilities; +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP 0x10 + + u32 power_dissipated; + u32 power_consumed; + u32 efi_version; + u32 multi_network_modes_capability; + u32 nvm_cfg_version; + u32 nvm_cfg_new_option_seq; + u32 nvm_cfg_removed_option_seq; + u32 nvm_cfg_updated_value_seq; + u32 extended_serial_number[8]; + u32 option_kit_pn[8]; + u32 spare_pn[8]; + u32 mps25_active_txfir_pre; + u32 mps25_active_txfir_main; + u32 mps25_active_txfir_post; + u32 features; + u32 tx_rx_eq_25g_hlpc; + u32 tx_rx_eq_25g_llpc; + u32 tx_rx_eq_25g_ac; + u32 tx_rx_eq_10g_pc; + u32 tx_rx_eq_10g_ac; + u32 tx_rx_eq_1g; + u32 tx_rx_eq_25g_bt; + u32 tx_rx_eq_10g_bt; + u32 generic_cont4; + u32 preboot_debug_mode_std; + u32 preboot_debug_mode_ext; + u32 ext_phy_cfg1; + u32 clocks; + u32 pre2_generic_cont_1; + u32 pre2_generic_cont_2; + u32 pre2_generic_cont_3; + u32 tx_rx_eq_50g_hlpc; + u32 tx_rx_eq_50g_mlpc; + u32 tx_rx_eq_50g_llpc; + u32 tx_rx_eq_50g_ac; + u32 trace_modules; + u32 pcie_class_code_fcoe; + u32 pcie_class_code_iscsi; + u32 no_provisioned_mac; + u32 lowest_mbi_version; + u32 generic_cont5; + u32 pre2_generic_cont_4; + u32 reserved[40]; +}; + +struct nvm_cfg1_path { + u32 reserved[1]; +}; + +struct nvm_cfg1_port { + u32 rel_to_opt123; + u32 rel_to_opt124; + + u32 generic_cont0; +#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000f0000 +#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16 +#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0 +#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1 +#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2 +#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 
0x00f00000 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4 + + u32 pcie_cfg; + u32 features; + + u32 speed_cap_mask; +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000ffff +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40 + + u32 link_settings; +#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000f +#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000e0000 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7 + + u32 phy_cfg; + u32 mgmt_traffic; + + u32 ext_phy; + /* EEE power saving mode */ +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00ff0000 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3 + + u32 mba_cfg1; + u32 mba_cfg2; + u32 vf_cfg; + struct nvm_cfg_mac_address lldp_mac_address; + u32 led_port_settings; + u32 transceiver_00; + u32 device_ids; + + u32 board_cfg; +#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000ff +#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0 +#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0 +#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1 +#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2 +#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3 +#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4 + + u32 mnm_10g_cap; + u32 mnm_10g_ctrl; + u32 mnm_10g_misc; + u32 mnm_25g_cap; + u32 mnm_25g_ctrl; + u32 mnm_25g_misc; + u32 mnm_40g_cap; + u32 mnm_40g_ctrl; + u32 mnm_40g_misc; + u32 mnm_50g_cap; + u32 mnm_50g_ctrl; + u32 mnm_50g_misc; + u32 mnm_100g_cap; + u32 mnm_100g_ctrl; + u32 mnm_100g_misc; + + u32 temperature; + u32 ext_phy_cfg1; + + u32 extended_speed; +#define NVM_CFG1_PORT_EXTENDED_SPEED_MASK 0x0000ffff +#define NVM_CFG1_PORT_EXTENDED_SPEED_OFFSET 0 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN 0x1 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G 0x2 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G 0x4 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G 
0x8 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G 0x10 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G 0x20 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R 0x40 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2 0x80 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2 0x100 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4 0x200 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4 0x400 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_MASK 0xffff0000 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_OFFSET 16 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED 0x1 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G 0x2 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G 0x4 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G 0x8 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G 0x10 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G 0x20 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R 0x40 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2 0x80 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2 0x100 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4 0x200 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4 0x400 + + u32 extended_fec_mode; + u32 port_generic_cont_01; + u32 port_generic_cont_02; + u32 phy_temp_monitor; + u32 reserved[109]; +}; + +struct nvm_cfg1_func { + struct nvm_cfg_mac_address mac_address; + u32 rsrv1; + u32 rsrv2; + u32 device_id; + u32 cmn_cfg; + u32 pci_cfg; + struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; + struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; + u32 preboot_generic_cfg; + u32 features; + u32 mf_mode_feature; + u32 reserved[6]; +}; + +struct nvm_cfg1 { + struct nvm_cfg1_glob glob; + struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; + struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; + struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; +}; + +struct board_info { + u16 vendor_id; + u16 eth_did_suffix; + u16 sub_vendor_id; + u16 sub_device_id; + char *board_name; + char *friendly_name; +}; + +struct trace_module_info { + char *module_name; +}; + +#define NUM_TRACE_MODULES 25 + +enum nvm_cfg_sections { + NVM_CFG_SECTION_NVM_CFG1, + NVM_CFG_SECTION_MAX +}; + +struct nvm_cfg { + u32 num_sections; + u32 sections_offset[NVM_CFG_SECTION_MAX]; + struct nvm_cfg1 cfg1; +}; + +#define PORT_0 0 +#define PORT_1 1 +#define PORT_2 2 +#define PORT_3 3 + +extern struct spad_layout g_spad; +struct spad_layout { + struct nvm_cfg nvm_cfg; + struct mcp_public_data public_data; +}; + +#define MCP_SPAD_SIZE 0x00028000 /* 160 KB */ + +#define SPAD_OFFSET(addr) (((u32)(addr) - (u32)CPU_SPAD_BASE)) + +#define TO_OFFSIZE(_offset, _size) \ + ((u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_OFFSET) | \ + (((u32)(_size) >> 2) << OFFSIZE_SIZE_OFFSET))) + +enum spad_sections { + SPAD_SECTION_TRACE, + SPAD_SECTION_NVM_CFG, + SPAD_SECTION_PUBLIC, + SPAD_SECTION_PRIVATE, + SPAD_SECTION_MAX +}; + +#define STRUCT_OFFSET(f) (STATIC_INIT_BASE + \ + __builtin_offsetof(struct static_init, f)) + +/* This section is located at a fixed location in the beginning of the + * scratchpad, to ensure that the MCP trace is not run over during MFW upgrade. + * All the rest of data has a floating location which differs from version to + * version, and is pointed by the mcp_meta_data below. + * Moreover, the spad_layout section is part of the MFW firmware, and is loaded + * with it from nvram in order to clear this portion. 
+ */ +struct static_init { + u32 num_sections; + offsize_t sections[SPAD_SECTION_MAX]; +#define SECTION(_sec_) (*((offsize_t *)(STRUCT_OFFSET(sections[_sec_])))) + + u32 tim_hash[8]; +#define PRESERVED_TIM_HASH ((u8 *)(STRUCT_OFFSET(tim_hash))) + u32 tpu_hash[8]; +#define PRESERVED_TPU_HASH ((u8 *)(STRUCT_OFFSET(tpu_hash))) + u32 secure_pcie_fw_ver; +#define SECURE_PCIE_FW_VER (*((u32 *)(STRUCT_OFFSET(secure_pcie_fw_ver)))) + u32 secure_running_mfw; +#define SECURE_RUNNING_MFW (*((u32 *)(STRUCT_OFFSET(secure_running_mfw)))) + struct mcp_trace trace; +}; + +#define CRC_MAGIC_VALUE 0xDEBB20E3 +#define CRC32_POLYNOMIAL 0xEDB88320 +#define _KB(x) ((x) * 1024) +#define _MB(x) (_KB(x) * 1024) +#define NVM_CRC_SIZE (sizeof(u32)) +enum nvm_sw_arbitrator { + NVM_SW_ARB_HOST, + NVM_SW_ARB_MCP, + NVM_SW_ARB_UART, + NVM_SW_ARB_RESERVED +}; + +struct legacy_bootstrap_region { + u32 magic_value; +#define NVM_MAGIC_VALUE 0x669955aa + u32 sram_start_addr; + u32 code_len; + u32 code_start_addr; + u32 crc; +}; + +struct nvm_code_entry { + u32 image_type; + u32 nvm_start_addr; + u32 len; + u32 sram_start_addr; + u32 sram_run_addr; +}; + +enum nvm_image_type { + NVM_TYPE_TIM1 = 0x01, + NVM_TYPE_TIM2 = 0x02, + NVM_TYPE_MIM1 = 0x03, + NVM_TYPE_MIM2 = 0x04, + NVM_TYPE_MBA = 0x05, + NVM_TYPE_MODULES_PN = 0x06, + NVM_TYPE_VPD = 0x07, + NVM_TYPE_MFW_TRACE1 = 0x08, + NVM_TYPE_MFW_TRACE2 = 0x09, + NVM_TYPE_NVM_CFG1 = 0x0a, + NVM_TYPE_L2B = 0x0b, + NVM_TYPE_DIR1 = 0x0c, + NVM_TYPE_EAGLE_FW1 = 0x0d, + NVM_TYPE_FALCON_FW1 = 0x0e, + NVM_TYPE_PCIE_FW1 = 0x0f, + NVM_TYPE_HW_SET = 0x10, + NVM_TYPE_LIM = 0x11, + NVM_TYPE_AVS_FW1 = 0x12, + NVM_TYPE_DIR2 = 0x13, + NVM_TYPE_CCM = 0x14, + NVM_TYPE_EAGLE_FW2 = 0x15, + NVM_TYPE_FALCON_FW2 = 0x16, + NVM_TYPE_PCIE_FW2 = 0x17, + NVM_TYPE_AVS_FW2 = 0x18, + NVM_TYPE_INIT_HW = 0x19, + NVM_TYPE_DEFAULT_CFG = 0x1a, + NVM_TYPE_MDUMP = 0x1b, + NVM_TYPE_NVM_META = 0x1c, + NVM_TYPE_ISCSI_CFG = 0x1d, + NVM_TYPE_FCOE_CFG = 0x1f, + NVM_TYPE_ETH_PHY_FW1 = 0x20, + NVM_TYPE_ETH_PHY_FW2 = 0x21, + NVM_TYPE_BDN = 0x22, + NVM_TYPE_8485X_PHY_FW = 0x23, + NVM_TYPE_PUB_KEY = 0x24, + NVM_TYPE_RECOVERY = 0x25, + NVM_TYPE_PLDM = 0x26, + NVM_TYPE_UPK1 = 0x27, + NVM_TYPE_UPK2 = 0x28, + NVM_TYPE_MASTER_KC = 0x29, + NVM_TYPE_BACKUP_KC = 0x2a, + NVM_TYPE_HW_DUMP = 0x2b, + NVM_TYPE_HW_DUMP_OUT = 0x2c, + NVM_TYPE_BIN_NVM_META = 0x30, + NVM_TYPE_ROM_TEST = 0xf0, + NVM_TYPE_88X33X0_PHY_FW = 0x31, + NVM_TYPE_88X33X0_PHY_SLAVE_FW = 0x32, + NVM_TYPE_IDLE_CHK = 0x33, + NVM_TYPE_MAX, +}; + +#define MAX_NVM_DIR_ENTRIES 100 + +struct nvm_dir_meta { + u32 dir_id; + u32 nvm_dir_addr; + u32 num_images; + u32 next_mfw_to_run; +}; + +struct nvm_dir { + s32 seq; +#define NVM_DIR_NEXT_MFW_MASK 0x00000001 +#define NVM_DIR_SEQ_MASK 0xfffffffe +#define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK) +#define NVM_DIR_UPDATE_SEQ(_seq, swap_mfw)\ + ({ \ + _seq = (((_seq + 2) & \ + NVM_DIR_SEQ_MASK) | \ + (NVM_DIR_NEXT_MFW(_seq ^ (swap_mfw))));\ + }) + +#define IS_DIR_SEQ_VALID(seq) (((seq) & NVM_DIR_SEQ_MASK) != \ + NVM_DIR_SEQ_MASK) + + u32 num_images; + u32 rsrv; + struct nvm_code_entry code[1]; /* Up to MAX_NVM_DIR_ENTRIES */ +}; + +#define NVM_DIR_SIZE(_num_images) (sizeof(struct nvm_dir) + \ + ((_num_images) - 1) *\ + sizeof(struct nvm_code_entry) +\ + NVM_CRC_SIZE) + +struct nvm_vpd_image { + u32 format_revision; +#define VPD_IMAGE_VERSION 1 + + u8 vpd_data[1]; +}; + +#define DIR_ID_1 (0) +#define DIR_ID_2 (1) +#define MAX_DIR_IDS (2) + +#define MFW_BUNDLE_1 (0) +#define MFW_BUNDLE_2 (1) +#define MAX_MFW_BUNDLES (2) + 
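/* Editor's note -- illustrative sketch only, not part of the patch above.
 * It shows how the nvm_dir sequence/size macros introduced in this header
 * are meant to be used. Both helper functions are hypothetical and assume
 * the definitions above (struct nvm_dir, NVM_DIR_SIZE(), NVM_DIR_UPDATE_SEQ(),
 * IS_DIR_SEQ_VALID(), NVM_CRC_SIZE) are in scope.
 */
#include <linux/types.h>

/* Flash footprint of a directory holding 'num_images' code entries:
 * the nvm_dir header, (num_images - 1) nvm_code_entry slots beyond the
 * one embedded in the struct, plus the trailing CRC word.
 */
static u32 example_nvm_dir_bytes(u32 num_images)
{
	return NVM_DIR_SIZE(num_images);
}

/* Advance the directory sequence after rewriting the directory page.
 * NVM_DIR_UPDATE_SEQ() steps the upper 31 bits by adding 2 (so the step
 * itself never carries into bit 0), then rewrites bit 0 - the "next MFW
 * bundle" flag - as the old flag toggled whenever 'swap_mfw' is odd.
 * A sequence read back from erased flash (all ones) fails
 * IS_DIR_SEQ_VALID() and is restarted from zero here.
 */
static s32 example_nvm_dir_advance(struct nvm_dir *p_dir, u32 swap_mfw)
{
	s32 seq = p_dir->seq;

	if (!IS_DIR_SEQ_VALID(seq))
		seq = 0;

	NVM_DIR_UPDATE_SEQ(seq, swap_mfw);
	p_dir->seq = seq;

	return seq;
}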
+#define FLASH_PAGE_SIZE 0x1000 +#define NVM_DIR_MAX_SIZE (FLASH_PAGE_SIZE) +#define LEGACY_ASIC_MIM_MAX_SIZE (_KB(1200)) + +#define FPGA_MIM_MAX_SIZE (0x40000) + +#define LIM_MAX_SIZE ((2 * FLASH_PAGE_SIZE) - \ + sizeof(struct legacy_bootstrap_region) \ + - NVM_RSV_SIZE) +#define LIM_OFFSET (NVM_OFFSET(lim_image)) +#define NVM_RSV_SIZE (44) +#define GET_MIM_MAX_SIZE(is_asic, is_e4) (LEGACY_ASIC_MIM_MAX_SIZE) +#define GET_MIM_OFFSET(idx, is_asic, is_e4) (NVM_OFFSET(dir[MAX_MFW_BUNDLES])\ + + (((idx) == NVM_TYPE_MIM2) ? \ + GET_MIM_MAX_SIZE(is_asic, is_e4)\ + : 0)) +#define GET_NVM_FIXED_AREA_SIZE(is_asic, is_e4) (sizeof(struct nvm_image) + \ + GET_MIM_MAX_SIZE(is_asic,\ + is_e4) * 2) + +union nvm_dir_union { + struct nvm_dir dir; + u8 page[FLASH_PAGE_SIZE]; +}; + +struct nvm_image { + struct legacy_bootstrap_region bootstrap; + u8 rsrv[NVM_RSV_SIZE]; + u8 lim_image[LIM_MAX_SIZE]; + union nvm_dir_union dir[MAX_MFW_BUNDLES]; +}; + +#define NVM_OFFSET(f) ((u32_t)((int_ptr_t)(&(((struct nvm_image *)0)->(f))))) + +struct hw_set_info { + u32 reg_type; +#define GRC_REG_TYPE 1 +#define PHY_REG_TYPE 2 +#define PCI_REG_TYPE 4 + + u32 bank_num; + u32 pf_num; + u32 operation; +#define READ_OP 1 +#define WRITE_OP 2 +#define RMW_SET_OP 3 +#define RMW_CLR_OP 4 + + u32 reg_addr; + u32 reg_data; + + u32 reset_type; +#define POR_RESET_TYPE BIT(0) +#define HARD_RESET_TYPE BIT(1) +#define CORE_RESET_TYPE BIT(2) +#define MCP_RESET_TYPE BIT(3) +#define PERSET_ASSERT BIT(4) +#define PERSET_DEASSERT BIT(5) +}; + +struct hw_set_image { + u32 format_version; +#define HW_SET_IMAGE_VERSION 1 + u32 no_hw_sets; + struct hw_set_info hw_sets[1]; +}; + +#define MAX_SUPPORTED_NVM_OPTIONS 1000 + +#define NVM_META_BIN_OPTION_OFFSET_MASK 0x0000ffff +#define NVM_META_BIN_OPTION_OFFSET_SHIFT 0 +#define NVM_META_BIN_OPTION_LEN_MASK 0x00ff0000 +#define NVM_META_BIN_OPTION_LEN_OFFSET 16 +#define NVM_META_BIN_OPTION_ENTITY_MASK 0x03000000 +#define NVM_META_BIN_OPTION_ENTITY_SHIFT 24 +#define NVM_META_BIN_OPTION_ENTITY_GLOB 0 +#define NVM_META_BIN_OPTION_ENTITY_PORT 1 +#define NVM_META_BIN_OPTION_ENTITY_FUNC 2 +#define NVM_META_BIN_OPTION_CONFIG_TYPE_MASK 0x0c000000 +#define NVM_META_BIN_OPTION_CONFIG_TYPE_SHIFT 26 +#define NVM_META_BIN_OPTION_CONFIG_TYPE_USER 0 +#define NVM_META_BIN_OPTION_CONFIG_TYPE_FIXED 1 +#define NVM_META_BIN_OPTION_CONFIG_TYPE_FORCED 2 + +struct nvm_meta_bin_t { + u32 magic; +#define NVM_META_BIN_MAGIC 0x669955bb + u32 version; +#define NVM_META_BIN_VERSION 1 + u32 num_options; + u32 options[0]; +}; +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c index b8c5641b29a8..5d725f59db24 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c @@ -26,12 +26,12 @@ static struct qed_ooo_archipelago u32 idx = (cid & 0xffff) - p_ooo_info->cid_base; struct qed_ooo_archipelago *p_archipelago; - if (idx >= p_ooo_info->max_num_archipelagos) + if (unlikely(idx >= p_ooo_info->max_num_archipelagos)) return NULL; p_archipelago = &p_ooo_info->p_archipelagos_mem[idx]; - if (list_empty(&p_archipelago->isles_list)) + if (unlikely(list_empty(&p_archipelago->isles_list))) return NULL; return p_archipelago; @@ -46,7 +46,7 @@ static struct qed_ooo_isle *qed_ooo_seek_isle(struct qed_hwfn *p_hwfn, u8 the_num_of_isle = 1; p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); - if (!p_archipelago) { + if (unlikely(!p_archipelago)) { DP_NOTICE(p_hwfn, "Connection %d is not found in OOO list\n", cid); return NULL; @@ -362,7 
+362,7 @@ void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn, if (ooo_isle > 1) { p_prev_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, ooo_isle - 1); - if (!p_prev_isle) { + if (unlikely(!p_prev_isle)) { DP_NOTICE(p_hwfn, "Isle %d is not found(cid %d)\n", ooo_isle - 1, cid); @@ -370,7 +370,7 @@ void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn, } } p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); - if (!p_archipelago && (ooo_isle != 1)) { + if (unlikely(!p_archipelago && ooo_isle != 1)) { DP_NOTICE(p_hwfn, "Connection %d is not found in OOO list\n", cid); return; @@ -381,7 +381,7 @@ void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn, struct qed_ooo_isle, list_entry); list_del(&p_isle->list_entry); - if (!list_empty(&p_isle->buffers_list)) { + if (unlikely(!list_empty(&p_isle->buffers_list))) { DP_NOTICE(p_hwfn, "Free isle is not empty\n"); INIT_LIST_HEAD(&p_isle->buffers_list); } @@ -418,13 +418,13 @@ void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn, struct qed_ooo_isle *p_isle = NULL; p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, ooo_isle); - if (!p_isle) { + if (unlikely(!p_isle)) { DP_NOTICE(p_hwfn, "Isle %d is not found(cid %d)\n", ooo_isle, cid); return; } - if (buffer_side == QED_OOO_LEFT_BUF) + if (unlikely(buffer_side == QED_OOO_LEFT_BUF)) list_add(&p_buffer->list_entry, &p_isle->buffers_list); else list_add_tail(&p_buffer->list_entry, &p_isle->buffers_list); @@ -438,7 +438,7 @@ void qed_ooo_join_isles(struct qed_hwfn *p_hwfn, p_right_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, left_isle + 1); - if (!p_right_isle) { + if (unlikely(!p_right_isle)) { DP_NOTICE(p_hwfn, "Right isle %d is not found(cid %d)\n", left_isle + 1, cid); @@ -450,7 +450,7 @@ void qed_ooo_join_isles(struct qed_hwfn *p_hwfn, if (left_isle) { p_left_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, left_isle); - if (!p_left_isle) { + if (unlikely(!p_left_isle)) { DP_NOTICE(p_hwfn, "Left isle %d is not found(cid %d)\n", left_isle, cid); diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c index 2c62d732e5c2..295ce435a1a4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c @@ -63,12 +63,12 @@ static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) DP_INFO(p_hwfn, "PF doesn't have lock ownership\n"); return -EBUSY; - } else if (!rc && !params.b_granted) { + } else if (!params.b_granted) { DP_INFO(p_hwfn, "Failed to acquire ptp resource lock\n"); return -EBUSY; } - return rc; + return 0; } static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 4f4b79250a2b..7f3e84b8622d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -22,6 +22,7 @@ #include "qed.h" #include "qed_cxt.h" #include "qed_hsi.h" +#include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_int.h" @@ -33,7 +34,6 @@ #include "qed_roce.h" #include "qed_sp.h" - int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 max_count, char *name) { @@ -865,8 +865,8 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod) } qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset; - addr = GTT_BAR0_MAP_REG_USDM_RAM + - USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num); + addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, + USTORM_COMMON_QUEUE_CONS, qz_num); REG_WR16(p_hwfn, 
addr, prod); @@ -1903,7 +1903,6 @@ void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm); } - void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { p_hwfn->db_bar_no_edpm = true; @@ -1966,7 +1965,7 @@ static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev, u8 *old_mac_address, - u8 *new_mac_address) + const u8 *new_mac_address) { int rc = 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h index 6a1de3a25257..2753723011dd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -168,16 +168,19 @@ static inline bool qed_rdma_is_xrc_qp(struct qed_rdma_qp *qp) return false; } + #if IS_ENABLED(CONFIG_QED_RDMA) void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn); void qed_rdma_info_free(struct qed_hwfn *p_hwfn); #else -static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} +static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) {} static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} -static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) {return -EINVAL;} +static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) + {return -EINVAL; } static inline void qed_rdma_info_free(struct qed_hwfn *p_hwfn) {} #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index da1b7fdcbda7..6f1a52e6beb2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -126,6 +126,8 @@ 0x1009c4UL #define QM_REG_PF_EN \ 0x2f2ea4UL +#define QM_REG_RLGLBLUPPERBOUND \ + 0x2f3c00UL #define TCFC_REG_WEAK_ENABLE_VF \ 0x2d0704UL #define TCFC_REG_STRONG_ENABLE_PF \ @@ -576,7 +578,7 @@ #define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL #define PRS_REG_GRE_PROTOCOL 0x1f0734UL #define PRS_REG_VXLAN_PORT 0x1f0738UL -#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 0x1f099cUL +#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL #define NIG_REG_ENC_TYPE_ENABLE 0x501058UL #define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE (0x1 << 0) @@ -595,8 +597,8 @@ #define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL #define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL #define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL -#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5 0x10092cUL -#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5 0x100930UL +#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2 0x10092cUL +#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2 0x100930UL #define NIG_REG_NGE_IP_ENABLE 0x508b28UL #define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL @@ -606,7 +608,10 @@ #define QM_REG_WFQPFWEIGHT 0x2f4e80UL #define QM_REG_WFQVPWEIGHT 0x2fa000UL - +#define QM_REG_WFQVPUPPERBOUND \ + 0x2fb000UL +#define QM_REG_WFQVPCRD \ + 0x2fc000UL #define PGLCS_REG_DBG_SELECT_K2_E5 \ 0x001d14UL #define PGLCS_REG_DBG_DWORD_ENABLE_K2_E5 \ @@ -1437,29 +1442,29 @@ 0x1401140UL #define XSEM_REG_SYNC_DBG_EMPTY \ 0x1401160UL -#define XSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ +#define XSEM_REG_SLOW_DBG_ACTIVE \ 0x1401400UL -#define XSEM_REG_SLOW_DBG_MODE_BB_K2 \ +#define XSEM_REG_SLOW_DBG_MODE \ 0x1401404UL -#define XSEM_REG_DBG_FRAME_MODE_BB_K2 \ +#define XSEM_REG_DBG_FRAME_MODE \ 0x1401408UL #define 
XSEM_REG_DBG_GPRE_VECT \ 0x1401410UL -#define XSEM_REG_DBG_MODE1_CFG_BB_K2 \ +#define XSEM_REG_DBG_MODE1_CFG \ 0x1401420UL #define XSEM_REG_FAST_MEMORY \ 0x1440000UL #define YSEM_REG_SYNC_DBG_EMPTY \ 0x1501160UL -#define YSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ +#define YSEM_REG_SLOW_DBG_ACTIVE \ 0x1501400UL -#define YSEM_REG_SLOW_DBG_MODE_BB_K2 \ +#define YSEM_REG_SLOW_DBG_MODE \ 0x1501404UL -#define YSEM_REG_DBG_FRAME_MODE_BB_K2 \ +#define YSEM_REG_DBG_FRAME_MODE \ 0x1501408UL #define YSEM_REG_DBG_GPRE_VECT \ 0x1501410UL -#define YSEM_REG_DBG_MODE1_CFG_BB_K2 \ +#define YSEM_REG_DBG_MODE1_CFG \ 0x1501420UL #define YSEM_REG_FAST_MEMORY \ 0x1540000UL @@ -1467,15 +1472,15 @@ 0x1601140UL #define PSEM_REG_SYNC_DBG_EMPTY \ 0x1601160UL -#define PSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ +#define PSEM_REG_SLOW_DBG_ACTIVE \ 0x1601400UL -#define PSEM_REG_SLOW_DBG_MODE_BB_K2 \ +#define PSEM_REG_SLOW_DBG_MODE \ 0x1601404UL -#define PSEM_REG_DBG_FRAME_MODE_BB_K2 \ +#define PSEM_REG_DBG_FRAME_MODE \ 0x1601408UL #define PSEM_REG_DBG_GPRE_VECT \ 0x1601410UL -#define PSEM_REG_DBG_MODE1_CFG_BB_K2 \ +#define PSEM_REG_DBG_MODE1_CFG \ 0x1601420UL #define PSEM_REG_FAST_MEMORY \ 0x1640000UL @@ -1483,15 +1488,15 @@ 0x1701140UL #define TSEM_REG_SYNC_DBG_EMPTY \ 0x1701160UL -#define TSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ +#define TSEM_REG_SLOW_DBG_ACTIVE \ 0x1701400UL -#define TSEM_REG_SLOW_DBG_MODE_BB_K2 \ +#define TSEM_REG_SLOW_DBG_MODE \ 0x1701404UL -#define TSEM_REG_DBG_FRAME_MODE_BB_K2 \ +#define TSEM_REG_DBG_FRAME_MODE \ 0x1701408UL #define TSEM_REG_DBG_GPRE_VECT \ 0x1701410UL -#define TSEM_REG_DBG_MODE1_CFG_BB_K2 \ +#define TSEM_REG_DBG_MODE1_CFG \ 0x1701420UL #define TSEM_REG_FAST_MEMORY \ 0x1740000UL @@ -1499,15 +1504,15 @@ 0x1801140UL #define MSEM_REG_SYNC_DBG_EMPTY \ 0x1801160UL -#define MSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ +#define MSEM_REG_SLOW_DBG_ACTIVE \ 0x1801400UL -#define MSEM_REG_SLOW_DBG_MODE_BB_K2 \ +#define MSEM_REG_SLOW_DBG_MODE \ 0x1801404UL -#define MSEM_REG_DBG_FRAME_MODE_BB_K2 \ +#define MSEM_REG_DBG_FRAME_MODE \ 0x1801408UL #define MSEM_REG_DBG_GPRE_VECT \ 0x1801410UL -#define MSEM_REG_DBG_MODE1_CFG_BB_K2 \ +#define MSEM_REG_DBG_MODE1_CFG \ 0x1801420UL #define MSEM_REG_FAST_MEMORY \ 0x1840000UL @@ -1517,21 +1522,21 @@ 20480 #define USEM_REG_SYNC_DBG_EMPTY \ 0x1901160UL -#define USEM_REG_SLOW_DBG_ACTIVE_BB_K2 \ +#define USEM_REG_SLOW_DBG_ACTIVE \ 0x1901400UL -#define USEM_REG_SLOW_DBG_MODE_BB_K2 \ +#define USEM_REG_SLOW_DBG_MODE \ 0x1901404UL -#define USEM_REG_DBG_FRAME_MODE_BB_K2 \ +#define USEM_REG_DBG_FRAME_MODE \ 0x1901408UL #define USEM_REG_DBG_GPRE_VECT \ 0x1901410UL -#define USEM_REG_DBG_MODE1_CFG_BB_K2 \ +#define USEM_REG_DBG_MODE1_CFG \ 0x1901420UL #define USEM_REG_FAST_MEMORY \ 0x1940000UL #define SEM_FAST_REG_DBG_MODE23_SRC_DISABLE \ 0x000748UL -#define SEM_FAST_REG_DBG_MODE4_SRC_DISABLE \ +#define SEM_FAST_REG_DBG_MODSRC_DISABLE \ 0x00074cUL #define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE \ 0x000750UL @@ -1561,7 +1566,7 @@ 0x341500UL #define BRB_REG_BIG_RAM_DATA_SIZE \ 64 -#define SEM_FAST_REG_STALL_0_BB_K2 \ +#define SEM_FAST_REG_STALL_0 \ 0x000488UL #define SEM_FAST_REG_STALLED \ 0x000494UL @@ -1619,35 +1624,35 @@ 0x008c14UL #define NWS_REG_NWS_CMU_K2 \ 0x720000UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \ 0x000680UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \ 0x000684UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \ 
0x0006c0UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \ 0x0006c4UL -#define MS_REG_MS_CMU_K2_E5 \ +#define MS_REG_MS_CMU_K2 \ 0x6a4000UL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \ 0x000208UL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \ 0x00020cUL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \ 0x000210UL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \ 0x000214UL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \ 0x000208UL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \ 0x00020cUL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \ 0x000210UL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \ 0x000214UL -#define PHY_PCIE_REG_PHY0_K2_E5 \ +#define PHY_PCIE_REG_PHY0_K2 \ 0x620000UL -#define PHY_PCIE_REG_PHY1_K2_E5 \ +#define PHY_PCIE_REG_PHY1_K2 \ 0x624000UL #define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL #define NIG_REG_PPF_TO_ENGINE_SEL 0x508900UL diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index cf5baa5e59bc..071b4aeaddf2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -792,7 +792,6 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn, if (rc) goto err; - /* Free ORQ - only if ramrod succeeded, in case FW is still using it */ dma_free_coherent(&p_hwfn->cdev->pdev->dev, qp->orq_num_pages * RDMA_RING_PAGE_SIZE, diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h index e27dd9a4547e..7a3bd749e1e4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_selftest.h +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h @@ -6,47 +6,47 @@ #include <linux/types.h> /** - * @brief qed_selftest_memory - Perform memory test + * qed_selftest_memory(): Perform memory test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_selftest_memory(struct qed_dev *cdev); /** - * @brief qed_selftest_interrupt - Perform interrupt test + * qed_selftest_interrupt(): Perform interrupt test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_selftest_interrupt(struct qed_dev *cdev); /** - * @brief qed_selftest_register - Perform register test + * qed_selftest_register(): Perform register test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_selftest_register(struct qed_dev *cdev); /** - * @brief qed_selftest_clock - Perform clock test + * qed_selftest_clock(): Perform clock test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_selftest_clock(struct qed_dev *cdev); /** - * @brief qed_selftest_nvram - Perform nvram test + * qed_selftest_nvram(): Perform nvram test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. 
*/ int qed_selftest_nvram(struct qed_dev *cdev); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 60ff3222bf55..4fb02a5579ee 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -23,31 +23,26 @@ enum spq_mode { }; struct qed_spq_comp_cb { - void (*function)(struct qed_hwfn *, - void *, - union event_ring_data *, + void (*function)(struct qed_hwfn *p_hwfn, + void *cookie, + union event_ring_data *data, u8 fw_return_code); void *cookie; }; /** - * @brief qed_eth_cqe_completion - handles the completion of a - * ramrod on the cqe ring + * qed_eth_cqe_completion(): handles the completion of a + * ramrod on the cqe ring. * - * @param p_hwfn - * @param cqe + * @p_hwfn: HW device data. + * @cqe: CQE. * - * @return int + * Return: Int. */ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn, struct eth_slow_path_rx_cqe *cqe); -/** - * @file - * - * QED Slow-hwfn queue interface - */ - + /* QED Slow-hwfn queue interface */ union ramrod_data { struct pf_start_ramrod_data pf_start; struct pf_update_ramrod_data pf_update; @@ -58,7 +53,7 @@ union ramrod_data { struct tx_queue_stop_ramrod_data tx_queue_stop; struct vport_start_ramrod_data vport_start; struct vport_stop_ramrod_data vport_stop; - struct rx_update_gft_filter_data rx_update_gft; + struct rx_update_gft_filter_ramrod_data rx_update_gft; struct vport_update_ramrod_data vport_update; struct core_rx_start_ramrod_data core_rx_queue_start; struct core_rx_stop_ramrod_data core_rx_queue_stop; @@ -207,117 +202,128 @@ struct qed_spq { }; /** - * @brief qed_spq_post - Posts a Slow hwfn request to FW, or lacking that - * Pends it to the future list. + * qed_spq_post(): Posts a Slow hwfn request to FW, or lacking that + * Pends it to the future list. * - * @param p_hwfn - * @param p_req + * @p_hwfn: HW device data. + * @p_ent: Ent. + * @fw_return_code: Return code from firmware. * - * @return int + * Return: Int. */ int qed_spq_post(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent, u8 *fw_return_code); /** - * @brief qed_spq_allocate - Alloocates & initializes the SPQ and EQ. + * qed_spq_alloc(): Alloocates & initializes the SPQ and EQ. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_spq_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_spq_setup - Reset the SPQ to its start state. + * qed_spq_setup(): Reset the SPQ to its start state. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_spq_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_spq_deallocate - Deallocates the given SPQ struct. + * qed_spq_free(): Deallocates the given SPQ struct. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_spq_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_spq_get_entry - Obtain an entrry from the spq - * free pool list. - * - * + * qed_spq_get_entry(): Obtain an entrry from the spq + * free pool list. * - * @param p_hwfn - * @param pp_ent + * @p_hwfn: HW device data. + * @pp_ent: PP ENT. * - * @return int + * Return: Int. */ int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent); /** - * @brief qed_spq_return_entry - Return an entry to spq free - * pool list + * qed_spq_return_entry(): Return an entry to spq free pool list. * - * @param p_hwfn - * @param p_ent + * @p_hwfn: HW device data. + * @p_ent: P ENT. + * + * Return: Void. 
*/ void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent); /** - * @brief qed_eq_allocate - Allocates & initializes an EQ struct + * qed_eq_alloc(): Allocates & initializes an EQ struct. * - * @param p_hwfn - * @param num_elem number of elements in the eq + * @p_hwfn: HW device data. + * @num_elem: number of elements in the eq. * - * @return int + * Return: Int. */ int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem); /** - * @brief qed_eq_setup - Reset the EQ to its start state. + * qed_eq_setup(): Reset the EQ to its start state. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_eq_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_eq_free - deallocates the given EQ struct. + * qed_eq_free(): deallocates the given EQ struct. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_eq_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_eq_prod_update - update the FW with default EQ producer + * qed_eq_prod_update(): update the FW with default EQ producer. + * + * @p_hwfn: HW device data. + * @prod: Prod. * - * @param p_hwfn - * @param prod + * Return: Void. */ void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod); /** - * @brief qed_eq_completion - Completes currently pending EQ elements + * qed_eq_completion(): Completes currently pending EQ elements. * - * @param p_hwfn - * @param cookie + * @p_hwfn: HW device data. + * @cookie: Cookie. * - * @return int + * Return: Int. */ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie); /** - * @brief qed_spq_completion - Completes a single event + * qed_spq_completion(): Completes a single event. * - * @param p_hwfn - * @param echo - echo value from cookie (used for determining completion) - * @param p_data - data from cookie (used in callback function if applicable) + * @p_hwfn: HW device data. + * @echo: echo value from cookie (used for determining completion). + * @fw_return_code: FW return code. + * @p_data: data from cookie (used in callback function if applicable). * - * @return int + * Return: Int. */ int qed_spq_completion(struct qed_hwfn *p_hwfn, __le16 echo, @@ -325,44 +331,43 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, union event_ring_data *p_data); /** - * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ + * qed_spq_get_cid(): Given p_hwfn, return cid for the hwfn's SPQ. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return u32 - SPQ CID + * Return: u32 - SPQ CID. */ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn); /** - * @brief qed_consq_alloc - Allocates & initializes an ConsQ - * struct + * qed_consq_alloc(): Allocates & initializes an ConsQ struct. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_consq_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_consq_setup - Reset the ConsQ to its start state. + * qed_consq_setup(): Reset the ConsQ to its start state. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return Void. */ void qed_consq_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_consq_free - deallocates the given ConsQ struct. + * qed_consq_free(): deallocates the given ConsQ struct. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return Void. */ void qed_consq_free(struct qed_hwfn *p_hwfn); int qed_spq_pend_post(struct qed_hwfn *p_hwfn); -/** - * @file - * - * @brief Slow-hwfn low-level commands (Ramrods) function definitions. - */ +/* Slow-hwfn low-level commands (Ramrods) function definitions. 
*/ #define QED_SP_EQ_COMPLETION 0x01 #define QED_SP_CQE_COMPLETION 0x02 @@ -377,12 +382,15 @@ struct qed_sp_init_data { }; /** - * @brief Returns a SPQ entry to the pool / frees the entry if allocated. - * Should be called on in error flows after initializing the SPQ entry - * and before posting it. + * qed_sp_destroy_request(): Returns a SPQ entry to the pool / frees the + * entry if allocated. Should be called on in error + * flows after initializing the SPQ entry + * and before posting it. + * + * @p_hwfn: HW device data. + * @p_ent: Ent. * - * @param p_hwfn - * @param p_ent + * Return: Void. */ void qed_sp_destroy_request(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent); @@ -394,7 +402,14 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, struct qed_sp_init_data *p_data); /** - * @brief qed_sp_pf_start - PF Function Start Ramrod + * qed_sp_pf_start(): PF Function Start Ramrod. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_tunn: P_tunn. + * @allow_npar_tx_switch: Allow NPAR TX Switch. + * + * Return: Int. * * This ramrod is sent to initialize a physical function (PF). It will * configure the function related parameters and write its completion to the @@ -404,12 +419,6 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, * allocated by the driver on host memory and its parameters are written * to the internal RAM of the UStorm by the Function Start Ramrod. * - * @param p_hwfn - * @param p_ptt - * @param p_tunn - * @param allow_npar_tx_switch - * - * @return int */ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, @@ -418,47 +427,33 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, bool allow_npar_tx_switch); /** - * @brief qed_sp_pf_update - PF Function Update Ramrod + * qed_sp_pf_update(): PF Function Update Ramrod. * - * This ramrod updates function-related parameters. Every parameter can be - * updated independently, according to configuration flags. + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Int. * - * @return int + * This ramrod updates function-related parameters. Every parameter can be + * updated independently, according to configuration flags. */ int qed_sp_pf_update(struct qed_hwfn *p_hwfn); /** - * @brief qed_sp_pf_update_stag - Update firmware of new outer tag + * qed_sp_pf_update_stag(): Update firmware of new outer tag. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn); /** - * @brief qed_sp_pf_stop - PF Function Stop Ramrod - * - * This ramrod is sent to close a Physical Function (PF). It is the last ramrod - * sent and the last completion written to the PFs Event Ring. This ramrod also - * deletes the context for the Slowhwfn connection on this PF. - * - * @note Not required for first packet. - * - * @param p_hwfn - * - * @return int - */ - -/** - * @brief qed_sp_pf_update_ufp - PF ufp update Ramrod + * qed_sp_pf_update_ufp(): PF ufp update Ramrod. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn); @@ -470,11 +465,11 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data); /** - * @brief qed_sp_heartbeat_ramrod - Send empty Ramrod + * qed_sp_heartbeat_ramrod(): Send empty Ramrod. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. 
*/ int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index b4ed54ffef9b..648176dfb871 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -369,8 +369,12 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain)); page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain); p_ramrod->event_ring_num_pages = page_cnt; - DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr, + + /* Place consolidation queue address in ramrod */ + DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_base_addr, qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain)); + page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_consq->chain); + p_ramrod->consolid_q_num_pages = page_cnt; qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config); @@ -401,8 +405,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, if (p_hwfn->cdev->p_iov_info) { struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; - p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf; - p_ramrod->num_vfs = (u8) p_iov->total_vfs; + p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf; + p_ramrod->num_vfs = (u8)p_iov->total_vfs; } p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR; p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR; diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 0bc1a0aeb56e..e0473729b161 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -20,6 +20,7 @@ #include "qed_cxt.h" #include "qed_dev_api.h" #include "qed_hsi.h" +#include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_int.h" #include "qed_iscsi.h" @@ -31,8 +32,8 @@ #include "qed_rdma.h" /*************************************************************************** -* Structures & Definitions -***************************************************************************/ + * Structures & Definitions + ***************************************************************************/ #define SPQ_HIGH_PRI_RESERVE_DEFAULT (1) @@ -42,8 +43,8 @@ #define SPQ_BLOCK_SLEEP_MS (5) /*************************************************************************** -* Blocking Imp. (BLOCK/EBLOCK mode) -***************************************************************************/ + * Blocking Imp. 
(BLOCK/EBLOCK mode) + ***************************************************************************/ static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn, void *cookie, union event_ring_data *data, u8 fw_return_code) @@ -149,8 +150,8 @@ err: } /*************************************************************************** -* SPQ entries inner API -***************************************************************************/ + * SPQ entries inner API + ***************************************************************************/ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent) { @@ -184,12 +185,12 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn, } /*************************************************************************** -* HSI access -***************************************************************************/ + * HSI access + ***************************************************************************/ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, struct qed_spq *p_spq) { - struct e4_core_conn_context *p_cxt; + struct core_conn_context *p_cxt; struct qed_cxt_info cxt_info; u16 physical_q; int rc; @@ -207,23 +208,20 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, p_cxt = cxt_info.p_cxt; SET_FIELD(p_cxt->xstorm_ag_context.flags10, - E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1); + XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1); SET_FIELD(p_cxt->xstorm_ag_context.flags1, - E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1); + XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1); SET_FIELD(p_cxt->xstorm_ag_context.flags9, - E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1); + XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1); /* QM physical queue */ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q); - p_cxt->xstorm_st_context.spq_base_lo = + p_cxt->xstorm_st_context.spq_base_addr.lo = DMA_LO_LE(p_spq->chain.p_phys_addr); - p_cxt->xstorm_st_context.spq_base_hi = + p_cxt->xstorm_st_context.spq_base_addr.hi = DMA_HI_LE(p_spq->chain.p_phys_addr); - - DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr, - p_hwfn->p_consq->chain.p_phys_addr); } static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, @@ -265,8 +263,8 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, } /*************************************************************************** -* Asynchronous events -***************************************************************************/ + * Asynchronous events + ***************************************************************************/ static int qed_async_event_completion(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) @@ -311,12 +309,12 @@ qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn, } /*************************************************************************** -* EQ API -***************************************************************************/ + * EQ API + ***************************************************************************/ void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod) { - u32 addr = GTT_BAR0_MAP_REG_USDM_RAM + - USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id); + u32 addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, + USTORM_EQE_CONS, p_hwfn->rel_pf_id); REG_WR16(p_hwfn, addr, prod); } @@ -433,8 +431,8 @@ void qed_eq_free(struct qed_hwfn *p_hwfn) } /*************************************************************************** -* CQE API - manipulate EQ functionality -***************************************************************************/ + * CQE 
API - manipulate EQ functionality + ***************************************************************************/ static int qed_cqe_completion(struct qed_hwfn *p_hwfn, struct eth_slow_path_rx_cqe *cqe, enum protocol_type protocol) @@ -464,8 +462,8 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn, } /*************************************************************************** -* Slow hwfn Queue (spq) -***************************************************************************/ + * Slow hwfn Queue (spq) + ***************************************************************************/ void qed_spq_setup(struct qed_hwfn *p_hwfn) { struct qed_spq *p_spq = p_hwfn->p_spq; @@ -548,7 +546,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn) int ret; /* SPQ struct */ - p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL); + p_spq = kzalloc(sizeof(*p_spq), GFP_KERNEL); if (!p_spq) return -ENOMEM; @@ -676,7 +674,6 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn, struct qed_spq *p_spq = p_hwfn->p_spq; if (p_ent->queue == &p_spq->unlimited_pending) { - if (list_empty(&p_spq->free_pool)) { list_add_tail(&p_ent->list, &p_spq->unlimited_pending); p_spq->unlimited_pending_count++; @@ -725,8 +722,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn, } /*************************************************************************** -* Accessor -***************************************************************************/ + * Accessor + ***************************************************************************/ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn) { if (!p_hwfn->p_spq) @@ -735,8 +732,8 @@ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn) } /*************************************************************************** -* Posting new Ramrods -***************************************************************************/ + * Posting new Ramrods + ***************************************************************************/ static int qed_spq_post_list(struct qed_hwfn *p_hwfn, struct list_head *head, u32 keep_reserve) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index ed2b6fe5a78d..8ac38828ba45 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -11,6 +11,7 @@ #include <linux/qed/qed_iov_if.h> #include "qed_cxt.h" #include "qed_hsi.h" +#include "qed_iro_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_int.h" @@ -19,12 +20,13 @@ #include "qed_sp.h" #include "qed_sriov.h" #include "qed_vf.h" -static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, - u8 opcode, - __le16 echo, - union event_ring_data *data, u8 fw_return_code); static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid); +static u16 qed_vf_from_entity_id(__le16 entity_id) +{ + return le16_to_cpu(entity_id) - MAX_NUM_PFS; +} + static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf) { u8 legacy = 0; @@ -169,8 +171,8 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn, b_enabled_only, false)) vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id]; else - DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n", - relative_vf_id); + DP_ERR(p_hwfn, "%s: VF[%d] is not enabled\n", + __func__, relative_vf_id); return vf; } @@ -308,7 +310,7 @@ static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn, struct qed_dmae_params params; struct qed_vf_info *p_vf; - p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if 
(!p_vf) return -EINVAL; @@ -420,7 +422,7 @@ static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn) bulletin_p = p_iov_info->bulletins_phys; if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) { DP_ERR(p_hwfn, - "qed_iov_setup_vfdb called without allocating mem first\n"); + "%s called without allocating mem first\n", __func__); return; } @@ -464,7 +466,7 @@ static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn) num_vfs = p_hwfn->cdev->p_iov_info->total_vfs; DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "qed_iov_allocate_vfdb for %d VFs\n", num_vfs); + "%s for %d VFs\n", __func__, num_vfs); /* Allocate PF Mailbox buffer (per-VF) */ p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs; @@ -500,10 +502,10 @@ static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn) QED_MSG_IOV, "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n", p_iov_info->mbx_msg_virt_addr, - (u64) p_iov_info->mbx_msg_phys_addr, + (u64)p_iov_info->mbx_msg_phys_addr, p_iov_info->mbx_reply_virt_addr, - (u64) p_iov_info->mbx_reply_phys_addr, - p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys); + (u64)p_iov_info->mbx_reply_phys_addr, + p_iov_info->p_bulletins, (u64)p_iov_info->bulletins_phys); return 0; } @@ -608,7 +610,7 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn) if (rc) return rc; - /* We want PF IOV to be synonemous with the existance of p_iov_info; + /* We want PF IOV to be synonemous with the existence of p_iov_info; * In case the capability is published but there are no VFs, simply * de-allocate the struct. */ @@ -714,12 +716,12 @@ static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn, int i; /* Set VF masks and configuration - pretend */ - qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0); /* unpretend */ - qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); /* iterate over all queues, clear sb consumer */ for (i = 0; i < vf->num_sbs; i++) @@ -734,7 +736,7 @@ static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn, { u32 igu_vf_conf; - qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION); @@ -746,7 +748,7 @@ static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf); /* unpretend */ - qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); } static int @@ -807,7 +809,7 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, if (rc) return rc; - qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id); STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf); @@ -816,7 +818,7 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, p_hwfn->hw_info.hw_mode); /* unpretend */ - qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); vf->state = VF_FREE; @@ -904,7 +906,7 @@ static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn, p_block->igu_sb_id * sizeof(u64), 2, NULL); } - vf->num_sbs = (u8) num_rx_queues; + vf->num_sbs = 
(u8)num_rx_queues; return vf->num_sbs; } @@ -988,7 +990,7 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false); if (!vf) { - DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n"); + DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__); return -EINVAL; } @@ -1092,7 +1094,7 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); if (!vf) { - DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n"); + DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__); return -EINVAL; } @@ -1220,8 +1222,8 @@ static void qed_iov_send_response(struct qed_hwfn *p_hwfn, * channel would be re-set to ready prior to that. */ REG_WR(p_hwfn, - GTT_BAR0_MAP_REG_USDM_RAM + - USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1); + GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, + USTORM_VF_PF_CHANNEL_READY, eng_vf_id), 1); qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys, mbx->req_virt->first_tlv.reply_address, @@ -1545,7 +1547,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, memset(resp, 0, sizeof(*resp)); /* Write the PF version so that VF would know which version - * is supported - might be later overriden. This guarantees that + * is supported - might be later overridden. This guarantees that * VF could recognize legacy PF based on lack of versions in reply. */ pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR; @@ -1603,7 +1605,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, /* fill in pfdev info */ pfdev_info->chip_num = p_hwfn->cdev->chip_num; pfdev_info->db_size = 0; - pfdev_info->indices_per_sb = PIS_PER_SB_E4; + pfdev_info->indices_per_sb = PIS_PER_SB; pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED | PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE; @@ -1897,7 +1899,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, int sb_id; int rc; - vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true); + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true); if (!vf_info) { DP_NOTICE(p_hwfn->cdev, "Failed to get VF info, invalid vfid [%d]\n", @@ -1957,7 +1959,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, rc = qed_sp_eth_vport_start(p_hwfn, ¶ms); if (rc) { DP_ERR(p_hwfn, - "qed_iov_vf_mbx_start_vport returned error %d\n", rc); + "%s returned error %d\n", __func__, rc); status = PFVF_STATUS_FAILURE; } else { vf->vport_instance++; @@ -1993,8 +1995,8 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id); if (rc) { - DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n", - rc); + DP_ERR(p_hwfn, "%s returned error %d\n", + __func__, rc); status = PFVF_STATUS_FAILURE; } @@ -2138,10 +2140,10 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, * calculate on their own and clean the producer prior to this. 
*/ if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD)) - REG_WR(p_hwfn, - GTT_BAR0_MAP_REG_MSDM_RAM + - MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid), - 0); + qed_wr(p_hwfn, p_ptt, MSEM_REG_FAST_MEMORY + + SEM_FAST_REG_INT_RAM + + MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, + req->rx_qid), 0); rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid, req->bd_max_bytes, @@ -3030,7 +3032,7 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, goto out; } p_rss_params = vzalloc(sizeof(*p_rss_params)); - if (p_rss_params == NULL) { + if (!p_rss_params) { status = PFVF_STATUS_FAILURE; goto out; } @@ -3550,6 +3552,7 @@ out: qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE, sizeof(struct pfvf_def_resp_tlv), status); } + static int qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) @@ -3557,7 +3560,7 @@ qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, int cnt; u32 val; - qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid); for (cnt = 0; cnt < 50; cnt++) { val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT); @@ -3565,7 +3568,7 @@ qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, break; msleep(20); } - qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); if (cnt == 50) { DP_ERR(p_hwfn, @@ -3577,48 +3580,73 @@ qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, return 0; } +#define MAX_NUM_EXT_VOQS (MAX_NUM_PORTS * NUM_OF_TCS) + static int qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) { - u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4]; - int i, cnt; + u32 prod, cons[MAX_NUM_EXT_VOQS], distance[MAX_NUM_EXT_VOQS], tmp; + u8 max_phys_tcs_per_port = p_hwfn->qm_info.max_phys_tcs_per_port; + u8 max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; + u32 prod_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0; + u32 cons_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0; + u8 port_id, tc, tc_id = 0, voq = 0; + int cnt; - /* Read initial consumers & producers */ - for (i = 0; i < MAX_NUM_VOQS_E4; i++) { - u32 prod; + memset(cons, 0, MAX_NUM_EXT_VOQS * sizeof(u32)); + memset(distance, 0, MAX_NUM_EXT_VOQS * sizeof(u32)); - cons[i] = qed_rd(p_hwfn, p_ptt, - PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + - i * 0x40); - prod = qed_rd(p_hwfn, p_ptt, - PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 + - i * 0x40); - distance[i] = prod - cons[i]; + /* Read initial consumers & producers */ + for (port_id = 0; port_id < max_ports_per_engine; port_id++) { + /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */ + for (tc = 0; tc < max_phys_tcs_per_port + 1; tc++) { + tc_id = (tc < max_phys_tcs_per_port) ? tc : PURE_LB_TC; + voq = VOQ(port_id, tc_id, max_phys_tcs_per_port); + cons[voq] = qed_rd(p_hwfn, p_ptt, + cons_voq0_addr + voq * 0x40); + prod = qed_rd(p_hwfn, p_ptt, + prod_voq0_addr + voq * 0x40); + distance[voq] = prod - cons[voq]; + } } /* Wait for consumers to pass the producers */ - i = 0; + port_id = 0; + tc = 0; for (cnt = 0; cnt < 50; cnt++) { - for (; i < MAX_NUM_VOQS_E4; i++) { - u32 tmp; + for (; port_id < max_ports_per_engine; port_id++) { + /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */ + for (; tc < max_phys_tcs_per_port + 1; tc++) { + tc_id = (tc < max_phys_tcs_per_port) ? 
+ tc : PURE_LB_TC; + voq = VOQ(port_id, + tc_id, max_phys_tcs_per_port); + tmp = qed_rd(p_hwfn, p_ptt, + cons_voq0_addr + voq * 0x40); + if (distance[voq] > tmp - cons[voq]) + break; + } - tmp = qed_rd(p_hwfn, p_ptt, - PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + - i * 0x40); - if (distance[i] > tmp - cons[i]) + if (tc == max_phys_tcs_per_port + 1) + tc = 0; + else break; } - if (i == MAX_NUM_VOQS_E4) + if (port_id == max_ports_per_engine) break; msleep(20); } if (cnt == 50) { - DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n", - p_vf->abs_vf_id, i); + DP_ERR(p_hwfn, "VF[%d]: pbf poll failed on VOQ%d\n", + p_vf->abs_vf_id, (int)voq); + + DP_ERR(p_hwfn, "VOQ %d has port_id as %d and tc_id as %d]\n", + (int)voq, (int)port_id, (int)tc_id); + return -EBUSY; } @@ -3680,8 +3708,8 @@ qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn, * doesn't do that as a part of FLR. */ REG_WR(p_hwfn, - GTT_BAR0_MAP_REG_USDM_RAM + - USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1); + GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, + USTORM_VF_PF_CHANNEL_READY, vfid), 1); /* VF_STOPPED has to be set only after final cleanup * but prior to re-enabling the VF. @@ -3842,7 +3870,7 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, struct qed_iov_vf_mbx *mbx; struct qed_vf_info *p_vf; - p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!p_vf) return; @@ -3979,7 +4007,7 @@ static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events) static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn, u16 abs_vfid) { - u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf; + u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf; if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) { DP_VERBOSE(p_hwfn, @@ -3989,7 +4017,7 @@ static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn, return NULL; } - return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min]; + return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min]; } static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, @@ -4013,13 +4041,13 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, return 0; } -static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, - struct malicious_vf_eqe_data *p_data) +void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, + struct fw_err_data *p_data) { struct qed_vf_info *p_vf; - p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id); - + p_vf = qed_sriov_get_vf_from_absid(p_hwfn, qed_vf_from_entity_id + (p_data->entity_id)); if (!p_vf) return; @@ -4036,16 +4064,13 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, } } -static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, - union event_ring_data *data, u8 fw_return_code) +int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, + union event_ring_data *data, u8 fw_return_code) { switch (opcode) { case COMMON_EVENT_VF_PF_CHANNEL: return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo), &data->vf_pf_channel.msg_addr); - case COMMON_EVENT_MALICIOUS_VF: - qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf); - return 0; default: DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n", opcode); @@ -4075,7 +4100,7 @@ static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt, struct qed_dmae_params params; struct qed_vf_info *vf_info; - vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf_info) 
return -EINVAL; @@ -4176,7 +4201,7 @@ static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn, struct qed_vf_info *vf_info; u64 feature; - vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf_info) { DP_NOTICE(p_hwfn->cdev, "Can not set forced MAC, invalid vfid [%d]\n", vfid); @@ -4226,7 +4251,7 @@ static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid) { struct qed_vf_info *p_vf_info; - p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!p_vf_info) return false; @@ -4237,7 +4262,7 @@ static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid) { struct qed_vf_info *p_vf_info; - p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!p_vf_info) return true; @@ -4248,7 +4273,7 @@ static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid) { struct qed_vf_info *vf_info; - vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf_info) return false; @@ -4266,7 +4291,7 @@ static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val) goto out; } - vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf) goto out; @@ -4345,7 +4370,8 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn, return rc; rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */ - return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val); + return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val, + QM_RL_TYPE_NORMAL); } static int @@ -4376,7 +4402,7 @@ static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid) struct qed_wfq_data *vf_vp_wfq; struct qed_vf_info *vf_info; - vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); if (!vf_info) return 0; @@ -4395,8 +4421,10 @@ static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid) */ void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag) { + /* Memory barrier for setting atomic bit */ smp_mb__before_atomic(); set_bit(flag, &hwfn->iov_task_flags); + /* Memory barrier after setting atomic bit */ smp_mb__after_atomic(); DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0); @@ -4407,8 +4435,8 @@ void qed_vf_start_iov_wq(struct qed_dev *cdev) int i; for_each_hwfn(cdev, i) - queue_delayed_work(cdev->hwfns[i].iov_wq, - &cdev->hwfns[i].iov_task, 0); + queue_delayed_work(cdev->hwfns[i].iov_wq, + &cdev->hwfns[i].iov_task, 0); } int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) @@ -4416,8 +4444,8 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) int i, j; for_each_hwfn(cdev, i) - if (cdev->hwfns[i].iov_wq) - flush_workqueue(cdev->hwfns[i].iov_wq); + if (cdev->hwfns[i].iov_wq) + flush_workqueue(cdev->hwfns[i].iov_wq); /* Mark VFs for disablement */ qed_iov_set_vfs_to_disable(cdev, true); @@ -5010,7 +5038,7 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) } qed_for_each_vf(hwfn, i) - qed_iov_post_vf_bulletin(hwfn, i, ptt); + qed_iov_post_vf_bulletin(hwfn, i, ptt); qed_ptt_release(hwfn, ptt); } @@ -5196,7 +5224,6 @@ void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) cancel_delayed_work_sync(&cdev->hwfns[i].iov_task); } - 
flush_workqueue(cdev->hwfns[i].iov_wq); destroy_workqueue(cdev->hwfns[i].iov_wq); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index eacd6457f195..f448e3dd6c8b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -142,7 +142,7 @@ struct qed_vf_queue { enum vf_state { VF_FREE = 0, /* VF ready to be acquired holds no resc */ - VF_ACQUIRED, /* VF, acquired, but not initalized */ + VF_ACQUIRED, /* VF, acquired, but not initialized */ VF_ENABLED, /* VF, Enabled */ VF_RESET, /* VF, FLR'd, pending cleanup */ VF_STOPPED /* VF, Stopped */ @@ -250,29 +250,31 @@ extern const struct qed_iov_hv_ops qed_iov_ops_pass; #ifdef CONFIG_QED_SRIOV /** - * @brief Check if given VF ID @vfid is valid - * w.r.t. @b_enabled_only value - * if b_enabled_only = true - only enabled VF id is valid - * else any VF id less than max_vfs is valid + * qed_iov_is_valid_vfid(): Check if given VF ID @vfid is valid + * w.r.t. @b_enabled_only value + * if b_enabled_only = true - only enabled + * VF id is valid. + * else any VF id less than max_vfs is valid. * - * @param p_hwfn - * @param rel_vf_id - Relative VF ID - * @param b_enabled_only - consider only enabled VF - * @param b_non_malicious - true iff we want to validate vf isn't malicious. + * @p_hwfn: HW device data. + * @rel_vf_id: Relative VF ID. + * @b_enabled_only: consider only enabled VF. + * @b_non_malicious: true iff we want to validate vf isn't malicious. * - * @return bool - true for valid VF ID + * Return: bool - true for valid VF ID */ bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, int rel_vf_id, bool b_enabled_only, bool b_non_malicious); /** - * @brief - Given a VF index, return index of next [including that] active VF. + * qed_iov_get_next_active_vf(): Given a VF index, return index of + * next [including that] active VF. * - * @param p_hwfn - * @param rel_vf_id + * @p_hwfn: HW device data. + * @rel_vf_id: VF ID. * - * @return MAX_NUM_VFS in case no further active VFs, otherwise index. + * Return: MAX_NUM_VFS in case no further active VFs, otherwise index. */ u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id); @@ -280,83 +282,117 @@ void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid, u16 vxlan_port, u16 geneve_port); /** - * @brief Read sriov related information and allocated resources - * reads from configuration space, shmem, etc. + * qed_iov_hw_info(): Read sriov related information and allocated resources + * reads from configuration space, shmem, etc. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_iov_hw_info(struct qed_hwfn *p_hwfn); /** - * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset + * qed_add_tlv(): place a given tlv on the tlv buffer at next offset * - * @param p_hwfn - * @param p_iov - * @param type - * @param length + * @p_hwfn: HW device data. + * @offset: offset. + * @type: Type + * @length: Length. * - * @return pointer to the newly placed tlv + * Return: pointer to the newly placed tlv */ void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length); /** - * @brief list the types and lengths of the tlvs on the buffer + * qed_dp_tlv_list(): list the types and lengths of the tlvs on the buffer * - * @param p_hwfn - * @param tlvs_list + * @p_hwfn: HW device data. + * @tlvs_list: Tlvs_list. + * + * Return: Void. 
*/ void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list); /** - * @brief qed_iov_alloc - allocate sriov related resources + * qed_sriov_vfpf_malicious(): Handle malicious VF/PF. + * + * @p_hwfn: HW device data. + * @p_data: Pointer to data. + * + * Return: Void. + */ +void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, + struct fw_err_data *p_data); + +/** + * qed_sriov_eqe_event(): Callback for SRIOV events. + * + * @p_hwfn: HW device data. + * @opcode: Opcode. + * @echo: Echo. + * @data: data + * @fw_return_code: FW return code. + * + * Return: Int. + */ +int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, + union event_ring_data *data, u8 fw_return_code); + +/** + * qed_iov_alloc(): allocate sriov related resources * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_iov_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_iov_setup - setup sriov related resources + * qed_iov_setup(): setup sriov related resources * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_iov_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_iov_free - free sriov related resources + * qed_iov_free(): free sriov related resources + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_iov_free(struct qed_hwfn *p_hwfn); /** - * @brief free sriov related memory that was allocated during hw_prepare + * qed_iov_free_hw_info(): free sriov related memory that was + * allocated during hw_prepare + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_iov_free_hw_info(struct qed_dev *cdev); /** - * @brief Mark structs of vfs that have been FLR-ed. + * qed_iov_mark_vf_flr(): Mark structs of vfs that have been FLR-ed. * - * @param p_hwfn - * @param disabled_vfs - bitmask of all VFs on path that were FLRed + * @p_hwfn: HW device data. + * @disabled_vfs: bitmask of all VFs on path that were FLRed * - * @return true iff one of the PF's vfs got FLRed. false otherwise. + * Return: true iff one of the PF's vfs got FLRed. false otherwise. */ bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs); /** - * @brief Search extended TLVs in request/reply buffer. + * qed_iov_search_list_tlvs(): Search extended TLVs in request/reply buffer. * - * @param p_hwfn - * @param p_tlvs_list - Pointer to tlvs list - * @param req_type - Type of TLV + * @p_hwfn: HW device data. + * @p_tlvs_list: Pointer to tlvs list + * @req_type: Type of TLV * - * @return pointer to tlv type if found, otherwise returns NULL. + * Return: pointer to tlv type if found, otherwise returns NULL. 
*/ void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn, void *p_tlvs_list, u16 req_type); @@ -442,6 +478,18 @@ static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn) { } + +static inline void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, + struct fw_err_data *p_data) +{ +} + +static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, + __le16 echo, union event_ring_data *data, + u8 fw_return_code) +{ + return 0; +} #endif #define qed_for_each_vf(_p_hwfn, _i) \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 72a38d53d33f..597cd9cd57b5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -27,7 +27,7 @@ static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length) "preparing to send 0x%04x tlv over vf pf channel\n", type); - /* Reset Requst offset */ + /* Reset Request offset */ p_iov->offset = (u8 *)p_iov->vf2pf_request; /* Clear mailbox - both request and reply */ @@ -444,7 +444,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) u32 reg; int rc; - /* Set number of hwfns - might be overriden once leading hwfn learns + /* Set number of hwfns - might be overridden once leading hwfn learns * actual configuration from PF. */ if (IS_LEAD_HWFN(p_hwfn)) @@ -504,7 +504,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) QED_MSG_IOV, "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n", p_iov->vf2pf_request, - (u64) p_iov->vf2pf_request_phys, + (u64)p_iov->vf2pf_request_phys, p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys); /* Allocate Bulletin board */ @@ -561,6 +561,7 @@ free_p_iov: return -ENOMEM; } + #define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A #define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \ (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev))) @@ -1285,8 +1286,8 @@ int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn, /* clear mailbox and prep first tlv */ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req)); - req->opcode = (u8) p_ucast->opcode; - req->type = (u8) p_ucast->type; + req->opcode = (u8)p_ucast->opcode; + req->type = (u8)p_ucast->type; memcpy(req->mac, p_ucast->mac, ETH_ALEN); req->vlan = p_ucast->vlan; @@ -1372,7 +1373,7 @@ exit: int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, - u8 *p_mac) + const u8 *p_mac) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct vfpf_bulletin_update_mac_tlv *p_req; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 60d2bb64e65f..306b5f4bc632 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -48,7 +48,7 @@ struct channel_tlv { u16 length; }; -/* header of first vf->pf tlv carries the offset used to calculate reponse +/* header of first vf->pf tlv carries the offset used to calculate response * buffer address */ struct vfpf_first_tlv { @@ -85,8 +85,8 @@ struct vfpf_acquire_tlv { struct vfpf_first_tlv first_tlv; struct vf_pf_vfdev_info { -#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0) /* VF pre-FP hsi version */ -#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */ +#define VFPF_ACQUIRE_CAP_PRE_FP_HSI BIT(0) /* VF pre-FP hsi version */ +#define VFPF_ACQUIRE_CAP_100G BIT(1) /* VF can support 100g */ /* A requirement for supporting multi-Tx queues on a single queue-zone, * VF would pass qids as additional information whenever passing queue * 
references. @@ -688,13 +688,16 @@ struct qed_vf_iov { }; /** - * @brief VF - Set Rx/Tx coalesce per VF's relative queue. - * Coalesce value '0' will omit the configuration. + * qed_vf_pf_set_coalesce(): VF - Set Rx/Tx coalesce per VF's relative queue. + * Coalesce value '0' will omit the + * configuration. * - * @param p_hwfn - * @param rx_coal - coalesce value in micro second for rx queue - * @param tx_coal - coalesce value in micro second for tx queue - * @param p_cid - queue cid + * @p_hwfn: HW device data. + * @rx_coal: coalesce value in micro second for rx queue. + * @tx_coal: coalesce value in micro second for tx queue. + * @p_cid: queue cid. + * + * Return: Int. * **/ int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, @@ -702,148 +705,172 @@ int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, u16 tx_coal, struct qed_queue_cid *p_cid); /** - * @brief VF - Get coalesce per VF's relative queue. + * qed_vf_pf_get_coalesce(): VF - Get coalesce per VF's relative queue. * - * @param p_hwfn - * @param p_coal - coalesce value in micro second for VF queues. - * @param p_cid - queue cid + * @p_hwfn: HW device data. + * @p_coal: coalesce value in micro second for VF queues. + * @p_cid: queue cid. * + * Return: Int. **/ int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, struct qed_queue_cid *p_cid); #ifdef CONFIG_QED_SRIOV /** - * @brief Read the VF bulletin and act on it if needed + * qed_vf_read_bulletin(): Read the VF bulletin and act on it if needed. * - * @param p_hwfn - * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise. + * @p_hwfn: HW device data. + * @p_change: qed fills 1 iff bulletin board has changed, 0 otherwise. * - * @return enum _qed_status + * Return: enum _qed_status. */ int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change); /** - * @brief Get link paramters for VF from qed + * qed_vf_get_link_params(): Get link parameters for VF from qed + * + * @p_hwfn: HW device data. + * @params: the link params structure to be filled for the VF. * - * @param p_hwfn - * @param params - the link params structure to be filled for the VF + * Return: Void. */ void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, struct qed_mcp_link_params *params); /** - * @brief Get link state for VF from qed + * qed_vf_get_link_state(): Get link state for VF from qed. + * + * @p_hwfn: HW device data. + * @link: the link state structure to be filled for the VF * - * @param p_hwfn - * @param link - the link state structure to be filled for the VF + * Return: Void. */ void qed_vf_get_link_state(struct qed_hwfn *p_hwfn, struct qed_mcp_link_state *link); /** - * @brief Get link capabilities for VF from qed + * qed_vf_get_link_caps(): Get link capabilities for VF from qed. * - * @param p_hwfn - * @param p_link_caps - the link capabilities structure to be filled for the VF + * @p_hwfn: HW device data. + * @p_link_caps: the link capabilities structure to be filled for the VF + * + * Return: Void. */ void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, struct qed_mcp_link_capabilities *p_link_caps); /** - * @brief Get number of Rx queues allocated for VF by qed + * qed_vf_get_num_rxqs(): Get number of Rx queues allocated for VF by qed + * + * @p_hwfn: HW device data. + * @num_rxqs: allocated RX queues * - * @param p_hwfn - * @param num_rxqs - allocated RX queues + * Return: Void. 
*/ void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs); /** - * @brief Get number of Rx queues allocated for VF by qed + * qed_vf_get_num_txqs(): Get number of Rx queues allocated for VF by qed * - * @param p_hwfn - * @param num_txqs - allocated RX queues + * @p_hwfn: HW device data. + * @num_txqs: allocated RX queues + * + * Return: Void. */ void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs); /** - * @brief Get number of available connections [both Rx and Tx] for VF + * qed_vf_get_num_cids(): Get number of available connections + * [both Rx and Tx] for VF + * + * @p_hwfn: HW device data. + * @num_cids: allocated number of connections * - * @param p_hwfn - * @param num_cids - allocated number of connections + * Return: Void. */ void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids); /** - * @brief Get port mac address for VF + * qed_vf_get_port_mac(): Get port mac address for VF. * - * @param p_hwfn - * @param port_mac - destination location for port mac + * @p_hwfn: HW device data. + * @port_mac: destination location for port mac + * + * Return: Void. */ void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac); /** - * @brief Get number of VLAN filters allocated for VF by qed + * qed_vf_get_num_vlan_filters(): Get number of VLAN filters allocated + * for VF by qed. + * + * @p_hwfn: HW device data. + * @num_vlan_filters: allocated VLAN filters * - * @param p_hwfn - * @param num_rxqs - allocated VLAN filters + * Return: Void. */ void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters); /** - * @brief Get number of MAC filters allocated for VF by qed + * qed_vf_get_num_mac_filters(): Get number of MAC filters allocated + * for VF by qed * - * @param p_hwfn - * @param num_rxqs - allocated MAC filters + * @p_hwfn: HW device data. + * @num_mac_filters: allocated MAC filters + * + * Return: Void. */ void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters); /** - * @brief Check if VF can set a MAC address + * qed_vf_check_mac(): Check if VF can set a MAC address * - * @param p_hwfn - * @param mac + * @p_hwfn: HW device data. + * @mac: Mac. * - * @return bool + * Return: bool. */ bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac); /** - * @brief Set firmware version information in dev_info from VFs acquire response tlv + * qed_vf_get_fw_version(): Set firmware version information + * in dev_info from VFs acquire response tlv + * + * @p_hwfn: HW device data. + * @fw_major: FW major. + * @fw_minor: FW minor. + * @fw_rev: FW rev. + * @fw_eng: FW eng. * - * @param p_hwfn - * @param fw_major - * @param fw_minor - * @param fw_rev - * @param fw_eng + * Return: Void. */ void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, u16 *fw_major, u16 *fw_minor, u16 *fw_rev, u16 *fw_eng); /** - * @brief hw preparation for VF - * sends ACQUIRE message + * qed_vf_hw_prepare(): hw preparation for VF sends ACQUIRE message * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. 
*/ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn); /** - * @brief VF - start the RX Queue by sending a message to the PF - * @param p_hwfn - * @param p_cid - Only relative fields are relevant - * @param bd_max_bytes - maximum number of bytes per bd - * @param bd_chain_phys_addr - physical address of bd chain - * @param cqe_pbl_addr - physical address of pbl - * @param cqe_pbl_size - pbl size - * @param pp_prod - pointer to the producer to be - * used in fastpath + * qed_vf_pf_rxq_start(): start the RX Queue by sending a message to the PF + * + * @p_hwfn: HW device data. + * @p_cid: Only relative fields are relevant + * @bd_max_bytes: maximum number of bytes per bd + * @bd_chain_phys_addr: physical address of bd chain + * @cqe_pbl_addr: physical address of pbl + * @cqe_pbl_size: pbl size + * @pp_prod: pointer to the producer to be used in fastpath * - * @return int + * Return: Int. */ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid, @@ -853,18 +880,16 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, u16 cqe_pbl_size, void __iomem **pp_prod); /** - * @brief VF - start the TX queue by sending a message to the - * PF. + * qed_vf_pf_txq_start(): VF - start the TX queue by sending a message to the + * PF. * - * @param p_hwfn - * @param tx_queue_id - zero based within the VF - * @param sb - status block for this queue - * @param sb_index - index within the status block - * @param bd_chain_phys_addr - physical address of tx chain - * @param pp_doorbell - pointer to address to which to - * write the doorbell too.. + * @p_hwfn: HW device data. + * @p_cid: CID. + * @pbl_addr: PBL address. + * @pbl_size: PBL Size. + * @pp_doorbell: pointer to address to which to write the doorbell too. * - * @return int + * Return: Int. */ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, @@ -873,90 +898,91 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, u16 pbl_size, void __iomem **pp_doorbell); /** - * @brief VF - stop the RX queue by sending a message to the PF + * qed_vf_pf_rxq_stop(): VF - stop the RX queue by sending a message to the PF. * - * @param p_hwfn - * @param p_cid - * @param cqe_completion + * @p_hwfn: HW device data. + * @p_cid: CID. + * @cqe_completion: CQE Completion. * - * @return int + * Return: Int. */ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid, bool cqe_completion); /** - * @brief VF - stop the TX queue by sending a message to the PF + * qed_vf_pf_txq_stop(): VF - stop the TX queue by sending a message to the PF. * - * @param p_hwfn - * @param tx_qid + * @p_hwfn: HW device data. + * @p_cid: CID. * - * @return int + * Return: Int. */ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid); /** - * @brief VF - send a vport update command + * qed_vf_pf_vport_update(): VF - send a vport update command. * - * @param p_hwfn - * @param params + * @p_hwfn: HW device data. + * @p_params: Params * - * @return int + * Return: Int. */ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, struct qed_sp_vport_update_params *p_params); /** + * qed_vf_pf_reset(): VF - send a close message to PF. * - * @brief VF - send a close message to PF + * @p_hwfn: HW device data. * - * @param p_hwfn - * - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_reset(struct qed_hwfn *p_hwfn); /** - * @brief VF - free vf`s memories + * qed_vf_pf_release(): VF - free vf`s memories. * - * @param p_hwfn + * @p_hwfn: HW device data. 
* - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_release(struct qed_hwfn *p_hwfn); /** - * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given + * qed_vf_get_igu_sb_id(): Get the IGU SB ID for a given * sb_id. For VFs igu sbs don't have to be contiguous * - * @param p_hwfn - * @param sb_id + * @p_hwfn: HW device data. + * @sb_id: SB ID. * - * @return INLINE u16 + * Return: INLINE u16 */ u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); /** - * @brief Stores [or removes] a configured sb_info. + * qed_vf_set_sb_info(): Stores [or removes] a configured sb_info. + * + * @p_hwfn: HW device data. + * @sb_id: zero-based SB index [for fastpath] + * @p_sb: may be NULL [during removal]. * - * @param p_hwfn - * @param sb_id - zero-based SB index [for fastpath] - * @param sb_info - may be NULL [during removal]. + * Return: Void. */ void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn, u16 sb_id, struct qed_sb_info *p_sb); /** - * @brief qed_vf_pf_vport_start - perform vport start for VF. + * qed_vf_pf_vport_start(): perform vport start for VF. * - * @param p_hwfn - * @param vport_id - * @param mtu - * @param inner_vlan_removal - * @param tpa_mode - * @param max_buffers_per_cqe, - * @param only_untagged - default behavior regarding vlan acceptance + * @p_hwfn: HW device data. + * @vport_id: Vport ID. + * @mtu: MTU. + * @inner_vlan_removal: Innter VLAN removal. + * @tpa_mode: TPA mode + * @max_buffers_per_cqe: Max buffer pre CQE. + * @only_untagged: default behavior regarding vlan acceptance * - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, u8 vport_id, @@ -966,11 +992,11 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, u8 max_buffers_per_cqe, u8 only_untagged); /** - * @brief qed_vf_pf_vport_stop - stop the VF's vport + * qed_vf_pf_vport_stop(): stop the VF's vport * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn); @@ -981,42 +1007,49 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, struct qed_filter_mcast *p_filter_cmd); /** - * @brief qed_vf_pf_int_cleanup - clean the SB of the VF + * qed_vf_pf_int_cleanup(): clean the SB of the VF * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn); /** - * @brief - return the link params in a given bulletin board + * __qed_vf_get_link_params(): return the link params in a given bulletin board * - * @param p_hwfn - * @param p_params - pointer to a struct to fill with link params - * @param p_bulletin + * @p_hwfn: HW device data. + * @p_params: pointer to a struct to fill with link params + * @p_bulletin: Bulletin. + * + * Return: Void. */ void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn, struct qed_mcp_link_params *p_params, struct qed_bulletin_content *p_bulletin); /** - * @brief - return the link state in a given bulletin board + * __qed_vf_get_link_state(): return the link state in a given bulletin board + * + * @p_hwfn: HW device data. + * @p_link: pointer to a struct to fill with link state + * @p_bulletin: Bulletin. * - * @param p_hwfn - * @param p_link - pointer to a struct to fill with link state - * @param p_bulletin + * Return: Void. 
*/ void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn, struct qed_mcp_link_state *p_link, struct qed_bulletin_content *p_bulletin); /** - * @brief - return the link capabilities in a given bulletin board + * __qed_vf_get_link_caps(): return the link capabilities in a given + * bulletin board * - * @param p_hwfn - * @param p_link - pointer to a struct to fill with link capabilities - * @param p_bulletin + * @p_hwfn: HW device data. + * @p_link_caps: pointer to a struct to fill with link capabilities + * @p_bulletin: Bulletin. + * + * Return: Void. */ void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, struct qed_mcp_link_capabilities *p_link_caps, @@ -1029,11 +1062,15 @@ int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id); /** - * @brief - Ask PF to update the MAC address in it's bulletin board + * qed_vf_pf_bulletin_update_mac(): Ask PF to update the MAC address in + * it's bulletin board + * + * @p_hwfn: HW device data. + * @p_mac: mac address to be updated in bulletin board * - * @param p_mac - mac address to be updated in bulletin board + * Return: Int. */ -int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, u8 *p_mac); +int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, const u8 *p_mac); #else static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, @@ -1222,7 +1259,7 @@ static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, } static inline int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, - u8 *p_mac) + const u8 *p_mac) { return -EINVAL; } diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index a2e4dfb5cb44..3010833ddde3 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -557,7 +557,7 @@ void qede_force_mac(void *dev, u8 *mac, bool forced) return; } - ether_addr_copy(edev->ndev->dev_addr, mac); + eth_hw_addr_set(edev->ndev, mac); __qede_unlock(edev); } @@ -617,32 +617,30 @@ void qede_fill_rss_params(struct qede_dev *edev, static int qede_set_ucast_rx_mac(struct qede_dev *edev, enum qed_filter_xcast_params_type opcode, - unsigned char mac[ETH_ALEN]) + const unsigned char mac[ETH_ALEN]) { - struct qed_filter_params filter_cmd; + struct qed_filter_ucast_params ucast; - memset(&filter_cmd, 0, sizeof(filter_cmd)); - filter_cmd.type = QED_FILTER_TYPE_UCAST; - filter_cmd.filter.ucast.type = opcode; - filter_cmd.filter.ucast.mac_valid = 1; - ether_addr_copy(filter_cmd.filter.ucast.mac, mac); + memset(&ucast, 0, sizeof(ucast)); + ucast.type = opcode; + ucast.mac_valid = 1; + ether_addr_copy(ucast.mac, mac); - return edev->ops->filter_config(edev->cdev, &filter_cmd); + return edev->ops->filter_config_ucast(edev->cdev, &ucast); } static int qede_set_ucast_rx_vlan(struct qede_dev *edev, enum qed_filter_xcast_params_type opcode, u16 vid) { - struct qed_filter_params filter_cmd; + struct qed_filter_ucast_params ucast; - memset(&filter_cmd, 0, sizeof(filter_cmd)); - filter_cmd.type = QED_FILTER_TYPE_UCAST; - filter_cmd.filter.ucast.type = opcode; - filter_cmd.filter.ucast.vlan_valid = 1; - filter_cmd.filter.ucast.vlan = vid; + memset(&ucast, 0, sizeof(ucast)); + ucast.type = opcode; + ucast.vlan_valid = 1; + ucast.vlan = vid; - return edev->ops->filter_config(edev->cdev, &filter_cmd); + return edev->ops->filter_config_ucast(edev->cdev, &ucast); } static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action) @@ -1057,18 +1055,17 @@ static 
int qede_set_mcast_rx_mac(struct qede_dev *edev,
                          enum qed_filter_xcast_params_type opcode,
                          unsigned char *mac, int num_macs)
{
-       struct qed_filter_params filter_cmd;
+       struct qed_filter_mcast_params mcast;
        int i;

-       memset(&filter_cmd, 0, sizeof(filter_cmd));
-       filter_cmd.type = QED_FILTER_TYPE_MCAST;
-       filter_cmd.filter.mcast.type = opcode;
-       filter_cmd.filter.mcast.num = num_macs;
+       memset(&mcast, 0, sizeof(mcast));
+       mcast.type = opcode;
+       mcast.num = num_macs;

        for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
-               ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
+               ether_addr_copy(mcast.mac[i], mac);

-       return edev->ops->filter_config(edev->cdev, &filter_cmd);
+       return edev->ops->filter_config_mcast(edev->cdev, &mcast);
}

int qede_set_mac_addr(struct net_device *ndev, void *p)
@@ -1104,7 +1101,7 @@ int qede_set_mac_addr(struct net_device *ndev, void *p)
                goto out;
        }

-       ether_addr_copy(ndev->dev_addr, addr->sa_data);
+       eth_hw_addr_set(ndev, addr->sa_data);
        DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data);

        if (edev->state != QEDE_STATE_OPEN) {
@@ -1194,7 +1191,6 @@ void qede_config_rx_mode(struct net_device *ndev)
{
        enum qed_filter_rx_mode_type accept_flags;
        struct qede_dev *edev = netdev_priv(ndev);
-       struct qed_filter_params rx_mode;
        unsigned char *uc_macs, *temp;
        struct netdev_hw_addr *ha;
        int rc, uc_count;
@@ -1220,10 +1216,6 @@ void qede_config_rx_mode(struct net_device *ndev)

        netif_addr_unlock_bh(ndev);

-       /* Configure the struct for the Rx mode */
-       memset(&rx_mode, 0, sizeof(struct qed_filter_params));
-       rx_mode.type = QED_FILTER_TYPE_RX_MODE;
-
        /* Remove all previous unicast secondary macs and multicast macs
         * (configure / leave the primary mac)
         */
@@ -1271,8 +1263,7 @@ void qede_config_rx_mode(struct net_device *ndev)
                        qede_config_accept_any_vlan(edev, false);
        }

-       rx_mode.filter.accept_flags = accept_flags;
-       edev->ops->filter_config(edev->cdev, &rx_mode);
+       edev->ops->filter_config_rx_mode(edev->cdev, accept_flags);
out:
        kfree(uc_macs);
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 9837bdb89cd4..06c6a5813606 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -836,7 +836,7 @@ static void qede_init_ndev(struct qede_dev *edev)
        ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;

        /* Set network device HW mac */
-       ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
+       eth_hw_addr_set(edev->ndev, edev->dev_info.common.hw_mac);

        ndev->mtu = edev->dev_info.common.mtu;
}
@@ -1176,19 +1176,17 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
                edev->devlink = qed_ops->common->devlink_register(cdev);
                if (IS_ERR(edev->devlink)) {
                        DP_NOTICE(edev, "Cannot register devlink\n");
+                       rc = PTR_ERR(edev->devlink);
                        edev->devlink = NULL;
-                       /* Go on, we can live without devlink */
+                       goto err3;
                }
        } else {
                struct net_device *ndev = pci_get_drvdata(pdev);
+               struct qed_devlink *qdl;

                edev = netdev_priv(ndev);
-
-               if (edev->devlink) {
-                       struct qed_devlink *qdl = devlink_priv(edev->devlink);
-
-                       qdl->cdev = cdev;
-               }
+               qdl = devlink_priv(edev->devlink);
+               qdl->cdev = cdev;
                edev->cdev = cdev;
                memset(&edev->stats, 0, sizeof(edev->stats));
                memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
@@ -1397,7 +1395,7 @@ static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
static int qede_alloc_mem_sb(struct qede_dev *edev,
                             struct qed_sb_info *sb_info, u16 sb_id)
{
-       struct status_block_e4 *sb_virt;
+       struct status_block *sb_virt;
        dma_addr_t sb_phys;
        int rc;
@@ -2802,10 +2800,13 @@ static void qede_get_eth_tlv_data(void *dev, void *data)
        }
}

/**
- * qede_io_error_detected - called when PCI error is detected
+ * qede_io_error_detected(): Called when PCI error is detected
+ * @pdev: Pointer to PCI device
 * @state: The current pci connection state
+ *Return: pci_ers_result_t.
+ *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index c00ad57575ea..1e6d72adfe43 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -508,10 +508,12 @@ static void eeprom_readword(struct ql3_adapter *qdev,
static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
-       __le16 *p = (__le16 *)ndev->dev_addr;
-       p[0] = cpu_to_le16(addr[0]);
-       p[1] = cpu_to_le16(addr[1]);
-       p[2] = cpu_to_le16(addr[2]);
+       __le16 buf[ETH_ALEN / 2];
+
+       buf[0] = cpu_to_le16(addr[0]);
+       buf[1] = cpu_to_le16(addr[1]);
+       buf[2] = cpu_to_le16(addr[2]);
+       eth_hw_addr_set(ndev, (u8 *)buf);
}

static int ql_get_nvram_params(struct ql3_adapter *qdev)
@@ -3564,7 +3566,7 @@ static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

-       memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+       eth_hw_addr_set(ndev, addr->sa_data);

        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
        /* Program lower 32 bits of the MAC address */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 75960a29f80e..ed84f0f97623 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -304,7 +304,7 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
        if (ret)
                return ret;

-       memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
+       eth_hw_addr_set(netdev, mac_addr);
        memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);

        /* set station address */
@@ -356,7 +356,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
                qlcnic_delete_adapter_mac(adapter);

        memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
-       memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+       eth_hw_addr_set(netdev, addr->sa_data);

        qlcnic_set_multi(adapter->netdev);

        if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {