Diffstat (limited to 'drivers/net/ethernet/qlogic')
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | 9
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h | 149
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c | 189
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.h | 24
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 157
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.c | 1566
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 1727
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 81
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 48
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 1110
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.c | 59
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 185
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 82
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iscsi.h | 14
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c | 373
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.h | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 113
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 149
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 1346
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h | 225
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ooo.c | 102
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ooo.h | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ptp.c | 199
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ptp.h | 47
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 56
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.c | 338
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.h | 13
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp.h | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 330
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_spq.c | 48
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c | 564
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.h | 19
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.c | 185
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.h | 62
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h | 97
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_dcbnl.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 85
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_filter.c | 536
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_fp.c | 93
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 356
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ptp.c | 188
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ptp.h | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 3
48 files changed, 8066 insertions, 2906 deletions
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 7b43a3b4abdc..3dd973475125 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1375,13 +1375,8 @@ netxen_receive_peg_ready(struct netxen_adapter *adapter)
} while (--retries);
- if (!retries) {
- printk(KERN_ERR "Receive Peg initialization not "
- "complete, state: 0x%x.\n", val);
- return -EIO;
- }
-
- return 0;
+ pr_err("Receive Peg initialization not complete, state: 0x%x.\n", val);
+ return -EIO;
}
int netxen_init_firmware(struct netxen_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 00c17fa6545b..2ab1aab7c3fe 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -51,7 +51,19 @@
#include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.10.10.20"
+
+#define QED_MAJOR_VERSION 8
+#define QED_MINOR_VERSION 10
+#define QED_REVISION_VERSION 10
+#define QED_ENGINEERING_VERSION 21
+
+#define QED_VERSION \
+ ((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
+ (QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION)
+
+#define STORM_FW_VERSION \
+ ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
+ (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
#define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16
@@ -59,9 +71,8 @@ extern const struct qed_common_ops qed_common_ops_pass;
#define QED_WFQ_UNIT 100
-#define ISCSI_BDQ_ID(_port_id) (_port_id)
-#define FCOE_BDQ_ID(_port_id) ((_port_id) + 2)
#define QED_WID_SIZE (1024)
+#define QED_MIN_WIDS (4)
#define QED_PF_DEMS_SIZE (4)
/* cau states */
@@ -76,6 +87,15 @@ union qed_mcp_protocol_stats;
enum qed_mcp_protocol_type;
/* helpers */
+#define QED_MFW_GET_FIELD(name, field) \
+ (((name) & (field ## _MASK)) >> (field ## _SHIFT))
+
+#define QED_MFW_SET_FIELD(name, field, value) \
+ do { \
+ (name) &= ~((field ## _MASK) << (field ## _SHIFT)); \
+ (name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
+ } while (0)
+
static inline u32 qed_db_addr(u32 cid, u32 DEMS)
{
u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
@@ -130,9 +150,35 @@ enum qed_tunn_clss {
QED_TUNN_CLSS_MAC_VNI,
QED_TUNN_CLSS_INNER_MAC_VLAN,
QED_TUNN_CLSS_INNER_MAC_VNI,
+ QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE,
MAX_QED_TUNN_CLSS,
};
+struct qed_tunn_update_type {
+ bool b_update_mode;
+ bool b_mode_enabled;
+ enum qed_tunn_clss tun_cls;
+};
+
+struct qed_tunn_update_udp_port {
+ bool b_update_port;
+ u16 port;
+};
+
+struct qed_tunnel_info {
+ struct qed_tunn_update_type vxlan;
+ struct qed_tunn_update_type l2_geneve;
+ struct qed_tunn_update_type ip_geneve;
+ struct qed_tunn_update_type l2_gre;
+ struct qed_tunn_update_type ip_gre;
+
+ struct qed_tunn_update_udp_port vxlan_port;
+ struct qed_tunn_update_udp_port geneve_port;
+
+ bool b_update_rx_cls;
+ bool b_update_tx_cls;
+};
+
struct qed_tunn_start_params {
unsigned long tunn_mode;
u16 vxlan_udp_port;
@@ -198,6 +244,7 @@ enum qed_resources {
QED_LL2_QUEUE,
QED_CMDQS_CQS,
QED_RDMA_STATS_QUEUE,
+ QED_BDQ,
QED_MAX_RESC,
};
@@ -205,8 +252,9 @@ enum QED_FEATURE {
QED_PF_L2_QUE,
QED_VF,
QED_RDMA_CNQ,
- QED_VF_L2_QUE,
+ QED_ISCSI_CQ,
QED_FCOE_CQ,
+ QED_VF_L2_QUE,
QED_MAX_FEATURES,
};
@@ -219,7 +267,9 @@ enum QED_PORT_MODE {
QED_PORT_MODE_DE_4X20G,
QED_PORT_MODE_DE_1X40G,
QED_PORT_MODE_DE_2X25G,
- QED_PORT_MODE_DE_1X25G
+ QED_PORT_MODE_DE_1X25G,
+ QED_PORT_MODE_DE_4X25G,
+ QED_PORT_MODE_DE_2X10G,
};
enum qed_dev_cap {
@@ -249,9 +299,14 @@ struct qed_hw_info {
RESC_NUM(_p_hwfn, resc))
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
- u8 num_tc;
+ /* Amount of traffic classes HW supports */
+ u8 num_hw_tc;
+
+ /* Amount of TCs which should be active according to DCBx or upper
+ * layer driver configuration.
+ */
+ u8 num_active_tc;
u8 offload_tc;
- u8 non_offload_tc;
u32 concrete_fid;
u16 opaque_fid;
@@ -314,15 +369,19 @@ struct qed_qm_info {
struct init_qm_port_params *qm_port_params;
u16 start_pq;
u8 start_vport;
- u8 pure_lb_pq;
- u8 offload_pq;
- u8 pure_ack_pq;
- u8 ooo_pq;
- u8 vf_queues_offset;
+ u16 pure_lb_pq;
+ u16 offload_pq;
+ u16 low_latency_pq;
+ u16 pure_ack_pq;
+ u16 ooo_pq;
+ u16 first_vf_pq;
+ u16 first_mcos_pq;
+ u16 first_rl_pq;
u16 num_pqs;
u16 num_vf_pqs;
u8 num_vports;
u8 max_phys_tcs_per_port;
+ u8 ooo_tc;
bool pf_rl_en;
bool pf_wfq_en;
bool vport_rl_en;
@@ -353,6 +412,12 @@ struct qed_fw_data {
u32 init_ops_size;
};
+#define DRV_MODULE_VERSION \
+ __stringify(QED_MAJOR_VERSION) "." \
+ __stringify(QED_MINOR_VERSION) "." \
+ __stringify(QED_REVISION_VERSION) "." \
+ __stringify(QED_ENGINEERING_VERSION)
+
struct qed_simd_fp_handler {
void *token;
void (*func)(void *);
@@ -364,7 +429,8 @@ struct qed_hwfn {
#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
u8 rel_pf_id; /* Relative to engine*/
u8 abs_pf_id;
-#define QED_PATH_ID(_p_hwfn) ((_p_hwfn)->abs_pf_id & 1)
+#define QED_PATH_ID(_p_hwfn) \
+ (QED_IS_K2((_p_hwfn)->cdev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
u8 port_id;
bool b_active;
@@ -409,6 +475,11 @@ struct qed_hwfn {
struct qed_ptt *p_main_ptt;
struct qed_ptt *p_dpc_ptt;
+ /* PTP will be used only by the leading function.
+ * Usage of all PTP-apis should be synchronized as result.
+ */
+ struct qed_ptt *p_ptp_ptt;
+
struct qed_sb_sp_info *p_sp_sb;
struct qed_sb_attn_info *p_sb_attn;
@@ -455,6 +526,7 @@ struct qed_hwfn {
struct dbg_tools_data dbg_info;
/* PWM region specific data */
+ u16 wid_count;
u32 dpi_size;
u32 dpi_count;
@@ -465,8 +537,8 @@ struct qed_hwfn {
u8 dcbx_no_edpm;
u8 db_bar_no_edpm;
- /* p_ptp_ptt is valid for leading HWFN only */
- struct qed_ptt *p_ptp_ptt;
+ struct qed_ptt *p_arfs_ptt;
+
struct qed_simd_fp_handler simd_proto_handler[64];
#ifdef CONFIG_QED_SRIOV
@@ -523,9 +595,7 @@ struct qed_dev {
u8 dp_level;
char name[NAME_SIZE];
- u8 type;
-#define QED_DEV_TYPE_BB (0 << 0)
-#define QED_DEV_TYPE_AH BIT(0)
+ enum qed_dev_type type;
/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB)
#define QED_IS_BB_A0(dev) (QED_IS_BB(dev) && \
@@ -540,6 +610,9 @@ struct qed_dev {
u16 vendor_id;
u16 device_id;
+#define QED_DEV_ID_MASK 0xff00
+#define QED_DEV_ID_MASK_BB 0x1600
+#define QED_DEV_ID_MASK_AH 0x8000
u16 chip_num;
#define CHIP_NUM_MASK 0xffff
@@ -606,9 +679,7 @@ struct qed_dev {
/* SRIOV */
struct qed_hw_sriov_info *p_iov_info;
#define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info)
-
- unsigned long tunn_mode;
-
+ struct qed_tunnel_info tunnel;
bool b_is_vf;
u32 drv_type;
struct qed_eth_stats *reset_stats;
@@ -652,12 +723,19 @@ struct qed_dev {
u32 rdma_max_sge;
u32 rdma_max_inline;
u32 rdma_max_srq_sge;
+ u16 tunn_feature_mask;
};
-#define NUM_OF_VFS(dev) MAX_NUM_VFS_BB
-#define NUM_OF_L2_QUEUES(dev) MAX_NUM_L2_QUEUES_BB
-#define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB
-#define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB
+#define NUM_OF_VFS(dev) (QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
+ : MAX_NUM_VFS_K2)
+#define NUM_OF_L2_QUEUES(dev) (QED_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
+ : MAX_NUM_L2_QUEUES_K2)
+#define NUM_OF_PORTS(dev) (QED_IS_BB(dev) ? MAX_NUM_PORTS_BB \
+ : MAX_NUM_PORTS_K2)
+#define NUM_OF_SBS(dev) (QED_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
+ : MAX_SB_PER_PATH_K2)
+#define NUM_OF_ENG_PFS(dev) (QED_IS_BB(dev) ? MAX_NUM_PFS_BB \
+ : MAX_NUM_PFS_K2)
/**
* @brief qed_concrete_to_sw_fid - get the sw function id from
@@ -693,6 +771,26 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
u32 min_pf_rate);
void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_device_num_engines(struct qed_dev *cdev);
+int qed_device_get_port_id(struct qed_dev *cdev);
+
+#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
+
+/* Flags for indication of required queues */
+#define PQ_FLAGS_RLS (BIT(0))
+#define PQ_FLAGS_MCOS (BIT(1))
+#define PQ_FLAGS_LB (BIT(2))
+#define PQ_FLAGS_OOO (BIT(3))
+#define PQ_FLAGS_ACK (BIT(4))
+#define PQ_FLAGS_OFLD (BIT(5))
+#define PQ_FLAGS_VFS (BIT(6))
+#define PQ_FLAGS_LLT (BIT(7))
+
+/* physical queue index for cm context intialization */
+u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
+u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
+u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
+
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
/* Other Linux specific common definitions */
@@ -721,5 +819,6 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
enum qed_mcp_protocol_type type,
union qed_mcp_protocol_stats *stats);
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
+void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 7e3a6fed3da6..b3aaa985956e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -71,8 +71,7 @@
#define TM_ALIGN BIT(TM_SHIFT)
#define TM_ELEM_SIZE 4
-/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */
-#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
+#define ILT_DEFAULT_HW_P_SIZE 4
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
@@ -220,9 +219,6 @@ struct qed_cxt_mngr {
*/
u32 vf_count;
- /* total number of SRQ's for this hwfn */
- u32 srq_count;
-
/* Acquired CIDs */
struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
@@ -238,12 +234,17 @@ struct qed_cxt_mngr {
u32 t2_num_pages;
u64 first_free;
u64 last_free;
+
+ /* total number of SRQ's for this hwfn */
+ u32 srq_count;
+
+ /* Maximal number of L2 steering filters */
+ u32 arfs_count;
};
static bool src_proto(enum protocol_type type)
{
return type == PROTOCOLID_ISCSI ||
- type == PROTOCOLID_FCOE ||
- type == PROTOCOLID_ROCE;
+ type == PROTOCOLID_FCOE;
}
static bool tm_cid_proto(enum protocol_type type)
@@ -293,6 +294,9 @@ static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
}
+
+ /* Add L2 filtering filters in addition */
+ iids->pf_cids += p_mngr->arfs_count;
}
/* counts the iids for the Timers block configuration */
@@ -304,16 +308,34 @@ struct qed_tm_iids {
u32 per_vf_tids;
};
-static void qed_cxt_tm_iids(struct qed_cxt_mngr *p_mngr,
+static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn,
+ struct qed_cxt_mngr *p_mngr,
struct qed_tm_iids *iids)
{
- u32 i, j;
-
- for (i = 0; i < MAX_CONN_TYPES; i++) {
+ bool tm_vf_required = false;
+ bool tm_required = false;
+ int i, j;
+
+ /* Timers is a special case -> we don't count how many cids require
+ * timers but what's the max cid that will be used by the timer block.
+ * therefore we traverse in reverse order, and once we hit a protocol
+ * that requires the timers memory, we'll sum all the protocols up
+ * to that one.
+ */
+ for (i = MAX_CONN_TYPES - 1; i >= 0; i--) {
struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
- if (tm_cid_proto(i)) {
+ if (tm_cid_proto(i) || tm_required) {
+ if (p_cfg->cid_count)
+ tm_required = true;
+
iids->pf_cids += p_cfg->cid_count;
+ }
+
+ if (tm_cid_proto(i) || tm_vf_required) {
+ if (p_cfg->cids_per_vf)
+ tm_vf_required = true;
+
iids->per_vf_cids += p_cfg->cids_per_vf;
}
@@ -527,7 +549,22 @@ static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
return lines_to_skip;
}
-int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
+static struct qed_ilt_client_cfg *qed_cxt_set_cli(struct qed_ilt_client_cfg
+ *p_cli)
+{
+ p_cli->active = false;
+ p_cli->first.val = 0;
+ p_cli->last.val = 0;
+ return p_cli;
+}
+
+static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
+{
+ p_blk->total_size = 0;
+ return p_blk;
+}
+
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 curr_line, total, i, task_size, line;
@@ -551,7 +588,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
/* CDUC */
- p_cli = &p_mngr->clients[ILT_CLI_CDUC];
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);
+
curr_line = p_mngr->pf_start_line;
/* CDUC PF */
@@ -560,7 +598,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
/* get the counters for the CDUC and QM clients */
qed_cxt_cdu_iids(p_mngr, &cdu_iids);
- p_blk = &p_cli->pf_blks[CDUC_BLK];
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);
total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
@@ -574,7 +612,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
ILT_CLI_CDUC);
/* CDUC VF */
- p_blk = &p_cli->vf_blks[CDUC_BLK];
+ p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
@@ -588,7 +626,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
ILT_CLI_CDUC);
/* CDUT PF */
- p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
p_cli->first.val = curr_line;
/* first the 'working' task memory */
@@ -597,7 +635,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
if (!p_seg || p_seg->count == 0)
continue;
- p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]);
total = p_seg->count * p_mngr->task_type_size[p_seg->type];
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
p_mngr->task_type_size[p_seg->type]);
@@ -612,7 +650,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
if (!p_seg || p_seg->count == 0)
continue;
- p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+ p_blk =
+ qed_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);
if (!p_seg->has_fl_mem) {
/* The segment is active (total size pf 'working'
@@ -657,7 +696,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
/* 'working' memory */
total = p_seg->count * p_mngr->task_type_size[p_seg->type];
- p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+ p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]);
qed_ilt_cli_blk_fill(p_cli, p_blk,
curr_line, total,
p_mngr->task_type_size[p_seg->type]);
@@ -666,7 +705,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
ILT_CLI_CDUT);
/* 'init' memory */
- p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+ p_blk =
+ qed_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
if (!p_seg->has_fl_mem) {
/* see comment above */
line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
@@ -694,8 +734,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
}
/* QM */
- p_cli = &p_mngr->clients[ILT_CLI_QM];
- p_blk = &p_cli->pf_blks[0];
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
qed_cxt_qm_iids(p_hwfn, &qm_iids);
total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
@@ -719,7 +759,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
p_cli->pf_total_lines = curr_line - p_blk->start_line;
/* SRC */
- p_cli = &p_mngr->clients[ILT_CLI_SRC];
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);
qed_cxt_src_iids(p_mngr, &src_iids);
/* Both the PF and VFs searcher connections are stored in the per PF
@@ -733,7 +773,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
total = roundup_pow_of_two(local_max);
- p_blk = &p_cli->pf_blks[0];
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total * sizeof(struct src_ent),
sizeof(struct src_ent));
@@ -744,11 +784,11 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
}
/* TM PF */
- p_cli = &p_mngr->clients[ILT_CLI_TM];
- qed_cxt_tm_iids(p_mngr, &tm_iids);
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
+ qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
total = tm_iids.pf_cids + tm_iids.pf_tids_total;
if (total) {
- p_blk = &p_cli->pf_blks[0];
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total * TM_ELEM_SIZE, TM_ELEM_SIZE);
@@ -760,14 +800,14 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
/* TM VF */
total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
if (total) {
- p_blk = &p_cli->vf_blks[0];
+ p_blk = qed_cxt_set_blk(&p_cli->vf_blks[0]);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total * TM_ELEM_SIZE, TM_ELEM_SIZE);
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_TM);
- p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ p_cli->vf_total_lines = curr_line - p_blk->start_line;
for (i = 1; i < p_mngr->vf_count; i++)
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_TM);
@@ -777,8 +817,8 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
total = qed_cxt_get_srq_count(p_hwfn);
if (total) {
- p_cli = &p_mngr->clients[ILT_CLI_TSDM];
- p_blk = &p_cli->pf_blks[SRQ_BLK];
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
@@ -787,13 +827,50 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
p_cli->pf_total_lines = curr_line - p_blk->start_line;
}
+ *line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line;
+
if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
- RESC_NUM(p_hwfn, QED_ILT)) {
- DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
- curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
+ RESC_NUM(p_hwfn, QED_ILT))
return -EINVAL;
+
+ return 0;
+}
+
+u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ u32 excess_lines, available_lines;
+ struct qed_cxt_mngr *p_mngr;
+ u32 ilt_page_size, elem_size;
+ struct qed_tid_seg *p_seg;
+ int i;
+
+ available_lines = RESC_NUM(p_hwfn, QED_ILT);
+ excess_lines = used_lines - available_lines;
+
+ if (!excess_lines)
+ return 0;
+
+ if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+ return 0;
+
+ p_mngr = p_hwfn->p_cxt_mngr;
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg || p_seg->count == 0)
+ continue;
+
+ elem_size = p_mngr->task_type_size[p_seg->type];
+ if (!elem_size)
+ continue;
+
+ return (ilt_page_size / elem_size) * excess_lines;
}
+ DP_NOTICE(p_hwfn, "failed computing excess ILT lines\n");
return 0;
}
@@ -1127,7 +1204,7 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
- /* default ILT page size for all clients is 32K */
+ /* default ILT page size for all clients is 64K */
for (i = 0; i < ILT_CLI_MAX; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
@@ -1367,7 +1444,7 @@ static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
}
}
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_qm_pf_rt_init_params params;
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
@@ -1393,22 +1470,15 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
params.pq_params = qm_info->qm_pq_params;
params.vport_params = qm_info->qm_vport_params;
- qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params);
+ qed_qm_pf_rt_init(p_hwfn, p_ptt, &params);
}
/* CM PF */
-static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
+void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
{
- union qed_qm_pq_params pq_params;
- u16 pq;
-
/* XCM pure-LB queue */
- memset(&pq_params, 0, sizeof(pq_params));
- pq_params.core.tc = LB_TC;
- pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
- STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);
-
- return 0;
+ STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
+ qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
}
/* DQ PF */
@@ -1640,7 +1710,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
u8 i;
memset(&tm_iids, 0, sizeof(tm_iids));
- qed_cxt_tm_iids(p_mngr, &tm_iids);
+ qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
/* @@@TBD No pre-scan for now */
@@ -1758,9 +1828,9 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
qed_prs_init_common(p_hwfn);
}
-void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- qed_qm_init_pf(p_hwfn);
+ qed_qm_init_pf(p_hwfn, p_ptt);
qed_cm_init_pf(p_hwfn);
qed_dq_init_pf(p_hwfn);
qed_cdu_init_pf(p_hwfn);
@@ -1884,13 +1954,12 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
}
static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
- struct qed_rdma_pf_params *p_params)
+ struct qed_rdma_pf_params *p_params,
+ u32 num_tasks)
{
- u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
+ u32 num_cons, num_qps, num_srqs;
enum protocol_type proto;
- num_mrs = min_t(u32, RDMA_MAX_TIDS, p_params->num_mrs);
- num_tasks = num_mrs; /* each mr uses a single task id */
num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
switch (p_hwfn->hw_info.personality) {
@@ -1919,7 +1988,7 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
}
}
-int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
{
/* Set the number of required CORE connections */
u32 core_cids = 1; /* SPQ */
@@ -1931,9 +2000,10 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
switch (p_hwfn->hw_info.personality) {
case QED_PCI_ETH_ROCE:
{
- qed_rdma_set_pf_params(p_hwfn,
- &p_hwfn->
- pf_params.rdma_pf_params);
+ qed_rdma_set_pf_params(p_hwfn,
+ &p_hwfn->
+ pf_params.rdma_pf_params,
+ rdma_tasks);
/* no need for break since RoCE coexist with Ethernet */
}
case QED_PCI_ETH:
@@ -1943,6 +2013,7 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
p_params->num_cons, 1);
+ p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
break;
}
case QED_PCI_FCOE:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 8b010324268a..53ad532dc212 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -105,19 +105,28 @@ u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
* @brief qed_cxt_set_pf_params - Set the PF params for cxt init
*
* @param p_hwfn
- *
+ * @param rdma_tasks - requested maximum
* @return int
*/
-int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn);
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks);
/**
* @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters
*
* @param p_hwfn
+ * @param last_line
*
* @return int
*/
-int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn);
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *last_line);
+
+/**
+ * @brief qed_cxt_cfg_ilt_compute_excess - how many lines can be decreased
+ *
+ * @param p_hwfn
+ * @param used_lines
+ */
+u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines);
/**
* @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct
@@ -163,19 +172,18 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);
/**
* @brief qed_cxt_hw_init_pf - Initailze ILT and DQ, PF phase, per path.
*
- *
- *
* @param p_hwfn
+ * @param p_ptt
*/
-void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn);
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
* @brief qed_qm_init_pf - Initailze the QM PF phase, per path
*
* @param p_hwfn
+ * @param p_ptt
*/
-
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn);
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
* @brief Reconfigures QM pf on the fly
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index cfdadb658ade..d883ad5bec6d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -183,7 +183,7 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
"%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n",
qed_dcbx_app_update[i].name, p_data->arr[id].update,
p_data->arr[id].enable, p_data->arr[id].priority,
- p_data->arr[id].tc, p_hwfn->hw_info.num_tc);
+ p_data->arr[id].tc, p_hwfn->hw_info.num_active_tc);
}
}
@@ -204,12 +204,8 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
p_data->arr[type].tc = tc;
/* QM reconf data */
- if (p_info->personality == personality) {
- if (personality == QED_PCI_ETH)
- p_info->non_offload_tc = tc;
- else
- p_info->offload_tc = tc;
- }
+ if (p_info->personality == personality)
+ p_info->offload_tc = tc;
}
/* Update app protocol data and hw_info fields with the TLV info */
@@ -275,8 +271,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
struct dcbx_app_priority_entry *p_tbl,
u32 pri_tc_tbl, int count, u8 dcbx_version)
{
- u8 tc, priority_map;
enum dcbx_protocol_type type;
+ u8 tc, priority_map;
bool enable, ieee;
u16 protocol_id;
int priority;
@@ -376,7 +372,9 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
if (rc)
return rc;
- p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
+ p_info->num_active_tc = QED_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_MAX_TCS);
+ p_hwfn->qm_info.ooo_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_OOO_TC);
data.pf_id = p_hwfn->rel_pf_id;
data.dcbx_enabled = !!dcbx_version;
@@ -558,8 +556,9 @@ qed_dcbx_get_pfc_data(struct qed_hwfn *p_hwfn,
p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7);
DP_VERBOSE(p_hwfn, QED_MSG_DCB,
- "PFC params: willing %d, pfc_bitmap %d\n",
- p_params->pfc.willing, pfc_map);
+ "PFC params: willing %d, pfc_bitmap %u max_tc = %u enabled = %d\n",
+ p_params->pfc.willing, pfc_map, p_params->pfc.max_tc,
+ p_params->pfc.enabled);
}
static void
@@ -578,10 +577,10 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
p_params->max_ets_tc = QED_MFW_GET_FIELD(p_ets->flags,
DCBX_ETS_MAX_TCS);
DP_VERBOSE(p_hwfn, QED_MSG_DCB,
- "ETS params: willing %d, ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n",
- p_params->ets_willing,
- p_params->ets_cbs,
- p_ets->pri_tc_tbl[0], p_params->max_ets_tc);
+ "ETS params: willing %d, enabled = %d ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n",
+ p_params->ets_willing, p_params->ets_enabled,
+ p_params->ets_cbs, p_ets->pri_tc_tbl[0],
+ p_params->max_ets_tc);
if (p_params->ets_enabled && !p_params->max_ets_tc) {
p_params->max_ets_tc = QED_MAX_PFC_PRIORITIES;
@@ -622,8 +621,7 @@ qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn,
}
static void
-qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, struct qed_dcbx_get *params)
+qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *params)
{
struct dcbx_features *p_feat;
@@ -635,8 +633,7 @@ qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn,
}
static void
-qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, struct qed_dcbx_get *params)
+qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *params)
{
struct dcbx_features *p_feat;
@@ -649,7 +646,6 @@ qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn,
static void
qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
struct qed_dcbx_get *params)
{
struct qed_dcbx_operational_params *p_operational;
@@ -670,6 +666,7 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
if (!enabled) {
p_operational->enabled = enabled;
p_operational->valid = false;
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Dcbx is disabled\n");
return;
}
@@ -683,8 +680,14 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
DCBX_CONFIG_VERSION_CEE);
p_operational->cee = val;
- DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Version support: ieee %d, cee %d\n",
- p_operational->ieee, p_operational->cee);
+ val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_STATIC);
+ p_operational->local = val;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "Version support: ieee %d, cee %d, static %d\n",
+ p_operational->ieee, p_operational->cee,
+ p_operational->local);
qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
p_feat->app.app_pri_tbl, &p_feat->ets,
@@ -699,7 +702,6 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
static void
qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
struct qed_dcbx_get *params)
{
struct lldp_config_params_s *p_local;
@@ -714,7 +716,6 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
static void
qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
struct qed_dcbx_get *params)
{
struct lldp_status_params_s *p_remote;
@@ -728,25 +729,24 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
}
static int
-qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- struct qed_dcbx_get *p_params,
+qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *p_params,
enum qed_mib_read_type type)
{
switch (type) {
case QED_DCBX_REMOTE_MIB:
- qed_dcbx_get_remote_params(p_hwfn, p_ptt, p_params);
+ qed_dcbx_get_remote_params(p_hwfn, p_params);
break;
case QED_DCBX_LOCAL_MIB:
- qed_dcbx_get_local_params(p_hwfn, p_ptt, p_params);
+ qed_dcbx_get_local_params(p_hwfn, p_params);
break;
case QED_DCBX_OPERATIONAL_MIB:
- qed_dcbx_get_operational_params(p_hwfn, p_ptt, p_params);
+ qed_dcbx_get_operational_params(p_hwfn, p_params);
break;
case QED_DCBX_REMOTE_LLDP_MIB:
- qed_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
+ qed_dcbx_get_remote_lldp_params(p_hwfn, p_params);
break;
case QED_DCBX_LOCAL_LLDP_MIB:
- qed_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
+ qed_dcbx_get_local_lldp_params(p_hwfn, p_params);
break;
default:
DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
@@ -904,7 +904,8 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
qed_sp_pf_update(p_hwfn);
}
}
- qed_dcbx_get_params(p_hwfn, p_ptt, &p_hwfn->p_dcbx_info->get, type);
+
+ qed_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type);
qed_dcbx_aen(p_hwfn, type);
return rc;
@@ -912,17 +913,14 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn)
{
- int rc = 0;
-
p_hwfn->p_dcbx_info = kzalloc(sizeof(*p_hwfn->p_dcbx_info), GFP_KERNEL);
if (!p_hwfn->p_dcbx_info)
- rc = -ENOMEM;
+ return -ENOMEM;
- return rc;
+ return 0;
}
-void qed_dcbx_info_free(struct qed_hwfn *p_hwfn,
- struct qed_dcbx_info *p_dcbx_info)
+void qed_dcbx_info_free(struct qed_hwfn *p_hwfn)
{
kfree(p_hwfn->p_dcbx_info);
}
@@ -961,14 +959,9 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
p_dcb_data = &p_dest->fcoe_dcb_data;
qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_FCOE);
p_dcb_data = &p_dest->roce_dcb_data;
-
- if (p_src->arr[DCBX_PROTOCOL_ROCE].update)
- qed_dcbx_update_protocol_data(p_dcb_data, p_src,
- DCBX_PROTOCOL_ROCE);
- if (p_src->arr[DCBX_PROTOCOL_ROCE_V2].update)
- qed_dcbx_update_protocol_data(p_dcb_data, p_src,
- DCBX_PROTOCOL_ROCE_V2);
-
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ROCE);
+ p_dcb_data = &p_dest->rroce_dcb_data;
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ROCE_V2);
p_dcb_data = &p_dest->iscsi_dcb_data;
qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ISCSI);
p_dcb_data = &p_dest->eth_dcb_data;
@@ -994,7 +987,7 @@ static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn,
if (rc)
goto out;
- rc = qed_dcbx_get_params(p_hwfn, p_ptt, p_get, type);
+ rc = qed_dcbx_get_params(p_hwfn, p_get, type);
out:
qed_ptt_release(p_hwfn, p_ptt);
@@ -1169,6 +1162,9 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
}
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Dcbx version = %d\n",
+ local_admin->config);
+
if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG)
qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
&params->config.params);
@@ -1245,6 +1241,8 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_CEE;
if (dcbx_info->operational.ieee)
p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
+ if (dcbx_info->operational.local)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
memcpy(&p_hwfn->p_dcbx_info->set.config.params,
@@ -1794,8 +1792,9 @@ static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode)
DP_VERBOSE(hwfn, QED_MSG_DCB, "new mode = %x\n", mode);
- if (!(mode & DCB_CAP_DCBX_VER_IEEE) && !(mode & DCB_CAP_DCBX_VER_CEE)) {
- DP_INFO(hwfn, "Allowed mode is cee, ieee or both\n");
+ if (!(mode & DCB_CAP_DCBX_VER_IEEE) &&
+ !(mode & DCB_CAP_DCBX_VER_CEE) && !(mode & DCB_CAP_DCBX_STATIC)) {
+ DP_INFO(hwfn, "Allowed modes are cee, ieee or static\n");
return 1;
}
@@ -1815,6 +1814,11 @@ static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode)
dcbx_set.enabled = true;
}
+ if (mode & DCB_CAP_DCBX_STATIC) {
+ dcbx_set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
+ dcbx_set.enabled = true;
+ }
+
ptt = qed_ptt_acquire(hwfn);
if (!ptt)
return 1;
@@ -1823,7 +1827,7 @@ static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode)
qed_ptt_release(hwfn, ptt);
- return 0;
+ return rc;
}
static u8 qed_dcbnl_getfeatcfg(struct qed_dev *cdev, int featid, u8 *flags)
@@ -2202,15 +2206,46 @@ qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
return qed_dcbnl_get_ieee_pfc(cdev, pfc, true);
}
+static int qed_get_sf_ieee_value(u8 selector, u8 *sf_ieee)
+{
+ switch (selector) {
+ case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
+ *sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE;
+ break;
+ case IEEE_8021QAZ_APP_SEL_STREAM:
+ *sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT;
+ break;
+ case IEEE_8021QAZ_APP_SEL_DGRAM:
+ *sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT;
+ break;
+ case IEEE_8021QAZ_APP_SEL_ANY:
+ *sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_dcbx_get *dcbx_info;
struct qed_app_entry *entry;
- bool ethtype;
u8 prio = 0;
+ u8 sf_ieee;
int i;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "selector = %d protocol = %d\n",
+ app->selector, app->protocol);
+
+ if (qed_get_sf_ieee_value(app->selector, &sf_ieee)) {
+ DP_INFO(cdev, "Invalid selector field value %d\n",
+ app->selector);
+ return -EINVAL;
+ }
+
dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
if (!dcbx_info)
return -EINVAL;
@@ -2221,11 +2256,9 @@ static int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
return -EINVAL;
}
- /* ieee defines the selector field value for ethertype to be 1 */
- ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE);
for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
entry = &dcbx_info->operational.params.app_entry[i];
- if ((entry->ethtype == ethtype) &&
+ if ((entry->sf_ieee == sf_ieee) &&
(entry->proto_id == app->protocol)) {
prio = entry->prio;
break;
@@ -2253,14 +2286,22 @@ static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
struct qed_dcbx_set dcbx_set;
struct qed_app_entry *entry;
struct qed_ptt *ptt;
- bool ethtype;
+ u8 sf_ieee;
int rc, i;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "selector = %d protocol = %d pri = %d\n",
+ app->selector, app->protocol, app->priority);
if (app->priority < 0 || app->priority >= QED_MAX_PFC_PRIORITIES) {
DP_INFO(hwfn, "Invalid priority %d\n", app->priority);
return -EINVAL;
}
+ if (qed_get_sf_ieee_value(app->selector, &sf_ieee)) {
+ DP_INFO(cdev, "Invalid selector field value %d\n",
+ app->selector);
+ return -EINVAL;
+ }
+
dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
if (!dcbx_info)
return -EINVAL;
@@ -2278,11 +2319,9 @@ static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
if (rc)
return -EINVAL;
- /* ieee defines the selector field value for ethertype to be 1 */
- ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE);
for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
entry = &dcbx_set.config.params.app_entry[i];
- if ((entry->ethtype == ethtype) &&
+ if ((entry->sf_ieee == sf_ieee) &&
(entry->proto_id == app->protocol))
break;
/* First empty slot */
@@ -2298,7 +2337,7 @@ static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
}
dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
- dcbx_set.config.params.app_entry[i].ethtype = ethtype;
+ dcbx_set.config.params.app_entry[i].sf_ieee = sf_ieee;
dcbx_set.config.params.app_entry[i].proto_id = app->protocol;
dcbx_set.config.params.app_entry[i].prio = BIT(app->priority);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index 0fabe97f998d..414e26268f3a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -85,9 +85,6 @@ struct qed_dcbx_app_metadata {
enum qed_pci_personality personality;
};
-#define QED_MFW_GET_FIELD(name, field) \
- (((name) & (field ## _MASK)) >> (field ## _SHIFT))
-
struct qed_dcbx_info {
struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
@@ -122,7 +119,7 @@ qed_dcbx_mib_update_event(struct qed_hwfn *,
struct qed_ptt *, enum qed_mib_read_type);
int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn);
-void qed_dcbx_info_free(struct qed_hwfn *, struct qed_dcbx_info *);
+void qed_dcbx_info_free(struct qed_hwfn *p_hwfn);
void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 68f19ca57f96..483241b4b05d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -17,7 +17,6 @@
/* Chip IDs enum */
enum chip_ids {
- CHIP_RESERVED,
CHIP_BB_B0,
CHIP_K2,
MAX_CHIP_IDS
@@ -40,6 +39,7 @@ enum mem_groups {
MEM_GROUP_BTB_RAM,
MEM_GROUP_RDIF_CTX,
MEM_GROUP_TDIF_CTX,
+ MEM_GROUP_CFC_MEM,
MEM_GROUP_CONN_CFC_MEM,
MEM_GROUP_TASK_CFC_MEM,
MEM_GROUP_CAU_PI,
@@ -72,6 +72,7 @@ static const char * const s_mem_group_names[] = {
"BTB_RAM",
"RDIF_CTX",
"TDIF_CTX",
+ "CFC_MEM",
"CONN_CFC_MEM",
"TASK_CFC_MEM",
"CAU_PI",
@@ -185,13 +186,16 @@ struct dbg_array {
u32 size_in_dwords;
};
+struct chip_platform_defs {
+ u8 num_ports;
+ u8 num_pfs;
+ u8 num_vfs;
+};
+
/* Chip constant definitions */
struct chip_defs {
const char *name;
- struct {
- u8 num_ports;
- u8 num_pfs;
- } per_platform[MAX_PLATFORM_IDS];
+ struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
};
/* Platform constant definitions */
@@ -405,22 +409,23 @@ struct phy_defs {
/***************************** Constant Arrays *******************************/
/* Debug arrays */
-static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
+static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
- { "reserved", { {0, 0}, {0, 0}, {0, 0}, {0, 0} } },
{ "bb_b0",
- { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB}, {0, 0}, {0, 0}, {0, 0} } },
- { "k2", { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2}, {0, 0}, {0, 0}, {0, 0} } }
+ { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB}, {0, 0, 0},
+ {0, 0, 0}, {0, 0, 0} } },
+ { "k2",
+ { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, {0, 0, 0},
+ {0, 0, 0}, {0, 0, 0} } }
};
/* Storm constant definitions array */
static struct storm_defs s_storm_defs[] = {
/* Tstorm */
{'T', BLOCK_TSEM,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
- DBG_BUS_CLIENT_RBCT}, true,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
TSEM_REG_FAST_MEMORY,
TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
@@ -432,8 +437,7 @@ static struct storm_defs s_storm_defs[] = {
4, TCM_REG_SM_TASK_CTX},
/* Mstorm */
{'M', BLOCK_MSEM,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
- DBG_BUS_CLIENT_RBCM}, false,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
MSEM_REG_FAST_MEMORY,
MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE,
MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG,
@@ -445,8 +449,7 @@ static struct storm_defs s_storm_defs[] = {
7, MCM_REG_SM_TASK_CTX},
/* Ustorm */
{'U', BLOCK_USEM,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
- DBG_BUS_CLIENT_RBCU}, false,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
USEM_REG_FAST_MEMORY,
USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE,
USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG,
@@ -458,8 +461,7 @@ static struct storm_defs s_storm_defs[] = {
3, UCM_REG_SM_TASK_CTX},
/* Xstorm */
{'X', BLOCK_XSEM,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
- DBG_BUS_CLIENT_RBCX}, false,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
XSEM_REG_FAST_MEMORY,
XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE,
XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG,
@@ -471,8 +473,7 @@ static struct storm_defs s_storm_defs[] = {
0, 0},
/* Ystorm */
{'Y', BLOCK_YSEM,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
- DBG_BUS_CLIENT_RBCY}, false,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
YSEM_REG_FAST_MEMORY,
YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE,
YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG,
@@ -484,8 +485,7 @@ static struct storm_defs s_storm_defs[] = {
12, YCM_REG_SM_TASK_CTX},
/* Pstorm */
{'P', BLOCK_PSEM,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
- DBG_BUS_CLIENT_RBCS}, true,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
PSEM_REG_FAST_MEMORY,
PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE,
PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG,
@@ -499,8 +499,9 @@ static struct storm_defs s_storm_defs[] = {
/* Block definitions array */
static struct block_defs block_grc_defs = {
- "grc", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+ "grc",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
GRC_REG_DBG_FORCE_FRAME,
@@ -508,29 +509,30 @@ static struct block_defs block_grc_defs = {
};
static struct block_defs block_miscs_defs = {
- "miscs", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "miscs", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_misc_defs = {
- "misc", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "misc", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_dbu_defs = {
- "dbu", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "dbu", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_pglue_b_defs = {
- "pglue_b", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
+ "pglue_b",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
PGLUE_B_REG_DBG_FORCE_FRAME,
@@ -538,8 +540,9 @@ static struct block_defs block_pglue_b_defs = {
};
static struct block_defs block_cnig_defs = {
- "cnig", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+ "cnig",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
CNIG_REG_DBG_FORCE_FRAME_K2,
@@ -547,15 +550,16 @@ static struct block_defs block_cnig_defs = {
};
static struct block_defs block_cpmu_defs = {
- "cpmu", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "cpmu", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_HV, 8
};
static struct block_defs block_ncsi_defs = {
- "ncsi", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ "ncsi",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
NCSI_REG_DBG_FORCE_FRAME,
@@ -563,15 +567,16 @@ static struct block_defs block_ncsi_defs = {
};
static struct block_defs block_opte_defs = {
- "opte", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "opte", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_HV, 4
};
static struct block_defs block_bmb_defs = {
- "bmb", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
+ "bmb",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
BMB_REG_DBG_FORCE_FRAME,
@@ -579,8 +584,9 @@ static struct block_defs block_bmb_defs = {
};
static struct block_defs block_pcie_defs = {
- "pcie", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ "pcie",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
PCIE_REG_DBG_COMMON_FORCE_FRAME,
@@ -588,15 +594,16 @@ static struct block_defs block_pcie_defs = {
};
static struct block_defs block_mcp_defs = {
- "mcp", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "mcp", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_mcp2_defs = {
- "mcp2", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ "mcp2",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
MCP2_REG_DBG_FORCE_FRAME,
@@ -604,8 +611,9 @@ static struct block_defs block_mcp2_defs = {
};
static struct block_defs block_pswhst_defs = {
- "pswhst", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswhst",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
PSWHST_REG_DBG_FORCE_FRAME,
@@ -613,8 +621,9 @@ static struct block_defs block_pswhst_defs = {
};
static struct block_defs block_pswhst2_defs = {
- "pswhst2", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswhst2",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
PSWHST2_REG_DBG_FORCE_FRAME,
@@ -622,8 +631,9 @@ static struct block_defs block_pswhst2_defs = {
};
static struct block_defs block_pswrd_defs = {
- "pswrd", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswrd",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
PSWRD_REG_DBG_FORCE_FRAME,
@@ -631,8 +641,9 @@ static struct block_defs block_pswrd_defs = {
};
static struct block_defs block_pswrd2_defs = {
- "pswrd2", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswrd2",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
PSWRD2_REG_DBG_FORCE_FRAME,
@@ -640,8 +651,9 @@ static struct block_defs block_pswrd2_defs = {
};
static struct block_defs block_pswwr_defs = {
- "pswwr", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswwr",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
PSWWR_REG_DBG_FORCE_FRAME,
@@ -649,15 +661,16 @@ static struct block_defs block_pswwr_defs = {
};
static struct block_defs block_pswwr2_defs = {
- "pswwr2", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "pswwr2", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISC_PL_HV, 3
};
static struct block_defs block_pswrq_defs = {
- "pswrq", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswrq",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
PSWRQ_REG_DBG_FORCE_FRAME,
@@ -665,8 +678,9 @@ static struct block_defs block_pswrq_defs = {
};
static struct block_defs block_pswrq2_defs = {
- "pswrq2", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "pswrq2",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
PSWRQ2_REG_DBG_FORCE_FRAME,
@@ -674,8 +688,9 @@ static struct block_defs block_pswrq2_defs = {
};
static struct block_defs block_pglcs_defs = {
- "pglcs", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ "pglcs",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE,
PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID,
PGLCS_REG_DBG_FORCE_FRAME,
@@ -683,8 +698,9 @@ static struct block_defs block_pglcs_defs = {
};
static struct block_defs block_ptu_defs = {
- "ptu", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "ptu",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
PTU_REG_DBG_FORCE_FRAME,
@@ -692,8 +708,9 @@ static struct block_defs block_ptu_defs = {
};
static struct block_defs block_dmae_defs = {
- "dmae", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "dmae",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
DMAE_REG_DBG_FORCE_FRAME,
@@ -701,8 +718,9 @@ static struct block_defs block_dmae_defs = {
};
static struct block_defs block_tcm_defs = {
- "tcm", {true, true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ "tcm",
+ {true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
TCM_REG_DBG_FORCE_FRAME,
@@ -710,8 +728,9 @@ static struct block_defs block_tcm_defs = {
};
static struct block_defs block_mcm_defs = {
- "mcm", {true, true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "mcm",
+ {true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
MCM_REG_DBG_FORCE_FRAME,
@@ -719,8 +738,9 @@ static struct block_defs block_mcm_defs = {
};
static struct block_defs block_ucm_defs = {
- "ucm", {true, true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ "ucm",
+ {true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
UCM_REG_DBG_FORCE_FRAME,
@@ -728,8 +748,9 @@ static struct block_defs block_ucm_defs = {
};
static struct block_defs block_xcm_defs = {
- "xcm", {true, true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ "xcm",
+ {true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
XCM_REG_DBG_FORCE_FRAME,
@@ -737,8 +758,9 @@ static struct block_defs block_xcm_defs = {
};
static struct block_defs block_ycm_defs = {
- "ycm", {true, true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ "ycm",
+ {true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
YCM_REG_DBG_FORCE_FRAME,
@@ -746,8 +768,9 @@ static struct block_defs block_ycm_defs = {
};
static struct block_defs block_pcm_defs = {
- "pcm", {true, true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ "pcm",
+ {true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
PCM_REG_DBG_FORCE_FRAME,
@@ -755,8 +778,9 @@ static struct block_defs block_pcm_defs = {
};
static struct block_defs block_qm_defs = {
- "qm", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
+ "qm",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
QM_REG_DBG_FORCE_FRAME,
@@ -764,8 +788,9 @@ static struct block_defs block_qm_defs = {
};
static struct block_defs block_tm_defs = {
- "tm", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ "tm",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
TM_REG_DBG_FORCE_FRAME,
@@ -773,8 +798,9 @@ static struct block_defs block_tm_defs = {
};
static struct block_defs block_dorq_defs = {
- "dorq", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ "dorq",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
DORQ_REG_DBG_FORCE_FRAME,
@@ -782,8 +808,9 @@ static struct block_defs block_dorq_defs = {
};
static struct block_defs block_brb_defs = {
- "brb", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+ "brb",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
BRB_REG_DBG_FORCE_FRAME,
@@ -791,8 +818,9 @@ static struct block_defs block_brb_defs = {
};
static struct block_defs block_src_defs = {
- "src", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ "src",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
SRC_REG_DBG_FORCE_FRAME,
@@ -800,8 +828,9 @@ static struct block_defs block_src_defs = {
};
static struct block_defs block_prs_defs = {
- "prs", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+ "prs",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
PRS_REG_DBG_FORCE_FRAME,
@@ -809,8 +838,9 @@ static struct block_defs block_prs_defs = {
};
static struct block_defs block_tsdm_defs = {
- "tsdm", {true, true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ "tsdm",
+ {true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
TSDM_REG_DBG_FORCE_FRAME,
@@ -818,8 +848,9 @@ static struct block_defs block_tsdm_defs = {
};
static struct block_defs block_msdm_defs = {
- "msdm", {true, true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "msdm",
+ {true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
MSDM_REG_DBG_FORCE_FRAME,
@@ -827,8 +858,9 @@ static struct block_defs block_msdm_defs = {
};
static struct block_defs block_usdm_defs = {
- "usdm", {true, true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ "usdm",
+ {true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
USDM_REG_DBG_FORCE_FRAME,
@@ -836,8 +868,9 @@ static struct block_defs block_usdm_defs = {
};
static struct block_defs block_xsdm_defs = {
- "xsdm", {true, true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ "xsdm",
+ {true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
XSDM_REG_DBG_FORCE_FRAME,
@@ -845,8 +878,9 @@ static struct block_defs block_xsdm_defs = {
};
static struct block_defs block_ysdm_defs = {
- "ysdm", {true, true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ "ysdm",
+ {true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
YSDM_REG_DBG_FORCE_FRAME,
@@ -854,8 +888,9 @@ static struct block_defs block_ysdm_defs = {
};
static struct block_defs block_psdm_defs = {
- "psdm", {true, true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ "psdm",
+ {true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
PSDM_REG_DBG_FORCE_FRAME,
@@ -863,8 +898,9 @@ static struct block_defs block_psdm_defs = {
};
static struct block_defs block_tsem_defs = {
- "tsem", {true, true, true}, true, DBG_TSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ "tsem",
+ {true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
TSEM_REG_DBG_FORCE_FRAME,
@@ -872,8 +908,9 @@ static struct block_defs block_tsem_defs = {
};
static struct block_defs block_msem_defs = {
- "msem", {true, true, true}, true, DBG_MSTORM_ID,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "msem",
+ {true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
MSEM_REG_DBG_FORCE_FRAME,
@@ -881,8 +918,9 @@ static struct block_defs block_msem_defs = {
};
static struct block_defs block_usem_defs = {
- "usem", {true, true, true}, true, DBG_USTORM_ID,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ "usem",
+ {true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
USEM_REG_DBG_FORCE_FRAME,
@@ -890,8 +928,9 @@ static struct block_defs block_usem_defs = {
};
static struct block_defs block_xsem_defs = {
- "xsem", {true, true, true}, true, DBG_XSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ "xsem",
+ {true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
XSEM_REG_DBG_FORCE_FRAME,
@@ -899,8 +938,9 @@ static struct block_defs block_xsem_defs = {
};
static struct block_defs block_ysem_defs = {
- "ysem", {true, true, true}, true, DBG_YSTORM_ID,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ "ysem",
+ {true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
YSEM_REG_DBG_FORCE_FRAME,
@@ -908,8 +948,9 @@ static struct block_defs block_ysem_defs = {
};
static struct block_defs block_psem_defs = {
- "psem", {true, true, true}, true, DBG_PSTORM_ID,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ "psem",
+ {true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
PSEM_REG_DBG_FORCE_FRAME,
@@ -917,8 +958,9 @@ static struct block_defs block_psem_defs = {
};
static struct block_defs block_rss_defs = {
- "rss", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ "rss",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
RSS_REG_DBG_FORCE_FRAME,
@@ -926,8 +968,9 @@ static struct block_defs block_rss_defs = {
};
static struct block_defs block_tmld_defs = {
- "tmld", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "tmld",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
TMLD_REG_DBG_FORCE_FRAME,
@@ -935,8 +978,9 @@ static struct block_defs block_tmld_defs = {
};
static struct block_defs block_muld_defs = {
- "muld", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ "muld",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
MULD_REG_DBG_FORCE_FRAME,
@@ -944,8 +988,9 @@ static struct block_defs block_muld_defs = {
};
static struct block_defs block_yuld_defs = {
- "yuld", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ "yuld",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE,
YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID,
YULD_REG_DBG_FORCE_FRAME,
@@ -953,8 +998,9 @@ static struct block_defs block_yuld_defs = {
};
static struct block_defs block_xyld_defs = {
- "xyld", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ "xyld",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
XYLD_REG_DBG_FORCE_FRAME,
@@ -962,8 +1008,9 @@ static struct block_defs block_xyld_defs = {
};
static struct block_defs block_prm_defs = {
- "prm", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "prm",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
PRM_REG_DBG_FORCE_FRAME,
@@ -971,8 +1018,9 @@ static struct block_defs block_prm_defs = {
};
static struct block_defs block_pbf_pb1_defs = {
- "pbf_pb1", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ "pbf_pb1",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
PBF_PB1_REG_DBG_FORCE_FRAME,
@@ -981,8 +1029,9 @@ static struct block_defs block_pbf_pb1_defs = {
};
static struct block_defs block_pbf_pb2_defs = {
- "pbf_pb2", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ "pbf_pb2",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
PBF_PB2_REG_DBG_FORCE_FRAME,
@@ -991,8 +1040,9 @@ static struct block_defs block_pbf_pb2_defs = {
};
static struct block_defs block_rpb_defs = {
- "rpb", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "rpb",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
RPB_REG_DBG_FORCE_FRAME,
@@ -1000,8 +1050,9 @@ static struct block_defs block_rpb_defs = {
};
static struct block_defs block_btb_defs = {
- "btb", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
+ "btb",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
BTB_REG_DBG_FORCE_FRAME,
@@ -1009,8 +1060,9 @@ static struct block_defs block_btb_defs = {
};
static struct block_defs block_pbf_defs = {
- "pbf", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ "pbf",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
PBF_REG_DBG_FORCE_FRAME,
@@ -1018,8 +1070,9 @@ static struct block_defs block_pbf_defs = {
};
static struct block_defs block_rdif_defs = {
- "rdif", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ "rdif",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
RDIF_REG_DBG_FORCE_FRAME,
@@ -1027,8 +1080,9 @@ static struct block_defs block_rdif_defs = {
};
static struct block_defs block_tdif_defs = {
- "tdif", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ "tdif",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
TDIF_REG_DBG_FORCE_FRAME,
@@ -1036,8 +1090,9 @@ static struct block_defs block_tdif_defs = {
};
static struct block_defs block_cdu_defs = {
- "cdu", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ "cdu",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
CDU_REG_DBG_FORCE_FRAME,
@@ -1045,8 +1100,9 @@ static struct block_defs block_cdu_defs = {
};
static struct block_defs block_ccfc_defs = {
- "ccfc", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ "ccfc",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
CCFC_REG_DBG_FORCE_FRAME,
@@ -1054,8 +1110,9 @@ static struct block_defs block_ccfc_defs = {
};
static struct block_defs block_tcfc_defs = {
- "tcfc", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ "tcfc",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
TCFC_REG_DBG_FORCE_FRAME,
@@ -1063,8 +1120,9 @@ static struct block_defs block_tcfc_defs = {
};
static struct block_defs block_igu_defs = {
- "igu", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "igu",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
IGU_REG_DBG_FORCE_FRAME,
@@ -1072,8 +1130,9 @@ static struct block_defs block_igu_defs = {
};
static struct block_defs block_cau_defs = {
- "cau", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ "cau",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
CAU_REG_DBG_FORCE_FRAME,
@@ -1081,8 +1140,9 @@ static struct block_defs block_cau_defs = {
};
static struct block_defs block_umac_defs = {
- "umac", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+ "umac",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE,
UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID,
UMAC_REG_DBG_FORCE_FRAME,
@@ -1090,22 +1150,23 @@ static struct block_defs block_umac_defs = {
};
static struct block_defs block_xmac_defs = {
- "xmac", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "xmac", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_dbg_defs = {
- "dbg", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "dbg", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
};
static struct block_defs block_nig_defs = {
- "nig", {true, true, true}, false, 0,
- {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+ "nig",
+ {true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
NIG_REG_DBG_FORCE_FRAME,
@@ -1113,8 +1174,9 @@ static struct block_defs block_nig_defs = {
};
static struct block_defs block_wol_defs = {
- "wol", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+ "wol",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE,
WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID,
WOL_REG_DBG_FORCE_FRAME,
@@ -1122,8 +1184,9 @@ static struct block_defs block_wol_defs = {
};
static struct block_defs block_bmbn_defs = {
- "bmbn", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
+ "bmbn",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE,
BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID,
BMBN_REG_DBG_FORCE_FRAME,
@@ -1131,15 +1194,16 @@ static struct block_defs block_bmbn_defs = {
};
static struct block_defs block_ipc_defs = {
- "ipc", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "ipc", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
true, false, DBG_RESET_REG_MISCS_PL_UA, 8
};
static struct block_defs block_nwm_defs = {
- "nwm", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+ "nwm",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE,
NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID,
NWM_REG_DBG_FORCE_FRAME,
@@ -1147,22 +1211,29 @@ static struct block_defs block_nwm_defs = {
};
static struct block_defs block_nws_defs = {
- "nws", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
+ "nws",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+ NWS_REG_DBG_SELECT, NWS_REG_DBG_DWORD_ENABLE,
+ NWS_REG_DBG_SHIFT, NWS_REG_DBG_FORCE_VALID,
+ NWS_REG_DBG_FORCE_FRAME,
true, false, DBG_RESET_REG_MISCS_PL_HV, 12
};
static struct block_defs block_ms_defs = {
- "ms", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
- 0, 0, 0, 0, 0,
+ "ms",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+ MS_REG_DBG_SELECT, MS_REG_DBG_DWORD_ENABLE,
+ MS_REG_DBG_SHIFT, MS_REG_DBG_FORCE_VALID,
+ MS_REG_DBG_FORCE_FRAME,
true, false, DBG_RESET_REG_MISCS_PL_HV, 13
};
static struct block_defs block_phy_pcie_defs = {
- "phy_pcie", {false, false, true}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ "phy_pcie",
+ {false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
PCIE_REG_DBG_COMMON_FORCE_FRAME,
@@ -1170,22 +1241,57 @@ static struct block_defs block_phy_pcie_defs = {
};
static struct block_defs block_led_defs = {
- "led", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "led", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 14
+};
+
+static struct block_defs block_avs_wrap_defs = {
+ "avs_wrap", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_UA, 11
+};
+
+static struct block_defs block_rgfs_defs = {
+ "rgfs", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
- true, true, DBG_RESET_REG_MISCS_PL_HV, 14
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_tgfs_defs = {
+ "tgfs", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_ptld_defs = {
+ "ptld", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_ypld_defs = {
+ "ypld", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_misc_aeu_defs = {
- "misc_aeu", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "misc_aeu", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_bar0_map_defs = {
- "bar0_map", {false, false, false}, false, 0,
- {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ "bar0_map", {false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
@@ -1269,6 +1375,11 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
&block_ms_defs,
&block_phy_pcie_defs,
&block_led_defs,
+ &block_avs_wrap_defs,
+ &block_rgfs_defs,
+ &block_tgfs_defs,
+ &block_ptld_defs,
+ &block_ypld_defs,
&block_misc_aeu_defs,
&block_bar0_map_defs,
};
@@ -1281,65 +1392,67 @@ static struct platform_defs s_platform_defs[] = {
};
static struct grc_param_defs s_grc_param_defs[] = {
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */
- {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */
- {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */
- {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */
- {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */
- {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */
- {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */
- {{0, 0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */
- {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */
+ {{1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */
+ {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */
+ {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */
+ {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */
+ {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */
+ {{0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */
+ {{0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */
+ {{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
MAX_LCIDS}, /* DBG_GRC_PARAM_NUM_LCIDS */
- {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
+ {{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
MAX_LTIDS}, /* DBG_GRC_PARAM_NUM_LTIDS */
- {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */
- {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */
- {{0, 0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */
- {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */
- {{1, 1, 1}, 0, 1, false, 0, 1} /* DBG_GRC_PARAM_DUMP_PHY */
+ {{0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */
+ {{0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */
+ {{0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */
+ {{1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PHY */
+ {{0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_NO_MCP */
+ {{0, 0}, 0, 1, false, 0, 0} /* DBG_GRC_PARAM_NO_FW_VER */
};
static struct rss_mem_defs s_rss_mem_defs[] = {
{ "rss_mem_cid", "rss_cid", 0,
- {256, 256, 320},
- {32, 32, 32} },
+ {256, 320},
+ {32, 32} },
{ "rss_mem_key_msb", "rss_key", 1024,
- {128, 128, 208},
- {256, 256, 256} },
+ {128, 208},
+ {256, 256} },
{ "rss_mem_key_lsb", "rss_key", 2048,
- {128, 128, 208},
- {64, 64, 64} },
+ {128, 208},
+ {64, 64} },
{ "rss_mem_info", "rss_info", 3072,
- {128, 128, 208},
- {16, 16, 16} },
+ {128, 208},
+ {16, 16} },
{ "rss_mem_ind", "rss_ind", 4096,
- {(128 * 128), (128 * 128), (128 * 208)},
- {16, 16, 16} }
+ {(128 * 128), (128 * 208)},
+ {16, 16} }
};
static struct vfc_ram_defs s_vfc_ram_defs[] = {
@@ -1352,32 +1465,32 @@ static struct vfc_ram_defs s_vfc_ram_defs[] = {
static struct big_ram_defs s_big_ram_defs[] = {
{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
- {4800, 4800, 5632} },
+ {4800, 5632} },
{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
- {2880, 2880, 3680} },
+ {2880, 3680} },
{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
- {1152, 1152, 1152} }
+ {1152, 1152} }
};
static struct reset_reg_defs s_reset_regs_defs[] = {
{ MISCS_REG_RESET_PL_UA, 0x0,
- {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */
+ {true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */
{ MISCS_REG_RESET_PL_HV, 0x0,
- {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */
+ {true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */
{ MISCS_REG_RESET_PL_HV_2, 0x0,
- {false, false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */
+ {false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */
{ MISC_REG_RESET_PL_UA, 0x0,
- {true, true, true} }, /* DBG_RESET_REG_MISC_PL_UA */
+ {true, true} }, /* DBG_RESET_REG_MISC_PL_UA */
{ MISC_REG_RESET_PL_HV, 0x0,
- {true, true, true} }, /* DBG_RESET_REG_MISC_PL_HV */
+ {true, true} }, /* DBG_RESET_REG_MISC_PL_HV */
{ MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
- {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
+ {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
{ MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
- {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
+ {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
{ MISC_REG_RESET_PL_PDA_VAUX, 0x2,
- {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
+ {true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
};
static struct phy_defs s_phy_defs[] = {
@@ -1410,6 +1523,26 @@ static u32 qed_read_unaligned_dword(u8 *buf)
return dword;
}
+/* Returns the value of the specified GRC param */
+static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ return dev_data->grc.param_val[grc_param];
+}
+
+/* Initializes the GRC parameters */
+static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ if (!dev_data->grc.params_initialized) {
+ qed_dbg_grc_set_params_default(p_hwfn);
+ dev_data->grc.params_initialized = 1;
+ }
+}
+
/* Initializes debug data for the specified device */
static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
@@ -1424,13 +1557,17 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
dev_data->mode_enable[MODE_K2] = 1;
} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
dev_data->chip_id = CHIP_BB_B0;
- dev_data->mode_enable[MODE_BB_B0] = 1;
+ dev_data->mode_enable[MODE_BB] = 1;
} else {
return DBG_STATUS_UNKNOWN_CHIP;
}
dev_data->platform_id = PLATFORM_ASIC;
dev_data->mode_enable[MODE_ASIC] = 1;
+
+ /* Initializes the GRC parameters */
+ qed_dbg_grc_init_params(p_hwfn);
+
dev_data->initialized = true;
return DBG_STATUS_OK;
}
@@ -1561,7 +1698,7 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
int printed_chars;
u32 offset = 0;
- if (dump) {
+ if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
/* Read FW image/version from PRAM in a non-reset SEMI */
bool found = false;
u8 storm_id;
@@ -1622,7 +1759,7 @@ static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
{
char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
- if (dump) {
+ if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
u32 global_section_offsize, global_section_addr, mfw_ver;
u32 public_data_addr, global_section_offsize_addr;
int printed_chars;
@@ -1683,15 +1820,13 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
bool dump,
u8 num_specific_global_params)
{
+ u8 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u32 offset = 0;
/* Find platform string and dump global params section header */
offset += qed_dump_section_hdr(dump_buf + offset,
- dump,
- "global_params",
- NUM_COMMON_GLOBAL_PARAMS +
- num_specific_global_params);
+ dump, "global_params", num_params);
/* Store params */
offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
@@ -1815,37 +1950,6 @@ static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
}
}
-/* Returns the value of the specified GRC param */
-static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
- enum dbg_grc_params grc_param)
-{
- struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-
- return dev_data->grc.param_val[grc_param];
-}
-
-/* Clear all GRC params */
-static void qed_dbg_grc_clear_params(struct qed_hwfn *p_hwfn)
-{
- struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 i;
-
- for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
- dev_data->grc.param_set_by_user[i] = 0;
-}
-
-/* Assign default GRC param values */
-static void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
-{
- struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 i;
-
- for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
- if (!dev_data->grc.param_set_by_user[i])
- dev_data->grc.param_val[i] =
- s_grc_param_defs[i].default_val[dev_data->chip_id];
-}
-
/* Returns true if the specified entity (indicated by GRC param) should be
* included in the dump, false otherwise.
*/
@@ -1971,7 +2075,7 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
}
}
-/* Returns the attention name offsets of the specified block */
+/* Returns the attention block data of the specified block */
static const struct dbg_attn_block_type_data *
qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
{
@@ -2040,7 +2144,7 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
* The following parameters are dumped:
* - 'count' = num_dumped_entries
* - 'split' = split_type
- * - 'id'i = split_id (dumped only if split_id >= 0)
+ * - 'id' = split_id (dumped only if split_id >= 0)
* - 'param_name' = param_val (user param, dumped only if param_name != NULL and
* param_val != NULL)
*/
@@ -2069,21 +2173,81 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
return offset;
}
-/* Dumps GRC register/memory. Returns the dumped size in dwords. */
+/* Dumps the GRC registers in the specified address range.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf,
+ bool dump, u32 addr, u32 len)
+{
+ u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
+
+ if (dump)
+ for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
+ *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
+ else
+ offset += len;
+ return offset;
+}
+
+/* Dumps GRC registers sequence header. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, bool dump, u32 addr,
+ u32 len)
+{
+ if (dump)
+ *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
+ return 1;
+}
+
+/* Dumps GRC registers sequence. Returns the dumped size in dwords. */
static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf,
bool dump, u32 addr, u32 len)
{
- u32 offset = 0, i;
+ u32 offset = 0;
+
+ offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, addr, len);
+ return offset;
+}
+
+/* Dumps GRC registers sequence with skip cycle.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf,
+ bool dump, u32 addr, u32 total_len,
+ u32 read_len, u32 skip_len)
+{
+ u32 offset = 0, reg_offset = 0;
+ offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
if (dump) {
- *(dump_buf + offset++) = addr | (len << REG_DUMP_LEN_SHIFT);
- for (i = 0; i < len; i++, addr++, offset++)
- *(dump_buf + offset) = qed_rd(p_hwfn,
- p_ptt,
- DWORDS_TO_BYTES(addr));
+ while (reg_offset < total_len) {
+ u32 curr_len = min_t(u32,
+ read_len,
+ total_len - reg_offset);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, addr, curr_len);
+ reg_offset += curr_len;
+ addr += curr_len;
+ if (reg_offset < total_len) {
+ curr_len = min_t(u32,
+ skip_len,
+ total_len - skip_len);
+ memset(dump_buf + offset, 0,
+ DWORDS_TO_BYTES(curr_len));
+ offset += curr_len;
+ reg_offset += curr_len;
+ addr += curr_len;
+ }
+ }
} else {
- offset += len + 1;
+ offset += total_len;
}
return offset;
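
[Annotation] The hunk above factors the raw read loop into qed_grc_dump_addr_range() and adds a skip-cycle variant, qed_grc_dump_reg_entry_skip(), which reads read_len dwords and then emits skip_len zeroed dwords until total_len dwords have been produced (used later for the RDIF/TDIF error-info arrays, where every 8th register is skipped). The following is a minimal, self-contained userspace sketch of that read/skip interleave; fake_grc_rd() and dump_with_skip() are hypothetical stand-ins, not driver code, and the clamping details are simplified for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for a GRC register read; in the driver this is qed_rd(). */
static uint32_t fake_grc_rd(uint32_t dword_addr)
{
	return 0xd0000000u | dword_addr;
}

/*
 * Copy 'read_len' dwords from the device, then emit 'skip_len' zeroed
 * dwords, repeating until 'total_len' dwords have been produced.
 */
static uint32_t dump_with_skip(uint32_t *buf, uint32_t addr,
			       uint32_t total_len, uint32_t read_len,
			       uint32_t skip_len)
{
	uint32_t done = 0;

	while (done < total_len) {
		uint32_t chunk = (total_len - done < read_len) ?
				 total_len - done : read_len;
		uint32_t i;

		for (i = 0; i < chunk; i++)
			buf[done + i] = fake_grc_rd(addr + i);
		done += chunk;
		addr += chunk;

		if (done < total_len) {
			chunk = (total_len - done < skip_len) ?
				total_len - done : skip_len;
			/* Skipped registers are zero-filled, not read. */
			memset(buf + done, 0, chunk * sizeof(uint32_t));
			done += chunk;
			addr += chunk;
		}
	}
	return done;
}

int main(void)
{
	uint32_t buf[24];
	uint32_t n = dump_with_skip(buf, 0x100, 24, 7, 1);
	uint32_t i;

	for (i = 0; i < n; i++)
		printf("%2u: 0x%08x%s\n", (unsigned)i, (unsigned)buf[i],
		       buf[i] ? "" : "  (skipped)");
	return 0;
}

With read_len = 7 and skip_len = 1, every 8th slot in the output buffer stays zero, matching the pattern the new helper is used for.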
@@ -2124,14 +2288,17 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
const struct dbg_dump_reg *reg =
(const struct dbg_dump_reg *)
&input_regs_arr.ptr[input_offset];
+ u32 addr, len;
+ addr = GET_FIELD(reg->data,
+ DBG_DUMP_REG_ADDRESS);
+ len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
offset +=
- qed_grc_dump_reg_entry(p_hwfn, p_ptt,
- dump_buf + offset, dump,
- GET_FIELD(reg->data,
- DBG_DUMP_REG_ADDRESS),
- GET_FIELD(reg->data,
- DBG_DUMP_REG_LENGTH));
+ qed_grc_dump_reg_entry(p_hwfn, p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ len);
(*num_dumped_reg_entries)++;
}
} else {
@@ -2194,8 +2361,14 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
const char *param_name, const char *param_val)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ struct chip_platform_defs *p_platform_defs;
u32 offset = 0, input_offset = 0;
- u8 port_id, pf_id;
+ struct chip_defs *p_chip_defs;
+ u8 port_id, pf_id, vf_id;
+ u16 fid;
+
+ p_chip_defs = &s_chip_defs[dev_data->chip_id];
+ p_platform_defs = &p_chip_defs->per_platform[dev_data->platform_id];
if (dump)
DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
@@ -2214,7 +2387,6 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
switch (split_type_id) {
case SPLIT_TYPE_NONE:
- case SPLIT_TYPE_VF:
offset += qed_grc_dump_split_data(p_hwfn,
p_ptt,
curr_input_regs_arr,
@@ -2227,10 +2399,7 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
param_val);
break;
case SPLIT_TYPE_PORT:
- for (port_id = 0;
- port_id <
- s_chip_defs[dev_data->chip_id].
- per_platform[dev_data->platform_id].num_ports;
+ for (port_id = 0; port_id < p_platform_defs->num_ports;
port_id++) {
if (dump)
qed_port_pretend(p_hwfn, p_ptt,
@@ -2247,20 +2416,48 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
break;
case SPLIT_TYPE_PF:
case SPLIT_TYPE_PORT_PF:
- for (pf_id = 0;
- pf_id <
- s_chip_defs[dev_data->chip_id].
- per_platform[dev_data->platform_id].num_pfs;
+ for (pf_id = 0; pf_id < p_platform_defs->num_pfs;
pf_id++) {
- if (dump)
- qed_fid_pretend(p_hwfn, p_ptt, pf_id);
- offset += qed_grc_dump_split_data(p_hwfn,
- p_ptt,
- curr_input_regs_arr,
- dump_buf + offset,
- dump, block_enable,
- "pf", pf_id, param_name,
- param_val);
+ u8 pfid_shift =
+ PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+
+ if (dump) {
+ fid = pf_id << pfid_shift;
+ qed_fid_pretend(p_hwfn, p_ptt, fid);
+ }
+
+ offset +=
+ qed_grc_dump_split_data(p_hwfn, p_ptt,
+ curr_input_regs_arr,
+ dump_buf + offset,
+ dump, block_enable,
+ "pf", pf_id,
+ param_name,
+ param_val);
+ }
+ break;
+ case SPLIT_TYPE_VF:
+ for (vf_id = 0; vf_id < p_platform_defs->num_vfs;
+ vf_id++) {
+ u8 vfvalid_shift =
+ PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
+ u8 vfid_shift =
+ PXP_PRETEND_CONCRETE_FID_VFID_SHIFT;
+
+ if (dump) {
+ fid = BIT(vfvalid_shift) |
+ (vf_id << vfid_shift);
+ qed_fid_pretend(p_hwfn, p_ptt, fid);
+ }
+
+ offset +=
+ qed_grc_dump_split_data(p_hwfn, p_ptt,
+ curr_input_regs_arr,
+ dump_buf + offset,
+ dump, block_enable,
+ "vf", vf_id,
+ param_name,
+ param_val);
}
break;
default:
@@ -2271,8 +2468,11 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
}
/* Pretend to original PF */
- if (dump)
- qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ if (dump) {
+ fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+ qed_fid_pretend(p_hwfn, p_ptt, fid);
+ }
+
return offset;
}
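
[Annotation] The per-PF and new per-VF split dumps above compose a concrete FID before calling qed_fid_pretend(): a plain PFID shifted into place for PF splits, and the VFID together with the vfvalid bit for VF splits. A small standalone sketch of that composition follows; the shift values are placeholders for illustration only, not the real PXP_PRETEND_CONCRETE_FID_* constants from the qed HSI headers.

#include <stdint.h>
#include <stdio.h>

/* Placeholder shift values -- the real ones come from the HSI headers. */
#define PFID_SHIFT	0
#define VFVALID_SHIFT	7
#define VFID_SHIFT	8

static uint16_t pf_pretend_fid(uint8_t pf_id)
{
	/* PF split: only the PFID field is populated. */
	return (uint16_t)((uint16_t)pf_id << PFID_SHIFT);
}

static uint16_t vf_pretend_fid(uint8_t vf_id)
{
	/* VF split: set the vfvalid bit so the VFID field is honoured. */
	return (uint16_t)((1u << VFVALID_SHIFT) |
			  ((uint16_t)vf_id << VFID_SHIFT));
}

int main(void)
{
	printf("pf 2 fid: 0x%04x\n", (unsigned)pf_pretend_fid(2));
	printf("vf 5 fid: 0x%04x\n", (unsigned)vf_pretend_fid(5));
	return 0;
}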
@@ -2291,13 +2491,14 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
/* Write reset registers */
for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
+ u32 addr = BYTES_TO_DWORDS(s_reset_regs_defs[i].addr);
+
offset += qed_grc_dump_reg_entry(p_hwfn,
p_ptt,
dump_buf + offset,
dump,
- BYTES_TO_DWORDS
- (s_reset_regs_defs
- [i].addr), 1);
+ addr,
+ 1);
num_regs++;
}
}
@@ -2339,6 +2540,7 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
&attn_reg_arr[reg_idx];
u16 modes_buf_offset;
bool eval_mode;
+ u32 addr;
/* Check mode */
eval_mode = GET_FIELD(reg_data->mode.data,
@@ -2349,19 +2551,23 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
if (!eval_mode ||
qed_is_mode_match(p_hwfn, &modes_buf_offset)) {
/* Mode match - read and dump registers */
- offset += qed_grc_dump_reg_entry(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- reg_data->mask_address,
- 1);
- offset += qed_grc_dump_reg_entry(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- GET_FIELD(reg_data->data,
- DBG_ATTN_REG_STS_ADDRESS),
- 1);
+ addr = reg_data->mask_address;
+ offset +=
+ qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ 1);
+ addr = GET_FIELD(reg_data->data,
+ DBG_ATTN_REG_STS_ADDRESS);
+ offset +=
+ qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ 1);
num_reg_entries += 2;
}
}
@@ -2369,18 +2575,21 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
/* Write storm stall status registers */
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ u32 addr;
+
if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] &&
dump)
continue;
+ addr =
+ BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_STALLED);
offset += qed_grc_dump_reg_entry(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- BYTES_TO_DWORDS(s_storm_defs[storm_id].
- sem_fast_mem_addr +
- SEM_FAST_REG_STALLED),
- 1);
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ 1);
num_reg_entries++;
}
@@ -2392,11 +2601,47 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
return offset;
}
+/* Dumps registers that can't be represented in the debug arrays */
+static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ u32 offset = 0, addr;
+
+ offset += qed_grc_dump_regs_hdr(dump_buf,
+ dump, 2, "eng", -1, NULL, NULL);
+
+	/* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8th register should be
+ * skipped).
+ */
+ addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
+ offset += qed_grc_dump_reg_entry_skip(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ RDIF_REG_DEBUG_ERROR_INFO_SIZE,
+ 7,
+ 1);
+ addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
+ offset +=
+ qed_grc_dump_reg_entry_skip(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ TDIF_REG_DEBUG_ERROR_INFO_SIZE,
+ 7,
+ 1);
+
+ return offset;
+}
+
/* Dumps a GRC memory header (section and params).
* The following parameters are dumped:
* name - name is dumped only if it's not NULL.
- * addr - byte_addr is dumped only if name is NULL.
- * len - dword_len is always dumped.
+ * addr - addr is dumped only if name is NULL.
+ * len - len is always dumped.
* width - bit_width is dumped if it's not zero.
* packed - packed=1 is dumped if it's not false.
* mem_group - mem_group is always dumped.
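
[Annotation] The doc-comment change above reflects the wider switch in this patch from byte to dword GRC addresses: callers of qed_grc_dump_mem_hdr()/qed_grc_dump_mem() now pass dword addresses, and the header converts back with DWORDS_TO_BYTES() only when emitting the 'addr' dump parameter. A minimal sketch of that convention, assuming the usual 4-bytes-per-dword helpers; the macro bodies and the sample address below are illustrative, not copied from the driver.

#include <stdint.h>
#include <stdio.h>

/* Assumed to mirror the driver's helpers: one dword is four bytes. */
#define BYTES_IN_DWORD		4
#define BYTES_TO_DWORDS(x)	((x) / BYTES_IN_DWORD)
#define DWORDS_TO_BYTES(x)	((x) * BYTES_IN_DWORD)

int main(void)
{
	uint32_t byte_addr = 0xe20000;	/* illustrative GRC byte address */
	uint32_t dword_addr = BYTES_TO_DWORDS(byte_addr);

	/* Addresses and lengths are tracked internally in dwords... */
	printf("dword addr: 0x%x\n", (unsigned)dword_addr);
	/* ...and converted back to bytes only for the 'addr' dump param. */
	printf("byte addr for dump param: 0x%x\n",
	       (unsigned)DWORDS_TO_BYTES(dword_addr));
	return 0;
}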
@@ -2408,8 +2653,8 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump,
const char *name,
- u32 byte_addr,
- u32 dword_len,
+ u32 addr,
+ u32 len,
u32 bit_width,
bool packed,
const char *mem_group,
@@ -2419,7 +2664,7 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
u32 offset = 0;
char buf[64];
- if (!dword_len)
+ if (!len)
DP_NOTICE(p_hwfn,
"Unexpected GRC Dump error: dumped memory size must be non-zero\n");
if (bit_width)
@@ -2446,20 +2691,21 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
"Dumping %d registers from %s...\n",
- dword_len, buf);
+ len, buf);
} else {
/* Dump address */
offset += qed_dump_num_param(dump_buf + offset,
- dump, "addr", byte_addr);
- if (dump && dword_len > 64)
+ dump, "addr",
+ DWORDS_TO_BYTES(addr));
+ if (dump && len > 64)
DP_VERBOSE(p_hwfn,
QED_MSG_DEBUG,
"Dumping %d registers from address 0x%x...\n",
- dword_len, byte_addr);
+ len, (u32)DWORDS_TO_BYTES(addr));
}
/* Dump len */
- offset += qed_dump_num_param(dump_buf + offset, dump, "len", dword_len);
+ offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
/* Dump bit width */
if (bit_width)
@@ -2492,8 +2738,8 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
bool dump,
const char *name,
- u32 byte_addr,
- u32 dword_len,
+ u32 addr,
+ u32 len,
u32 bit_width,
bool packed,
const char *mem_group,
@@ -2505,21 +2751,14 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
name,
- byte_addr,
- dword_len,
+ addr,
+ len,
bit_width,
packed,
mem_group, is_storm, storm_letter);
- if (dump) {
- u32 i;
-
- for (i = 0; i < dword_len;
- i++, byte_addr += BYTES_IN_DWORD, offset++)
- *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
- } else {
- offset += dword_len;
- }
-
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, addr, len);
return offset;
}
@@ -2575,25 +2814,41 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
if (qed_grc_is_mem_included(p_hwfn,
(enum block_id)cond_hdr->block_id,
mem_group_id)) {
- u32 mem_byte_addr =
- DWORDS_TO_BYTES(GET_FIELD(mem->dword0,
- DBG_DUMP_MEM_ADDRESS));
+ u32 mem_addr = GET_FIELD(mem->dword0,
+ DBG_DUMP_MEM_ADDRESS);
u32 mem_len = GET_FIELD(mem->dword1,
DBG_DUMP_MEM_LENGTH);
+ enum dbg_grc_params grc_param;
char storm_letter = 'a';
bool is_storm = false;
/* Update memory length for CCFC/TCFC memories
* according to number of LCIDs/LTIDs.
*/
- if (mem_group_id == MEM_GROUP_CONN_CFC_MEM)
+ if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
+ if (mem_len % MAX_LCIDS != 0) {
+ DP_NOTICE(p_hwfn,
+ "Invalid CCFC connection memory size\n");
+ return 0;
+ }
+
+ grc_param = DBG_GRC_PARAM_NUM_LCIDS;
mem_len = qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LCIDS)
- * (mem_len / MAX_LCIDS);
- else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM)
+ grc_param) *
+ (mem_len / MAX_LCIDS);
+ } else if (mem_group_id ==
+ MEM_GROUP_TASK_CFC_MEM) {
+ if (mem_len % MAX_LTIDS != 0) {
+ DP_NOTICE(p_hwfn,
+ "Invalid TCFC task memory size\n");
+ return 0;
+ }
+
+ grc_param = DBG_GRC_PARAM_NUM_LTIDS;
mem_len = qed_grc_get_param(p_hwfn,
- DBG_GRC_PARAM_NUM_LTIDS)
- * (mem_len / MAX_LTIDS);
+ grc_param) *
+ (mem_len / MAX_LTIDS);
+ }
/* If memory is associated with Storm, update
* Storm details.
@@ -2610,7 +2865,7 @@ static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
/* Dump memory */
offset += qed_grc_dump_mem(p_hwfn, p_ptt,
dump_buf + offset, dump, NULL,
- mem_byte_addr, mem_len, 0,
+ mem_addr, mem_len, 0,
false,
s_mem_group_names[mem_group_id],
is_storm, storm_letter);
@@ -2799,29 +3054,31 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
u32 offset = 0;
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
- if (qed_grc_is_storm_included(p_hwfn,
- (enum dbg_storms)storm_id)) {
- for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
- u32 addr =
- s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_STORM_REG_FILE +
- DWORDS_TO_BYTES(IOR_SET_OFFSET(set_id));
+ struct storm_defs *storm = &s_storm_defs[storm_id];
- buf[strlen(buf) - 1] = '0' + set_id;
- offset += qed_grc_dump_mem(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- buf,
- addr,
- IORS_PER_SET,
- 32,
- false,
- "ior",
- true,
- s_storm_defs
- [storm_id].letter);
- }
+ if (!qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id))
+ continue;
+
+ for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
+ u32 dwords, addr;
+
+ dwords = storm->sem_fast_mem_addr +
+ SEM_FAST_REG_STORM_REG_FILE;
+ addr = BYTES_TO_DWORDS(dwords) + IOR_SET_OFFSET(set_id);
+ buf[strlen(buf) - 1] = '0' + set_id;
+ offset += qed_grc_dump_mem(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ buf,
+ addr,
+ IORS_PER_SET,
+ 32,
+ false,
+ "ior",
+ true,
+ storm->letter);
}
}
@@ -2990,34 +3247,39 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id];
u32 num_entries = rss_defs->num_entries[dev_data->chip_id];
u32 entry_width = rss_defs->entry_width[dev_data->chip_id];
- u32 total_size = (num_entries * entry_width) / 32;
+ u32 total_dwords = (num_entries * entry_width) / 32;
+ u32 size = RSS_REG_RSS_RAM_DATA_SIZE;
bool packed = (entry_width == 16);
- u32 addr = rss_defs->addr;
- u32 i, j;
+ u32 rss_addr = rss_defs->addr;
+ u32 i, addr;
offset += qed_grc_dump_mem_hdr(p_hwfn,
dump_buf + offset,
dump,
rss_defs->mem_name,
- addr,
- total_size,
+ 0,
+ total_dwords,
entry_width,
packed,
rss_defs->type_name, false, 0);
if (!dump) {
- offset += total_size;
+ offset += total_dwords;
continue;
}
/* Dump RSS data */
- for (i = 0; i < BYTES_TO_DWORDS(total_size); i++, addr++) {
- qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, addr);
- for (j = 0; j < BYTES_IN_DWORD; j++, offset++)
- *(dump_buf + offset) =
- qed_rd(p_hwfn, p_ptt,
- RSS_REG_RSS_RAM_DATA +
- DWORDS_TO_BYTES(j));
+ for (i = 0; i < total_dwords;
+ i += RSS_REG_RSS_RAM_DATA_SIZE, rss_addr++) {
+ addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
+ qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf +
+ offset,
+ dump,
+ addr,
+ size);
}
}
@@ -3030,19 +3292,19 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
u32 *dump_buf, bool dump, u8 big_ram_id)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 total_blocks, ram_size, offset = 0, i;
char mem_name[12] = "???_BIG_RAM";
char type_name[8] = "???_RAM";
- u32 ram_size, total_blocks;
- u32 offset = 0, i, j;
+ struct big_ram_defs *big_ram;
- total_blocks =
- s_big_ram_defs[big_ram_id].num_of_blocks[dev_data->chip_id];
+ big_ram = &s_big_ram_defs[big_ram_id];
+ total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
- strncpy(type_name, s_big_ram_defs[big_ram_id].instance_name,
- strlen(s_big_ram_defs[big_ram_id].instance_name));
- strncpy(mem_name, s_big_ram_defs[big_ram_id].instance_name,
- strlen(s_big_ram_defs[big_ram_id].instance_name));
+ strncpy(type_name, big_ram->instance_name,
+ strlen(big_ram->instance_name));
+ strncpy(mem_name, big_ram->instance_name,
+ strlen(big_ram->instance_name));
/* Dump memory header */
offset += qed_grc_dump_mem_hdr(p_hwfn,
@@ -3059,13 +3321,17 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
/* Read and dump Big RAM data */
for (i = 0; i < total_blocks / 2; i++) {
- qed_wr(p_hwfn, p_ptt, s_big_ram_defs[big_ram_id].addr_reg_addr,
- i);
- for (j = 0; j < 2 * BIG_RAM_BLOCK_SIZE_DWORDS; j++, offset++)
- *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt,
- s_big_ram_defs[big_ram_id].
- data_reg_addr +
- DWORDS_TO_BYTES(j));
+ u32 addr, len;
+
+ qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
+ addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
+ len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS;
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ len);
}
return offset;
@@ -3075,11 +3341,11 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
bool block_enable[MAX_BLOCK_ID] = { 0 };
+ u32 offset = 0, addr;
bool halted = false;
- u32 offset = 0;
/* Halt MCP */
- if (dump) {
+ if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
halted = !qed_mcp_halt(p_hwfn, p_ptt);
if (!halted)
DP_NOTICE(p_hwfn, "MCP halt failed!\n");
@@ -3091,7 +3357,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
NULL,
- MCP_REG_SCRATCH,
+ BYTES_TO_DWORDS(MCP_REG_SCRATCH),
MCP_REG_SCRATCH_SIZE,
0, false, "MCP", false, 0);
@@ -3101,7 +3367,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
dump_buf + offset,
dump,
NULL,
- MCP_REG_CPU_REG_FILE,
+ BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
MCP_REG_CPU_REG_FILE_SIZE,
0, false, "MCP", false, 0);
@@ -3115,12 +3381,13 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
/* Dump required non-MCP registers */
offset += qed_grc_dump_regs_hdr(dump_buf + offset,
dump, 1, "eng", -1, "block", "MCP");
+ addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
offset += qed_grc_dump_reg_entry(p_hwfn,
p_ptt,
dump_buf + offset,
dump,
- BYTES_TO_DWORDS
- (MISC_REG_SHARED_MEM_ADDR), 1);
+ addr,
+ 1);
/* Release MCP */
if (halted && qed_mcp_resume(p_hwfn, p_ptt))
@@ -3212,7 +3479,7 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
{
u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS;
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 offset = 0, block_id, line_id, addr, i;
+ u32 offset = 0, block_id, line_id;
struct block_defs *p_block_defs;
if (dump) {
@@ -3255,6 +3522,8 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
if (dump && !dev_data->block_in_reset[block_id]) {
u8 dbg_client_id =
p_block_defs->dbg_client_id[dev_data->chip_id];
+ u32 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
+ u32 len = STATIC_DEBUG_LINE_DWORDS;
/* Enable block's client */
qed_bus_enable_clients(p_hwfn, p_ptt,
@@ -3270,11 +3539,13 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
0xf, 0, 0, 0);
/* Read debug line info */
- for (i = 0, addr = DBG_REG_CALENDAR_OUT_DATA;
- i < STATIC_DEBUG_LINE_DWORDS;
- i++, offset++, addr += BYTES_IN_DWORD)
- dump_buf[offset] = qed_rd(p_hwfn, p_ptt,
- addr);
+ offset +=
+ qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ len);
}
/* Disable block's client and debug output */
@@ -3311,14 +3582,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
u8 i, port_mode = 0;
u32 offset = 0;
- /* Check if emulation platform */
*num_dumped_dwords = 0;
- /* Fill GRC parameters that were not set by the user with their default
- * value.
- */
- qed_dbg_grc_set_params_default(p_hwfn);
-
/* Find port mode */
if (dump) {
switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
@@ -3370,15 +3635,14 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
}
/* Disable all parities using MFW command */
- if (dump) {
+ if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
if (!parities_masked) {
+ DP_NOTICE(p_hwfn,
+ "Failed to mask parities using MFW\n");
if (qed_grc_get_param
(p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
- else
- DP_NOTICE(p_hwfn,
- "Failed to mask parities using MFW\n");
}
}
@@ -3409,6 +3673,11 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
offset,
dump,
block_enable, NULL, NULL);
+
+ /* Dump special registers */
+ offset += qed_grc_dump_special_regs(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
}
/* Dump memories */
@@ -3583,9 +3852,9 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
}
if (mode_match) {
- u32 grc_addr =
- DWORDS_TO_BYTES(GET_FIELD(reg->data,
- DBG_IDLE_CHK_INFO_REG_ADDRESS));
+ u32 addr =
+ GET_FIELD(reg->data,
+ DBG_IDLE_CHK_INFO_REG_ADDRESS);
/* Write register header */
struct dbg_idle_chk_result_reg_hdr *reg_hdr =
@@ -3597,16 +3866,19 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
memset(reg_hdr, 0, sizeof(*reg_hdr));
reg_hdr->size = reg->size;
SET_FIELD(reg_hdr->data,
- DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
- rule->num_cond_regs + reg_id);
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
+ rule->num_cond_regs + reg_id);
/* Write register values */
- for (i = 0; i < reg->size;
- i++, offset++, grc_addr += 4)
- dump_buf[offset] =
- qed_rd(p_hwfn, p_ptt, grc_addr);
- }
+ offset +=
+ qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ reg->size);
}
+ }
}
return offset;
@@ -3621,7 +3893,7 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
- u32 i, j, offset = 0;
+ u32 i, offset = 0;
u16 entry_id;
u8 reg_id;
@@ -3664,73 +3936,83 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
if (!check_rule && dump)
continue;
+ if (!dump) {
+ u32 entry_dump_size =
+ qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ false,
+ rule->rule_id,
+ rule,
+ 0,
+ NULL);
+
+ offset += num_reg_entries * entry_dump_size;
+ (*num_failing_rules) += num_reg_entries;
+ continue;
+ }
+
/* Go over all register entries (number of entries is the same
* for all condition registers).
*/
for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
/* Read current entry of all condition registers */
- if (dump) {
- u32 next_reg_offset = 0;
-
- for (reg_id = 0;
- reg_id < rule->num_cond_regs;
- reg_id++) {
- const struct dbg_idle_chk_cond_reg
- *reg = &cond_regs[reg_id];
-
- /* Find GRC address (if it's a memory,
- * the address of the specific entry is
- * calculated).
- */
- u32 grc_addr =
- DWORDS_TO_BYTES(
- GET_FIELD(reg->data,
- DBG_IDLE_CHK_COND_REG_ADDRESS));
-
- if (reg->num_entries > 1 ||
- reg->start_entry > 0) {
- u32 padded_entry_size =
- reg->entry_size > 1 ?
- roundup_pow_of_two
- (reg->entry_size) : 1;
-
- grc_addr +=
- DWORDS_TO_BYTES(
- (reg->start_entry +
- entry_id)
- * padded_entry_size);
- }
+ u32 next_reg_offset = 0;
- /* Read registers */
- if (next_reg_offset + reg->entry_size >=
- IDLE_CHK_MAX_ENTRIES_SIZE) {
- DP_NOTICE(p_hwfn,
- "idle check registers entry is too large\n");
- return 0;
- }
+ for (reg_id = 0; reg_id < rule->num_cond_regs;
+ reg_id++) {
+ const struct dbg_idle_chk_cond_reg *reg =
+ &cond_regs[reg_id];
- for (j = 0; j < reg->entry_size;
- j++, next_reg_offset++,
- grc_addr += 4)
- cond_reg_values[next_reg_offset] =
- qed_rd(p_hwfn, p_ptt, grc_addr);
+ /* Find GRC address (if it's a memory, the
+ * address of the specific entry is calculated).
+ */
+ u32 addr =
+ GET_FIELD(reg->data,
+ DBG_IDLE_CHK_COND_REG_ADDRESS);
+
+ if (reg->num_entries > 1 ||
+ reg->start_entry > 0) {
+ u32 padded_entry_size =
+ reg->entry_size > 1 ?
+ roundup_pow_of_two(reg->entry_size) :
+ 1;
+
+ addr += (reg->start_entry + entry_id) *
+ padded_entry_size;
}
+
+ /* Read registers */
+ if (next_reg_offset + reg->entry_size >=
+ IDLE_CHK_MAX_ENTRIES_SIZE) {
+ DP_NOTICE(p_hwfn,
+ "idle check registers entry is too large\n");
+ return 0;
+ }
+
+ next_reg_offset +=
+ qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ cond_reg_values +
+ next_reg_offset,
+ dump, addr,
+ reg->entry_size);
}
/* Call rule's condition function - a return value of
* true indicates failure.
*/
if ((*cond_arr[rule->cond_id])(cond_reg_values,
- imm_values) || !dump) {
+ imm_values)) {
offset +=
- qed_idle_chk_dump_failure(p_hwfn,
- p_ptt,
- dump_buf + offset,
- dump,
- rule->rule_id,
- rule,
- entry_id,
- cond_reg_values);
+ qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ rule->rule_id,
+ rule,
+ entry_id,
+ cond_reg_values);
(*num_failing_rules)++;
break;
}
@@ -3818,13 +4100,18 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
struct mcp_file_att file_att;
/* Call NVRAM get file command */
- if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT,
- image_type, &ret_mcp_resp, &ret_mcp_param,
- &ret_txn_size, (u32 *)&file_att) != 0)
- return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+ int nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
+ p_ptt,
+ DRV_MSG_CODE_NVM_GET_FILE_ATT,
+ image_type,
+ &ret_mcp_resp,
+ &ret_mcp_param,
+ &ret_txn_size,
+ (u32 *)&file_att);
/* Check response */
- if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+ if (nvm_result ||
+ (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
/* Update return values */
@@ -3944,7 +4231,6 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
u32 running_mfw_addr =
MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
- enum dbg_status status;
u32 nvram_image_type;
*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
@@ -3955,30 +4241,12 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
nvram_image_type =
(*running_bundle_id ==
DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
- status = qed_find_nvram_image(p_hwfn,
- p_ptt,
- nvram_image_type,
- trace_meta_offset_bytes,
- trace_meta_size_bytes);
-
- return status;
-}
-
-/* Reads the MCP Trace data from the specified GRC address into the specified
- * buffer.
- */
-static void qed_mcp_trace_read_data(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 grc_addr, u32 size_in_dwords, u32 *buf)
-{
- u32 i;
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "mcp_trace_read_data: reading trace data of size %d dwords from GRC address 0x%x\n",
- size_in_dwords, grc_addr);
- for (i = 0; i < size_in_dwords; i++, grc_addr += BYTES_IN_DWORD)
- buf[i] = qed_rd(p_hwfn, p_ptt, grc_addr);
+ return qed_find_nvram_image(p_hwfn,
+ p_ptt,
+ nvram_image_type,
+ trace_meta_offset_bytes,
+ trace_meta_size_bytes);
}
/* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified
@@ -4034,11 +4302,14 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
bool dump, u32 *num_dumped_dwords)
{
u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
- u32 trace_meta_size_dwords, running_bundle_id, offset = 0;
- u32 trace_meta_offset_bytes, trace_meta_size_bytes;
+ u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
+ u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
enum dbg_status status;
+ bool mcp_access;
int halted = 0;
+ mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
+
*num_dumped_dwords = 0;
/* Get trace data info */
@@ -4060,7 +4331,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
* consistent if halt fails, MCP trace is taken anyway, with a small
* risk that it may be corrupt.
*/
- if (dump) {
+ if (dump && mcp_access) {
halted = !qed_mcp_halt(p_hwfn, p_ptt);
if (!halted)
DP_NOTICE(p_hwfn, "MCP halt failed!\n");
@@ -4078,13 +4349,12 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
dump, "size", trace_data_size_dwords);
/* Read trace data from scratchpad into dump buffer */
- if (dump)
- qed_mcp_trace_read_data(p_hwfn,
- p_ptt,
- trace_data_grc_addr,
- trace_data_size_dwords,
- dump_buf + offset);
- offset += trace_data_size_dwords;
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ BYTES_TO_DWORDS(trace_data_grc_addr),
+ trace_data_size_dwords);
/* Resume MCP (only if halt succeeded) */
if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0)
@@ -4095,38 +4365,38 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
dump, "mcp_trace_meta", 1);
/* Read trace meta info */
- status = qed_mcp_trace_get_meta_info(p_hwfn,
- p_ptt,
- trace_data_size_bytes,
- &running_bundle_id,
- &trace_meta_offset_bytes,
- &trace_meta_size_bytes);
- if (status != DBG_STATUS_OK)
- return status;
+ if (mcp_access) {
+ status = qed_mcp_trace_get_meta_info(p_hwfn,
+ p_ptt,
+ trace_data_size_bytes,
+ &running_bundle_id,
+ &trace_meta_offset_bytes,
+ &trace_meta_size_bytes);
+ if (status == DBG_STATUS_OK)
+ trace_meta_size_dwords =
+ BYTES_TO_DWORDS(trace_meta_size_bytes);
+ }
- /* Dump trace meta size param (trace_meta_size_bytes is always
- * dword-aligned).
- */
- trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
- offset += qed_dump_num_param(dump_buf + offset, dump, "size",
- trace_meta_size_dwords);
+ /* Dump trace meta size param */
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "size", trace_meta_size_dwords);
/* Read trace meta image into dump buffer */
- if (dump) {
+ if (dump && trace_meta_size_dwords)
status = qed_mcp_trace_read_meta(p_hwfn,
- p_ptt,
- trace_meta_offset_bytes,
- trace_meta_size_bytes,
- dump_buf + offset);
- if (status != DBG_STATUS_OK)
- return status;
- }
-
- offset += trace_meta_size_dwords;
+ p_ptt,
+ trace_meta_offset_bytes,
+ trace_meta_size_bytes,
+ dump_buf + offset);
+ if (status == DBG_STATUS_OK)
+ offset += trace_meta_size_dwords;
*num_dumped_dwords = offset;
- return DBG_STATUS_OK;
+ /* If no mcp access, indicate that the dump doesn't contain the meta
+ * data from NVRAM.
+ */
+ return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
}
/* Dump GRC FIFO */
@@ -4311,9 +4581,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ struct fw_asserts_ram_section *asserts;
char storm_letter_str[2] = "?";
struct fw_info fw_info;
- u32 offset = 0, i;
+ u32 offset = 0;
u8 storm_id;
/* Dump global params */
@@ -4323,8 +4594,8 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_str_param(dump_buf + offset,
dump, "dump-type", "fw-asserts");
for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
- u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx,
- last_list_idx, element_addr;
+ u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
+ u32 last_list_idx, addr;
if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id])
continue;
@@ -4332,6 +4603,8 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
/* Read FW info for the current Storm */
qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
+ asserts = &fw_info.fw_asserts_section;
+
/* Dump FW Asserts section header and params */
storm_letter_str[0] = s_storm_defs[storm_id].letter;
offset += qed_dump_section_hdr(dump_buf + offset, dump,
@@ -4339,12 +4612,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_str_param(dump_buf + offset, dump, "storm",
storm_letter_str);
offset += qed_dump_num_param(dump_buf + offset, dump, "size",
- fw_info.fw_asserts_section.
- list_element_dword_size);
+ asserts->list_element_dword_size);
if (!dump) {
- offset += fw_info.fw_asserts_section.
- list_element_dword_size;
+ offset += asserts->list_element_dword_size;
continue;
}
@@ -4352,28 +4623,22 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
fw_asserts_section_addr =
s_storm_defs[storm_id].sem_fast_mem_addr +
SEM_FAST_REG_INT_RAM +
- RAM_LINES_TO_BYTES(fw_info.fw_asserts_section.
- section_ram_line_offset);
+ RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
next_list_idx_addr =
fw_asserts_section_addr +
- DWORDS_TO_BYTES(fw_info.fw_asserts_section.
- list_next_index_dword_offset);
+ DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
last_list_idx = (next_list_idx > 0
? next_list_idx
- : fw_info.fw_asserts_section.list_num_elements)
- - 1;
- element_addr =
- fw_asserts_section_addr +
- DWORDS_TO_BYTES(fw_info.fw_asserts_section.
- list_dword_offset) +
- last_list_idx *
- DWORDS_TO_BYTES(fw_info.fw_asserts_section.
- list_element_dword_size);
- for (i = 0;
- i < fw_info.fw_asserts_section.list_element_dword_size;
- i++, offset++, element_addr += BYTES_IN_DWORD)
- dump_buf[offset] = qed_rd(p_hwfn, p_ptt, element_addr);
+ : asserts->list_num_elements) - 1;
+ addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
+ asserts->list_dword_offset +
+ last_list_idx * asserts->list_element_dword_size;
+ offset +=
+ qed_grc_dump_addr_range(p_hwfn, p_ptt,
+ dump_buf + offset,
+ dump, addr,
+ asserts->list_element_dword_size);
}
/* Dump last section */
@@ -4386,13 +4651,10 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
{
/* Convert binary data to debug arrays */
- u32 num_of_buffers = *(u32 *)bin_ptr;
- struct bin_buffer_hdr *buf_array;
+ struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
u8 buf_id;
- buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
-
- for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
+ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
s_dbg_arrays[buf_id].ptr =
(u32 *)(bin_ptr + buf_array[buf_id].offset);
s_dbg_arrays[buf_id].size_in_dwords =
@@ -4402,6 +4664,17 @@ enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
return DBG_STATUS_OK;
}
+/* Assign default GRC param values */
+void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 i;
+
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+ dev_data->grc.param_val[i] =
+ s_grc_param_defs[i].default_val[dev_data->chip_id];
+}
+
enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *buf_size)
@@ -4441,8 +4714,9 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
/* GRC Dump */
status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
- /* Clear all GRC params */
- qed_dbg_grc_clear_params(p_hwfn);
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
return status;
}
@@ -4495,6 +4769,10 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
/* Idle Check Dump */
*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
return DBG_STATUS_OK;
}
@@ -4519,11 +4797,15 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
u32 needed_buf_size_in_dwords;
enum dbg_status status;
- status = qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
+ /* validate buffer size */
+ status =
+ qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
&needed_buf_size_in_dwords);
- if (status != DBG_STATUS_OK)
+ if (status != DBG_STATUS_OK &&
+ status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
return status;
+
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
@@ -4531,8 +4813,13 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
qed_update_blocks_reset_state(p_hwfn, p_ptt);
/* Perform dump */
- return qed_mcp_trace_dump(p_hwfn,
- p_ptt, dump_buf, true, num_dumped_dwords);
+ status = qed_mcp_trace_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return status;
}
enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -4567,8 +4854,14 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
/* Update reset state */
qed_update_blocks_reset_state(p_hwfn, p_ptt);
- return qed_reg_fifo_dump(p_hwfn,
- p_ptt, dump_buf, true, num_dumped_dwords);
+
+ status = qed_reg_fifo_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return status;
}
enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -4603,8 +4896,13 @@ enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
/* Update reset state */
qed_update_blocks_reset_state(p_hwfn, p_ptt);
- return qed_igu_fifo_dump(p_hwfn,
- p_ptt, dump_buf, true, num_dumped_dwords);
+
+ status = qed_igu_fifo_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return status;
}
enum dbg_status
@@ -4641,9 +4939,16 @@ enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
/* Update reset state */
qed_update_blocks_reset_state(p_hwfn, p_ptt);
- return qed_protection_override_dump(p_hwfn,
- p_ptt,
- dump_buf, true, num_dumped_dwords);
+
+ status = qed_protection_override_dump(p_hwfn,
+ p_ptt,
+ dump_buf,
+ true, num_dumped_dwords);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return status;
}
enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -5045,13 +5350,10 @@ static char s_temp_buf[MAX_MSG_LEN];
enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
{
/* Convert binary data to debug arrays */
- u32 num_of_buffers = *(u32 *)bin_ptr;
- struct bin_buffer_hdr *buf_array;
+ struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
u8 buf_id;
- buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
-
- for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
+ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
s_dbg_arrays[buf_id].ptr =
(u32 *)(bin_ptr + buf_array[buf_id].offset);
s_dbg_arrays[buf_id].size_in_dwords =
@@ -5874,16 +6176,16 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
- "raw: 0x%016llx, address: 0x%07llx, access: %-5s, pf: %2lld, vf: %s, port: %lld, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
+ "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
elements[i].data,
- GET_FIELD(elements[i].data,
+ (u32)GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_ADDRESS) *
REG_FIFO_ELEMENT_ADDR_FACTOR,
s_access_strs[GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_ACCESS)],
- GET_FIELD(elements[i].data,
- REG_FIFO_ELEMENT_PF), vf_str,
- GET_FIELD(elements[i].data,
+ (u32)GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PF), vf_str,
+ (u32)GET_FIELD(elements[i].data,
REG_FIFO_ELEMENT_PORT),
s_privilege_strs[GET_FIELD(elements[i].
data,
@@ -6189,13 +6491,13 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
results_offset),
- "window %2d, address: 0x%07x, size: %7lld regs, read: %lld, write: %lld, read protection: %-12s, write protection: %-12s\n",
+ "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
i, address,
- GET_FIELD(elements[i].data,
+ (u32)GET_FIELD(elements[i].data,
PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
- GET_FIELD(elements[i].data,
+ (u32)GET_FIELD(elements[i].data,
PROTECTION_OVERRIDE_ELEMENT_READ),
- GET_FIELD(elements[i].data,
+ (u32)GET_FIELD(elements[i].data,
PROTECTION_OVERRIDE_ELEMENT_WRITE),
s_protection_strs[GET_FIELD(elements[i].data,
PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
@@ -6508,7 +6810,7 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
*/
rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
&buf_size_dwords);
- if (rc != DBG_STATUS_OK)
+ if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
return rc;
feature->buf_size = buf_size_dwords * sizeof(u32);
feature->dump_buf = vmalloc(feature->buf_size);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index e518f914eab1..5f31140d0b77 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -75,7 +75,8 @@ enum BAR_ID {
BAR_ID_1 /* Used for doorbells */
};
-static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
+static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum BAR_ID bar_id)
{
u32 bar_reg = (bar_id == BAR_ID_0 ?
PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
@@ -84,7 +85,7 @@ static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
if (IS_VF(p_hwfn->cdev))
return 1 << 17;
- val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+ val = qed_rd(p_hwfn, p_ptt, bar_reg);
if (val)
return 1 << (val + 15);
@@ -182,199 +183,573 @@ void qed_resc_free(struct qed_dev *cdev)
}
qed_iov_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
- qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
+ qed_dcbx_info_free(p_hwfn);
}
}
-static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
+/******************** QM initialization *******************/
+#define ACTIVE_TCS_BMAP 0x9f
+#define ACTIVE_TCS_BMAP_4PORT_K2 0xf
+
+/* determines the physical queue flags for a given PF. */
+static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
{
- u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
- struct qed_qm_info *qm_info = &p_hwfn->qm_info;
- struct init_qm_port_params *p_qm_port;
- bool init_rdma_offload_pq = false;
- bool init_pure_ack_pq = false;
- bool init_ooo_pq = false;
- u16 num_pqs, multi_cos_tcs = 1;
- u8 pf_wfq = qm_info->pf_wfq;
- u32 pf_rl = qm_info->pf_rl;
- u16 num_pf_rls = 0;
- u16 num_vfs = 0;
-
-#ifdef CONFIG_QED_SRIOV
- if (p_hwfn->cdev->p_iov_info)
- num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
-#endif
- memset(qm_info, 0, sizeof(*qm_info));
+ u32 flags;
- num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */
- num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
+ /* common flags */
+ flags = PQ_FLAGS_LB;
- if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
- num_pqs++; /* for RoCE queue */
- init_rdma_offload_pq = true;
- /* we subtract num_vfs because each require a rate limiter,
- * and one default rate limiter
- */
- if (p_hwfn->pf_params.rdma_pf_params.enable_dcqcn)
- num_pf_rls = RESC_NUM(p_hwfn, QED_RL) - num_vfs - 1;
+ /* feature flags */
+ if (IS_QED_SRIOV(p_hwfn->cdev))
+ flags |= PQ_FLAGS_VFS;
- num_pqs += num_pf_rls;
- qm_info->num_pf_rls = (u8) num_pf_rls;
+ /* protocol flags */
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH:
+ flags |= PQ_FLAGS_MCOS;
+ break;
+ case QED_PCI_FCOE:
+ flags |= PQ_FLAGS_OFLD;
+ break;
+ case QED_PCI_ISCSI:
+ flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
+ break;
+ case QED_PCI_ETH_ROCE:
+ flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
+ break;
+ default:
+ DP_ERR(p_hwfn,
+ "unknown personality %d\n", p_hwfn->hw_info.personality);
+ return 0;
}
- if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
- num_pqs += 2; /* for iSCSI pure-ACK / OOO queue */
- init_pure_ack_pq = true;
- init_ooo_pq = true;
- }
+ return flags;
+}
- /* Sanity checking that setup requires legal number of resources */
- if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
- DP_ERR(p_hwfn,
- "Need too many Physical queues - 0x%04x when only %04x are available\n",
- num_pqs, RESC_NUM(p_hwfn, QED_PQ));
- return -EINVAL;
- }
+/* Getters for resource amounts necessary for qm initialization */
+u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn)
+{
+ return p_hwfn->hw_info.num_hw_tc;
+}
- /* PQs will be arranged as follows: First per-TC PQ then pure-LB quete.
- */
- qm_info->qm_pq_params = kcalloc(num_pqs,
- sizeof(struct init_qm_pq_params),
- b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
- if (!qm_info->qm_pq_params)
- goto alloc_err;
+u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn)
+{
+ return IS_QED_SRIOV(p_hwfn->cdev) ?
+ p_hwfn->cdev->p_iov_info->total_vfs : 0;
+}
- qm_info->qm_vport_params = kcalloc(num_vports,
- sizeof(struct init_qm_vport_params),
- b_sleepable ? GFP_KERNEL
- : GFP_ATOMIC);
- if (!qm_info->qm_vport_params)
- goto alloc_err;
+#define NUM_DEFAULT_RLS 1
- qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
- sizeof(struct init_qm_port_params),
- b_sleepable ? GFP_KERNEL
- : GFP_ATOMIC);
- if (!qm_info->qm_port_params)
- goto alloc_err;
+u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
+{
+ u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
- qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
- b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
- if (!qm_info->wfq_data)
- goto alloc_err;
+ /* num RLs can't exceed resource amount of rls or vports */
+ num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL),
+ RESC_NUM(p_hwfn, QED_VPORT));
+
+ /* Make sure after we reserve there's something left */
+ if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS)
+ return 0;
- vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
+ /* subtract rls necessary for VFs and one default one for the PF */
+ num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;
- /* First init rate limited queues */
- for (curr_queue = 0; curr_queue < num_pf_rls; curr_queue++) {
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id++;
- qm_info->qm_pq_params[curr_queue].tc_id =
- p_hwfn->hw_info.non_offload_tc;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- qm_info->qm_pq_params[curr_queue].rl_valid = 1;
- }
+ return num_pf_rls;
+}
- /* First init per-TC PQs */
- for (i = 0; i < multi_cos_tcs; i++) {
- struct init_qm_pq_params *params =
- &qm_info->qm_pq_params[curr_queue++];
+u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
+{
+ u32 pq_flags = qed_get_pq_flags(p_hwfn);
- if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
- p_hwfn->hw_info.personality == QED_PCI_ETH) {
- params->vport_id = vport_id;
- params->tc_id = p_hwfn->hw_info.non_offload_tc;
- params->wrr_group = 1;
- } else {
- params->vport_id = vport_id;
- params->tc_id = p_hwfn->hw_info.offload_tc;
- params->wrr_group = 1;
- }
- }
+ /* all pqs share the same vport, except for vfs and pf_rl pqs */
+ return (!!(PQ_FLAGS_RLS & pq_flags)) *
+ qed_init_qm_get_num_pf_rls(p_hwfn) +
+ (!!(PQ_FLAGS_VFS & pq_flags)) *
+ qed_init_qm_get_num_vfs(p_hwfn) + 1;
+}
- /* Then init pure-LB PQ */
- qm_info->pure_lb_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id =
- (u8) RESC_START(p_hwfn, QED_VPORT);
- qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
-
- qm_info->offload_pq = 0;
- if (init_rdma_offload_pq) {
- qm_info->offload_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
- qm_info->qm_pq_params[curr_queue].tc_id =
- p_hwfn->hw_info.offload_tc;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
- }
-
- if (init_pure_ack_pq) {
- qm_info->pure_ack_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
- qm_info->qm_pq_params[curr_queue].tc_id =
- p_hwfn->hw_info.offload_tc;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
- }
-
- if (init_ooo_pq) {
- qm_info->ooo_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
- qm_info->qm_pq_params[curr_queue].tc_id = DCBX_ISCSI_OOO_TC;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
- }
-
- /* Then init per-VF PQs */
- vf_offset = curr_queue;
- for (i = 0; i < num_vfs; i++) {
- /* First vport is used by the PF */
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
- qm_info->qm_pq_params[curr_queue].tc_id =
- p_hwfn->hw_info.non_offload_tc;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- qm_info->qm_pq_params[curr_queue].rl_valid = 1;
- curr_queue++;
- }
-
- qm_info->vf_queues_offset = vf_offset;
- qm_info->num_pqs = num_pqs;
- qm_info->num_vports = num_vports;
+/* calc amount of PQs according to the requested flags */
+u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
+{
+ u32 pq_flags = qed_get_pq_flags(p_hwfn);
+ return (!!(PQ_FLAGS_RLS & pq_flags)) *
+ qed_init_qm_get_num_pf_rls(p_hwfn) +
+ (!!(PQ_FLAGS_MCOS & pq_flags)) *
+ qed_init_qm_get_num_tcs(p_hwfn) +
+ (!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) +
+ (!!(PQ_FLAGS_ACK & pq_flags)) + (!!(PQ_FLAGS_OFLD & pq_flags)) +
+ (!!(PQ_FLAGS_LLT & pq_flags)) +
+ (!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn);
+}
+
+/* initialize the top level QM params */
+static void qed_init_qm_params(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ bool four_port;
+
+ /* pq and vport bases for this PF */
+ qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ);
+ qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
+
+ /* rate limiting and weighted fair queueing are always enabled */
+ qm_info->vport_rl_en = 1;
+ qm_info->vport_wfq_en = 1;
+
+ /* TC config is different for AH 4 port */
+ four_port = p_hwfn->cdev->num_ports_in_engines == MAX_NUM_PORTS_K2;
+
+ /* in AH 4 port we have fewer TCs per port */
+ qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
+ NUM_OF_PHYS_TCS;
+
+ /* unless MFW indicated otherwise, ooo_tc == 3 for
+ * AH 4-port and 4 otherwise.
+ */
+ if (!qm_info->ooo_tc)
+ qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC :
+ DCBX_TCP_OOO_TC;
+}
+
+/* initialize qm vport params */
+static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 i;
+
+ /* all vports participate in weighted fair queueing */
+ for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++)
+ qm_info->qm_vport_params[i].vport_wfq = 1;
+}
+
+/* initialize qm port params */
+static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
+{
/* Initialize qm port parameters */
- num_ports = p_hwfn->cdev->num_ports_in_engines;
+ u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engines;
+
+ /* indicate how ooo and high pri traffic is dealt with */
+ active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
+ ACTIVE_TCS_BMAP_4PORT_K2 :
+ ACTIVE_TCS_BMAP;
+
for (i = 0; i < num_ports; i++) {
- p_qm_port = &qm_info->qm_port_params[i];
+ struct init_qm_port_params *p_qm_port =
+ &p_hwfn->qm_info.qm_port_params[i];
+
p_qm_port->active = 1;
- if (num_ports == 4)
- p_qm_port->active_phys_tcs = 0x7;
- else
- p_qm_port->active_phys_tcs = 0x9f;
+ p_qm_port->active_phys_tcs = active_phys_tcs;
p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
}
+}
+
+/* Reset the params which must be reset for qm init. QM init may be called as
+ * a result of flows other than driver load (e.g. dcbx renegotiation). Other
+ * params may be affected by the init but would simply recalculate to the same
+ * values. The allocations made for QM init, ports, vports, pqs and vfqs are not
+ * affected as these amounts stay the same.
+ */
+static void qed_init_qm_reset_params(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ qm_info->num_pqs = 0;
+ qm_info->num_vports = 0;
+ qm_info->num_pf_rls = 0;
+ qm_info->num_vf_pqs = 0;
+ qm_info->first_vf_pq = 0;
+ qm_info->first_mcos_pq = 0;
+ qm_info->first_rl_pq = 0;
+}
+
+static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ qm_info->num_vports++;
+
+ if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
+ DP_ERR(p_hwfn,
+ "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
+ qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));
+}
+
+/* initialize a single pq and manage qm_info resource accounting.
+ * The pq_init_flags param determines whether the PQ is rate limited
+ * (for VF or PF) and whether a new vport is allocated to the pq or not
+ * (i.e. vport will be shared).
+ */
+
+/* flags for pq init */
+#define PQ_INIT_SHARE_VPORT (1 << 0)
+#define PQ_INIT_PF_RL (1 << 1)
+#define PQ_INIT_VF_RL (1 << 2)
+
+/* defines for pq init */
+#define PQ_INIT_DEFAULT_WRR_GROUP 1
+#define PQ_INIT_DEFAULT_TC 0
+#define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc)
+
+static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
+ struct qed_qm_info *qm_info,
+ u8 tc, u32 pq_init_flags)
+{
+ u16 pq_idx = qm_info->num_pqs, max_pq = qed_init_qm_get_num_pqs(p_hwfn);
+
+ if (pq_idx > max_pq)
+ DP_ERR(p_hwfn,
+ "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
+
+ /* init pq params */
+ qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
+ qm_info->num_vports;
+ qm_info->qm_pq_params[pq_idx].tc_id = tc;
+ qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
+ qm_info->qm_pq_params[pq_idx].rl_valid =
+ (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL);
+
+ /* qm params accounting */
+ qm_info->num_pqs++;
+ if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
+ qm_info->num_vports++;
+
+ if (pq_init_flags & PQ_INIT_PF_RL)
+ qm_info->num_pf_rls++;
+
+ if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
+ DP_ERR(p_hwfn,
+ "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
+ qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));
+
+ if (qm_info->num_pf_rls > qed_init_qm_get_num_pf_rls(p_hwfn))
+ DP_ERR(p_hwfn,
+ "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n",
+ qm_info->num_pf_rls, qed_init_qm_get_num_pf_rls(p_hwfn));
+}
+
+/* get pq index according to PQ_FLAGS */
+static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
+ u32 pq_flags)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ /* Can't have multiple flags set here */
+ if (bitmap_weight((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1)
+ goto err;
+
+ switch (pq_flags) {
+ case PQ_FLAGS_RLS:
+ return &qm_info->first_rl_pq;
+ case PQ_FLAGS_MCOS:
+ return &qm_info->first_mcos_pq;
+ case PQ_FLAGS_LB:
+ return &qm_info->pure_lb_pq;
+ case PQ_FLAGS_OOO:
+ return &qm_info->ooo_pq;
+ case PQ_FLAGS_ACK:
+ return &qm_info->pure_ack_pq;
+ case PQ_FLAGS_OFLD:
+ return &qm_info->offload_pq;
+ case PQ_FLAGS_LLT:
+ return &qm_info->low_latency_pq;
+ case PQ_FLAGS_VFS:
+ return &qm_info->first_vf_pq;
+ default:
+ goto err;
+ }
+
+err:
+ DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
+ return NULL;
+}
+
+/* save pq index in qm info */
+static void qed_init_qm_set_idx(struct qed_hwfn *p_hwfn,
+ u32 pq_flags, u16 pq_val)
+{
+ u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+ *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
+}
+
+/* get tx pq index, with the PQ TX base already set (ready for context init) */
+u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags)
+{
+ u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+ return *base_pq_idx + CM_TX_PQ_BASE;
+}
+
+u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc)
+{
+ u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn);
+
+ if (tc > max_tc)
+ DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
+
+ return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
+}
+
+u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
+{
+ u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn);
+
+ if (vf > max_vf)
+ DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
+
+ return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
+}
+
+u16 qed_get_cm_pq_idx_rl(struct qed_hwfn *p_hwfn, u8 rl)
+{
+ u16 max_rl = qed_init_qm_get_num_pf_rls(p_hwfn);
+
+ if (rl > max_rl)
+ DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);
+
+ return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
+}
+
+/* Functions for creating specific types of pqs */
+static void qed_init_qm_lb_pq(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
+ qed_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_ooo_pq(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
+ qed_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
+ qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
+ qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT))
+ return;
- qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs);
+ qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 tc_idx;
- qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
+ return;
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
+ for (tc_idx = 0; tc_idx < qed_init_qm_get_num_tcs(p_hwfn); tc_idx++)
+ qed_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_vf_pqs(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ u16 vf_idx, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
qm_info->num_vf_pqs = num_vfs;
- qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
+ for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
+ qed_init_qm_pq(p_hwfn,
+ qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL);
+}
- for (i = 0; i < qm_info->num_vports; i++)
- qm_info->qm_vport_params[i].vport_wfq = 1;
+static void qed_init_qm_rl_pqs(struct qed_hwfn *p_hwfn)
+{
+ u16 pf_rls_idx, num_pf_rls = qed_init_qm_get_num_pf_rls(p_hwfn);
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
- qm_info->vport_rl_en = 1;
- qm_info->vport_wfq_en = 1;
- qm_info->pf_rl = pf_rl;
- qm_info->pf_wfq = pf_wfq;
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
+ for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
+ qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL);
+}
+
+static void qed_init_qm_pq_params(struct qed_hwfn *p_hwfn)
+{
+ /* rate limited pqs, must come first (FW assumption) */
+ qed_init_qm_rl_pqs(p_hwfn);
+
+ /* pqs for multi cos */
+ qed_init_qm_mcos_pqs(p_hwfn);
+
+ /* pure loopback pq */
+ qed_init_qm_lb_pq(p_hwfn);
+
+ /* out of order pq */
+ qed_init_qm_ooo_pq(p_hwfn);
+
+ /* pure ack pq */
+ qed_init_qm_pure_ack_pq(p_hwfn);
+
+ /* pq for offloaded protocol */
+ qed_init_qm_offload_pq(p_hwfn);
+
+ /* low latency pq */
+ qed_init_qm_low_latency_pq(p_hwfn);
+
+ /* done sharing vports */
+ qed_init_qm_advance_vport(p_hwfn);
+
+ /* pqs for vfs */
+ qed_init_qm_vf_pqs(p_hwfn);
+}
+
+/* compare values of getters against resource amounts */
+static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn)
+{
+ if (qed_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, QED_VPORT)) {
+ DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
+ return -EINVAL;
+ }
+
+ if (qed_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, QED_PQ)) {
+ DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
+ return -EINVAL;
+ }
return 0;
+}
-alloc_err:
- qed_qm_info_free(p_hwfn);
- return -ENOMEM;
+static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ struct init_qm_vport_params *vport;
+ struct init_qm_port_params *port;
+ struct init_qm_pq_params *pq;
+ int i, tc;
+
+ /* top level params */
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n",
+ qm_info->start_pq,
+ qm_info->start_vport,
+ qm_info->pure_lb_pq,
+ qm_info->offload_pq, qm_info->pure_ack_pq);
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n",
+ qm_info->ooo_pq,
+ qm_info->first_vf_pq,
+ qm_info->num_pqs,
+ qm_info->num_vf_pqs,
+ qm_info->num_vports, qm_info->max_phys_tcs_per_port);
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
+ qm_info->pf_rl_en,
+ qm_info->pf_wfq_en,
+ qm_info->vport_rl_en,
+ qm_info->vport_wfq_en,
+ qm_info->pf_wfq,
+ qm_info->pf_rl,
+ qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn));
+
+ /* port table */
+ for (i = 0; i < p_hwfn->cdev->num_ports_in_engines; i++) {
+ port = &(qm_info->qm_port_params[i]);
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n",
+ i,
+ port->active,
+ port->active_phys_tcs,
+ port->num_pbf_cmd_lines,
+ port->num_btb_blocks, port->reserved);
+ }
+
+ /* vport table */
+ for (i = 0; i < qm_info->num_vports; i++) {
+ vport = &(qm_info->qm_vport_params[i]);
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ",
+ qm_info->start_vport + i,
+ vport->vport_rl, vport->vport_wfq);
+ for (tc = 0; tc < NUM_OF_TCS; tc++)
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "%d ", vport->first_tx_pq_id[tc]);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "]\n");
+ }
+
+ /* pq table */
+ for (i = 0; i < qm_info->num_pqs; i++) {
+ pq = &(qm_info->qm_pq_params[i]);
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
+ qm_info->start_pq + i,
+ pq->vport_id,
+ pq->tc_id, pq->wrr_group, pq->rl_valid);
+ }
+}
+
+static void qed_init_qm_info(struct qed_hwfn *p_hwfn)
+{
+ /* reset params required for init run */
+ qed_init_qm_reset_params(p_hwfn);
+
+ /* init QM top level params */
+ qed_init_qm_params(p_hwfn);
+
+ /* init QM port params */
+ qed_init_qm_port_params(p_hwfn);
+
+ /* init QM vport params */
+ qed_init_qm_vport_params(p_hwfn);
+
+ /* init QM physical queue params */
+ qed_init_qm_pq_params(p_hwfn);
+
+ /* display all that init */
+ qed_dp_init_qm_params(p_hwfn);
}
/* This function reconfigures the QM pf on the fly.
@@ -391,17 +766,8 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
bool b_rc;
int rc;
- /* qm_info is allocated in qed_init_qm_info() which is already called
- * from qed_resc_alloc() or previous call of qed_qm_reconf().
- * The allocated size may change each init, so we free it before next
- * allocation.
- */
- qed_qm_info_free(p_hwfn);
-
/* initialize qed's qm data structure */
- rc = qed_init_qm_info(p_hwfn, false);
- if (rc)
- return rc;
+ qed_init_qm_info(p_hwfn);
/* stop PF's qm queues */
spin_lock_bh(&qm_lock);
@@ -415,7 +781,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_init_clear_rt_data(p_hwfn);
/* prepare QM portion of runtime array */
- qed_qm_init_pf(p_hwfn);
+ qed_qm_init_pf(p_hwfn, p_ptt);
/* activate init tool on runtime array */
rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
@@ -434,6 +800,47 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return 0;
}
+static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ int rc;
+
+ rc = qed_init_qm_sanity(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
+ qed_init_qm_get_num_pqs(p_hwfn),
+ GFP_KERNEL);
+ if (!qm_info->qm_pq_params)
+ goto alloc_err;
+
+ qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
+ qed_init_qm_get_num_vports(p_hwfn),
+ GFP_KERNEL);
+ if (!qm_info->qm_vport_params)
+ goto alloc_err;
+
+ qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
+ p_hwfn->cdev->num_ports_in_engines,
+ GFP_KERNEL);
+ if (!qm_info->qm_port_params)
+ goto alloc_err;
+
+ qm_info->wfq_data = kzalloc(sizeof(*qm_info->wfq_data) *
+ qed_init_qm_get_num_vports(p_hwfn),
+ GFP_KERNEL);
+ if (!qm_info->wfq_data)
+ goto alloc_err;
+
+ return 0;
+
+alloc_err:
+ DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
+ qed_qm_info_free(p_hwfn);
+ return -ENOMEM;
+}
+
int qed_resc_alloc(struct qed_dev *cdev)
{
struct qed_iscsi_info *p_iscsi_info;
@@ -442,8 +849,10 @@ int qed_resc_alloc(struct qed_dev *cdev)
#ifdef CONFIG_QED_LL2
struct qed_ll2_info *p_ll2_info;
#endif
+ u32 rdma_tasks, excess_tasks;
struct qed_consq *p_consq;
struct qed_eq *p_eq;
+ u32 line_count;
int i, rc = 0;
if (IS_VF(cdev))
@@ -465,19 +874,44 @@ int qed_resc_alloc(struct qed_dev *cdev)
/* Set the HW cid/tid numbers (in the context manager)
* Must be done prior to any further computations.
*/
- rc = qed_cxt_set_pf_params(p_hwfn);
+ rc = qed_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS);
if (rc)
goto alloc_err;
- /* Prepare and process QM requirements */
- rc = qed_init_qm_info(p_hwfn, true);
+ rc = qed_alloc_qm_data(p_hwfn);
if (rc)
goto alloc_err;
+ /* init qm info */
+ qed_init_qm_info(p_hwfn);
+
/* Compute the ILT client partition */
- rc = qed_cxt_cfg_ilt_compute(p_hwfn);
- if (rc)
- goto alloc_err;
+ rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "too many ILT lines; re-computing with less lines\n");
+ /* In case there are not enough ILT lines we reduce the
+ * number of RDMA tasks and re-compute.
+ */
+ excess_tasks =
+ qed_cxt_cfg_ilt_compute_excess(p_hwfn, line_count);
+ if (!excess_tasks)
+ goto alloc_err;
+
+ rdma_tasks = RDMA_MAX_TIDS - excess_tasks;
+ rc = qed_cxt_set_pf_params(p_hwfn, rdma_tasks);
+ if (rc)
+ goto alloc_err;
+
+ rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "failed ILT compute. Requested too many lines: %u\n",
+ line_count);
+
+ goto alloc_err;
+ }
+ }
/* CID map / ILT shadow table / T2
* The table sizes are determined by the computations above
@@ -674,11 +1108,19 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
return rc;
}
-static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
+static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
{
int hw_mode = 0;
- hw_mode = (1 << MODE_BB_B0);
+ if (QED_IS_BB_B0(p_hwfn->cdev)) {
+ hw_mode |= 1 << MODE_BB;
+ } else if (QED_IS_AH(p_hwfn->cdev)) {
+ hw_mode |= 1 << MODE_K2;
+ } else {
+ DP_NOTICE(p_hwfn, "Unknown chip type %#x\n",
+ p_hwfn->cdev->type);
+ return -EINVAL;
+ }
switch (p_hwfn->cdev->num_ports_in_engines) {
case 1:
@@ -693,7 +1135,7 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
default:
DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
p_hwfn->cdev->num_ports_in_engines);
- return;
+ return -EINVAL;
}
switch (p_hwfn->cdev->mf_mode) {
@@ -719,6 +1161,8 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
"Configuring function for hw_mode: 0x%08x\n",
p_hwfn->hw_info.hw_mode);
+
+ return 0;
}
/* Init run time data for all PFs on an engine. */
@@ -748,16 +1192,67 @@ static void qed_init_cau_rt_data(struct qed_dev *cdev)
}
}
+static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 val, wr_mbs, cache_line_size;
+
+ val = qed_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0);
+ switch (val) {
+ case 0:
+ wr_mbs = 128;
+ break;
+ case 1:
+ wr_mbs = 256;
+ break;
+ case 2:
+ wr_mbs = 512;
+ break;
+ default:
+ DP_INFO(p_hwfn,
+ "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
+ val);
+ return;
+ }
+
+ cache_line_size = min_t(u32, L1_CACHE_BYTES, wr_mbs);
+ switch (cache_line_size) {
+ case 32:
+ val = 0;
+ break;
+ case 64:
+ val = 1;
+ break;
+ case 128:
+ val = 2;
+ break;
+ case 256:
+ val = 3;
+ break;
+ default:
+ DP_INFO(p_hwfn,
+ "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
+ cache_line_size);
+ }
+
+ if (L1_CACHE_BYTES > wr_mbs)
+ DP_INFO(p_hwfn,
+ "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
+ L1_CACHE_BYTES, wr_mbs);
+
+ STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val);
+}
+
static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int hw_mode)
{
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct qed_qm_common_rt_init_params params;
struct qed_dev *cdev = p_hwfn->cdev;
+ u8 vf_id, max_num_vfs;
u16 num_pfs, pf_id;
u32 concrete_fid;
int rc = 0;
- u8 vf_id;
qed_init_cau_rt_data(cdev);
@@ -784,17 +1279,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qed_cxt_hw_init_common(p_hwfn);
- /* Close gate from NIG to BRB/Storm; By default they are open, but
- * we close them to prevent NIG from passing data to reset blocks.
- * Should have been done in the ENGINE phase, but init-tool lacks
- * proper port-pretend capabilities.
- */
- qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
- qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
- qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
- qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
- qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
- qed_port_unpretend(p_hwfn, p_ptt);
+ qed_init_cache_line_size(p_hwfn, p_ptt);
rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
if (rc)
@@ -814,7 +1299,8 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
}
- for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
+ max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
+ for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
@@ -832,17 +1318,15 @@ static int
qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
{
- u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
- u32 dpi_bit_shift, dpi_count;
+ u32 dpi_bit_shift, dpi_count, dpi_page_size;
u32 min_dpis;
+ u32 n_wids;
/* Calculate DPI size */
- dpi_page_size_1 = QED_WID_SIZE * n_cpus;
- dpi_page_size_2 = max_t(u32, QED_WID_SIZE, PAGE_SIZE);
- dpi_page_size = max_t(u32, dpi_page_size_1, dpi_page_size_2);
- dpi_page_size = roundup_pow_of_two(dpi_page_size);
+ n_wids = max_t(u32, QED_MIN_WIDS, n_cpus);
+ dpi_page_size = QED_WID_SIZE * roundup_pow_of_two(n_wids);
+ dpi_page_size = (dpi_page_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
dpi_bit_shift = ilog2(dpi_page_size / 4096);
-
dpi_count = pwm_region_size / dpi_page_size;
min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
@@ -870,13 +1354,13 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 pwm_regsize, norm_regsize;
u32 non_pwm_conn, min_addr_reg1;
- u32 db_bar_size, n_cpus;
+ u32 db_bar_size, n_cpus = 1;
u32 roce_edpm_mode;
u32 pf_dems_shift;
int rc = 0;
u8 cond;
- db_bar_size = qed_hw_bar_size(p_hwfn, BAR_ID_1);
+ db_bar_size = qed_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
if (p_hwfn->cdev->num_hwfns > 1)
db_bar_size /= 2;
@@ -931,6 +1415,8 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_rdma_dpm_bar(p_hwfn, p_ptt);
}
+ p_hwfn->wid_count = (u16) n_cpus;
+
DP_INFO(p_hwfn,
"doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
norm_regsize,
@@ -967,7 +1453,7 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- struct qed_tunn_start_params *p_tunn,
+ struct qed_tunnel_info *p_tunn,
int hw_mode,
bool b_hw_start,
enum qed_int_mode int_mode,
@@ -987,7 +1473,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
p_hwfn->qm_info.pf_rl = 100000;
}
- qed_cxt_hw_init_pf(p_hwfn);
+ qed_cxt_hw_init_pf(p_hwfn, p_ptt);
qed_int_igu_init_rt(p_hwfn);
@@ -1095,25 +1581,47 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
}
-int qed_hw_init(struct qed_dev *cdev,
- struct qed_tunn_start_params *p_tunn,
- bool b_hw_start,
- enum qed_int_mode int_mode,
- bool allow_npar_tx_switch,
- const u8 *bin_fw_data)
+static void
+qed_fill_load_req_params(struct qed_load_req_params *p_load_req,
+ struct qed_drv_load_params *p_drv_load)
+{
+ memset(p_load_req, 0, sizeof(*p_load_req));
+
+ p_load_req->drv_role = p_drv_load->is_crash_kernel ?
+ QED_DRV_ROLE_KDUMP : QED_DRV_ROLE_OS;
+ p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
+ p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
+ p_load_req->override_force_load = p_drv_load->override_force_load;
+}
+
+static int qed_vf_start(struct qed_hwfn *p_hwfn,
+ struct qed_hw_init_params *p_params)
{
+ if (p_params->p_tunn) {
+ qed_vf_set_vf_start_tunn_update_param(p_params->p_tunn);
+ qed_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn);
+ }
+
+ p_hwfn->b_int_enabled = 1;
+
+ return 0;
+}
+
+int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
+{
+ struct qed_load_req_params load_req_params;
u32 load_code, param, drv_mb_param;
bool b_default_mtu = true;
struct qed_hwfn *p_hwfn;
int rc = 0, mfw_rc, i;
- if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+ if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
return -EINVAL;
}
if (IS_PF(cdev)) {
- rc = qed_init_fw_data(cdev, bin_fw_data);
+ rc = qed_init_fw_data(cdev, p_params->bin_fw_data);
if (rc)
return rc;
}
@@ -1128,26 +1636,32 @@ int qed_hw_init(struct qed_dev *cdev,
}
if (IS_VF(cdev)) {
- p_hwfn->b_int_enabled = 1;
+ qed_vf_start(p_hwfn, p_params);
continue;
}
/* Enable DMAE in PXP */
rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
- qed_calc_hw_mode(p_hwfn);
+ rc = qed_calc_hw_mode(p_hwfn);
+ if (rc)
+ return rc;
- rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
+ qed_fill_load_req_params(&load_req_params,
+ p_params->p_drv_load_params);
+ rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
+ &load_req_params);
if (rc) {
- DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
+ DP_NOTICE(p_hwfn, "Failed sending a LOAD_REQ command\n");
return rc;
}
- qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
-
+ load_code = load_req_params.load_code;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
- "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
- rc, load_code);
+ "Load request was sent. Load code: 0x%x\n",
+ load_code);
+
+ qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
p_hwfn->first_on_engine = (load_code ==
FW_MSG_CODE_DRV_LOAD_ENGINE);
@@ -1168,11 +1682,15 @@ int qed_hw_init(struct qed_dev *cdev,
/* Fall into */
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn, p_hwfn->hw_info.hw_mode,
- b_hw_start, int_mode,
- allow_npar_tx_switch);
+ p_params->p_tunn,
+ p_hwfn->hw_info.hw_mode,
+ p_params->b_hw_start,
+ p_params->int_mode,
+ p_params->allow_npar_tx_switch);
break;
default:
+ DP_NOTICE(p_hwfn,
+ "Unexpected load code [0x%08x]", load_code);
rc = -EINVAL;
break;
}
@@ -1212,10 +1730,7 @@ int qed_hw_init(struct qed_dev *cdev,
if (IS_PF(cdev)) {
p_hwfn = QED_LEADING_HWFN(cdev);
- drv_mb_param = (FW_MAJOR_VERSION << 24) |
- (FW_MINOR_VERSION << 16) |
- (FW_REVISION_VERSION << 8) |
- (FW_ENGINEERING_VERSION);
+ drv_mb_param = STORM_FW_VERSION;
rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
drv_mb_param, &load_code, &param);
@@ -1290,27 +1805,53 @@ void qed_hw_timers_stop_all(struct qed_dev *cdev)
int qed_hw_stop(struct qed_dev *cdev)
{
- int rc = 0, t_rc;
+ struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
+ int rc, rc2 = 0;
int j;
for_each_hwfn(cdev, j) {
- struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
- struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+ p_hwfn = &cdev->hwfns[j];
+ p_ptt = p_hwfn->p_main_ptt;
DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
if (IS_VF(cdev)) {
qed_vf_pf_int_cleanup(p_hwfn);
+ rc = qed_vf_pf_reset(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "qed_vf_pf_reset failed. rc = %d.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
continue;
}
/* mark the hw as uninitialized... */
p_hwfn->hw_init_done = false;
+ /* Send unload command to MCP */
+ rc = qed_mcp_unload_req(p_hwfn, p_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed sending a UNLOAD_REQ command. rc = %d.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
+
+ qed_slowpath_irq_sync(p_hwfn);
+
+ /* After this point no MFW attentions are expected, e.g. prevent
+ * race between pf stop and dcbx pf update.
+ */
rc = qed_sp_pf_stop(p_hwfn);
- if (rc)
+ if (rc) {
DP_NOTICE(p_hwfn,
- "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
+ "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
qed_wr(p_hwfn, p_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
@@ -1333,34 +1874,54 @@ int qed_hw_stop(struct qed_dev *cdev)
/* Need to wait 1ms to guarantee SBs are cleared */
usleep_range(1000, 2000);
+
+ /* Disable PF in HW blocks */
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
+ qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
+
+ rc = qed_mcp_unload_done(p_hwfn, p_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed sending a UNLOAD_DONE command. rc = %d.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
}
if (IS_PF(cdev)) {
+ p_hwfn = QED_LEADING_HWFN(cdev);
+ p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt;
+
/* Disable DMAE in PXP - in CMT, this should only be done for
* first hw-function, and only after all transactions have
* stopped for all active hw-functions.
*/
- t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
- cdev->hwfns[0].p_main_ptt, false);
- if (t_rc != 0)
- rc = t_rc;
+ rc = qed_change_pci_hwfn(p_hwfn, p_ptt, false);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "qed_change_pci_hwfn failed. rc = %d.\n", rc);
+ rc2 = -EINVAL;
+ }
}
- return rc;
+ return rc2;
}
-void qed_hw_stop_fastpath(struct qed_dev *cdev)
+int qed_hw_stop_fastpath(struct qed_dev *cdev)
{
int j;
for_each_hwfn(cdev, j) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
- struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+ struct qed_ptt *p_ptt;
if (IS_VF(cdev)) {
qed_vf_pf_int_cleanup(p_hwfn);
continue;
}
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
DP_VERBOSE(p_hwfn,
NETIF_MSG_IFDOWN, "Shutting down the fastpath\n");
@@ -1378,100 +1939,28 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
/* Need to wait 1ms to guarantee SBs are cleared */
usleep_range(1000, 2000);
- }
-}
-
-void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
-{
- if (IS_VF(p_hwfn->cdev))
- return;
-
- /* Re-open incoming traffic */
- qed_wr(p_hwfn, p_hwfn->p_main_ptt,
- NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
-}
-
-static int qed_reg_assert(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 reg, bool expected)
-{
- u32 assert_val = qed_rd(p_hwfn, p_ptt, reg);
-
- if (assert_val != expected) {
- DP_NOTICE(p_hwfn, "Value at address 0x%08x != 0x%08x\n",
- reg, expected);
- return -EINVAL;
+ qed_ptt_release(p_hwfn, p_ptt);
}
return 0;
}
-int qed_hw_reset(struct qed_dev *cdev)
+int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
- int rc = 0;
- u32 unload_resp, unload_param;
- u32 wol_param;
- int i;
-
- switch (cdev->wol_config) {
- case QED_OV_WOL_DISABLED:
- wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
- break;
- case QED_OV_WOL_ENABLED:
- wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
- break;
- default:
- DP_NOTICE(cdev,
- "Unknown WoL configuration %02x\n", cdev->wol_config);
- /* Fallthrough */
- case QED_OV_WOL_DEFAULT:
- wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
- }
-
- for_each_hwfn(cdev, i) {
- struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_ptt *p_ptt;
- if (IS_VF(cdev)) {
- rc = qed_vf_pf_reset(p_hwfn);
- if (rc)
- return rc;
- continue;
- }
-
- DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
-
- /* Check for incorrect states */
- qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
- QM_REG_USG_CNT_PF_TX, 0);
- qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
- QM_REG_USG_CNT_PF_OTHER, 0);
-
- /* Disable PF in HW blocks */
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt,
- TCFC_REG_STRONG_ENABLE_PF, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt,
- CCFC_REG_STRONG_ENABLE_PF, 0);
+ if (IS_VF(p_hwfn->cdev))
+ return 0;
- /* Send unload command to MCP */
- rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_UNLOAD_REQ, wol_param,
- &unload_resp, &unload_param);
- if (rc) {
- DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
- unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
- }
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
- rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_UNLOAD_DONE,
- 0, &unload_resp, &unload_param);
- if (rc) {
- DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
- return rc;
- }
- }
+ /* Re-open incoming traffic */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+ qed_ptt_release(p_hwfn, p_ptt);
- return rc;
+ return 0;
}
/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
@@ -1485,10 +1974,25 @@ static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
/* clear indirect access */
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
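+	/* The PGL indirect-access address registers differ between AH (K2)
+	 * and BB devices.
+	 */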
+ if (QED_IS_AH(p_hwfn->cdev)) {
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0);
+ } else {
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
+ }
/* Clean Previous errors if such exist */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
@@ -1522,7 +2026,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
u32 *feat_num = p_hwfn->hw_info.feat_num;
struct qed_sb_cnt_info sb_cnt_info;
- int num_features = 1;
+ u32 non_l2_sbs = 0;
if (IS_ENABLED(CONFIG_QED_RDMA) &&
p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
@@ -1530,204 +2034,260 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
* the status blocks equally between L2 / RoCE but with
* consideration as to how many l2 queues / cnqs we have.
*/
- num_features++;
-
feat_num[QED_RDMA_CNQ] =
- min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
+ min_t(u32, RESC_NUM(p_hwfn, QED_SB) / 2,
RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
- }
- feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
- num_features,
- RESC_NUM(p_hwfn, QED_L2_QUEUE));
-
- memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
- qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
- feat_num[QED_VF_L2_QUE] =
- min_t(u32,
- RESC_NUM(p_hwfn, QED_L2_QUEUE) -
- FEAT_NUM(p_hwfn, QED_PF_L2_QUE), sb_cnt_info.sb_iov_cnt);
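+		/* CNQs consume status blocks, leaving fewer for L2 queues */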
+ non_l2_sbs = feat_num[QED_RDMA_CNQ];
+ }
+ if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
+ p_hwfn->hw_info.personality == QED_PCI_ETH) {
+ /* Start by allocating VF queues, then PF's */
+ memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+ qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+ feat_num[QED_VF_L2_QUE] = min_t(u32,
+ RESC_NUM(p_hwfn, QED_L2_QUEUE),
+ sb_cnt_info.sb_iov_cnt);
+ feat_num[QED_PF_L2_QUE] = min_t(u32,
+ RESC_NUM(p_hwfn, QED_SB) -
+ non_l2_sbs,
+ RESC_NUM(p_hwfn,
+ QED_L2_QUEUE) -
+ FEAT_NUM(p_hwfn,
+ QED_VF_L2_QUE));
+ }
+
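+	/* iSCSI CQs are bounded by both the available SBs and the HW
+	 * command queues.
+	 */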
+ if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+ feat_num[QED_ISCSI_CQ] = min_t(u32, RESC_NUM(p_hwfn, QED_SB),
+ RESC_NUM(p_hwfn,
+ QED_CMDQS_CQS));
DP_VERBOSE(p_hwfn,
NETIF_MSG_PROBE,
- "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #SBS=%d num_features=%d\n",
+ "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d ISCSI_CQ=%d #SBS=%d\n",
(int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE),
(int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
(int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
- RESC_NUM(p_hwfn, QED_SB), num_features);
+ (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ),
+ RESC_NUM(p_hwfn, QED_SB));
}
-static enum resource_id_enum qed_hw_get_mfw_res_id(enum qed_resources res_id)
+const char *qed_hw_get_resc_name(enum qed_resources res_id)
{
- enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
-
switch (res_id) {
- case QED_SB:
- mfw_res_id = RESOURCE_NUM_SB_E;
- break;
case QED_L2_QUEUE:
- mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
- break;
+ return "L2_QUEUE";
case QED_VPORT:
- mfw_res_id = RESOURCE_NUM_VPORT_E;
- break;
+ return "VPORT";
case QED_RSS_ENG:
- mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
- break;
+ return "RSS_ENG";
case QED_PQ:
- mfw_res_id = RESOURCE_NUM_PQ_E;
- break;
+ return "PQ";
case QED_RL:
- mfw_res_id = RESOURCE_NUM_RL_E;
- break;
+ return "RL";
case QED_MAC:
+ return "MAC";
case QED_VLAN:
- /* Each VFC resource can accommodate both a MAC and a VLAN */
- mfw_res_id = RESOURCE_VFC_FILTER_E;
- break;
+ return "VLAN";
+ case QED_RDMA_CNQ_RAM:
+ return "RDMA_CNQ_RAM";
case QED_ILT:
- mfw_res_id = RESOURCE_ILT_E;
- break;
+ return "ILT";
case QED_LL2_QUEUE:
- mfw_res_id = RESOURCE_LL2_QUEUE_E;
- break;
- case QED_RDMA_CNQ_RAM:
+ return "LL2_QUEUE";
case QED_CMDQS_CQS:
- /* CNQ/CMDQS are the same resource */
- mfw_res_id = RESOURCE_CQS_E;
- break;
+ return "CMDQS_CQS";
case QED_RDMA_STATS_QUEUE:
- mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
- break;
+ return "RDMA_STATS_QUEUE";
+ case QED_BDQ:
+ return "BDQ";
+ case QED_SB:
+ return "SB";
default:
- break;
+ return "UNKNOWN_RESOURCE";
+ }
+}
+
+static int
+__qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_resources res_id,
+ u32 resc_max_val, u32 *p_mcp_resp)
+{
+ int rc;
+
+ rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
+ resc_max_val, p_mcp_resp);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "MFW response failure for a max value setting of resource %d [%s]\n",
+ res_id, qed_hw_get_resc_name(res_id));
+ return rc;
+ }
+
+ if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
+ DP_INFO(p_hwfn,
+ "Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
+ res_id, qed_hw_get_resc_name(res_id), *p_mcp_resp);
+
+ return 0;
+}
+
+static int
+qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ bool b_ah = QED_IS_AH(p_hwfn->cdev);
+ u32 resc_max_val, mcp_resp;
+ u8 res_id;
+ int rc;
+
+ for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
+ switch (res_id) {
+ case QED_LL2_QUEUE:
+ resc_max_val = MAX_NUM_LL2_RX_QUEUES;
+ break;
+ case QED_RDMA_CNQ_RAM:
+ /* No need for a case for QED_CMDQS_CQS since
+ * CNQ/CMDQS are the same resource.
+ */
+ resc_max_val = NUM_OF_CMDQS_CQS;
+ break;
+ case QED_RDMA_STATS_QUEUE:
+ resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
+ : RDMA_NUM_STATISTIC_COUNTERS_BB;
+ break;
+ case QED_BDQ:
+ resc_max_val = BDQ_NUM_RESOURCES;
+ break;
+ default:
+ continue;
+ }
+
+ rc = __qed_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
+ resc_max_val, &mcp_resp);
+ if (rc)
+ return rc;
+
+ /* There's no point to continue to the next resource if the
+ * command is not supported by the MFW.
+ * We do continue if the command is supported but the resource
+ * is unknown to the MFW. Such a resource will be later
+ * configured with the default allocation values.
+ */
+ if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
+ return -EINVAL;
}
- return mfw_res_id;
+ return 0;
}
-static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn,
- enum qed_resources res_id)
+static
+int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
+ enum qed_resources res_id,
+ u32 *p_resc_num, u32 *p_resc_start)
{
u8 num_funcs = p_hwfn->num_funcs_on_engine;
+ bool b_ah = QED_IS_AH(p_hwfn->cdev);
struct qed_sb_cnt_info sb_cnt_info;
- u32 dflt_resc_num = 0;
switch (res_id) {
- case QED_SB:
- memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
- qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
- dflt_resc_num = sb_cnt_info.sb_cnt;
- break;
case QED_L2_QUEUE:
- dflt_resc_num = MAX_NUM_L2_QUEUES_BB / num_funcs;
+ *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
+ MAX_NUM_L2_QUEUES_BB) / num_funcs;
break;
case QED_VPORT:
- dflt_resc_num = MAX_NUM_VPORTS_BB / num_funcs;
+ *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
+ MAX_NUM_VPORTS_BB) / num_funcs;
break;
case QED_RSS_ENG:
- dflt_resc_num = ETH_RSS_ENGINE_NUM_BB / num_funcs;
+ *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
+ ETH_RSS_ENGINE_NUM_BB) / num_funcs;
break;
case QED_PQ:
- /* The granularity of the PQs is 8 */
- dflt_resc_num = MAX_QM_TX_QUEUES_BB / num_funcs;
- dflt_resc_num &= ~0x7;
+ *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
+ MAX_QM_TX_QUEUES_BB) / num_funcs;
+ *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */
break;
case QED_RL:
- dflt_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
+ *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
break;
case QED_MAC:
case QED_VLAN:
/* Each VFC resource can accommodate both a MAC and a VLAN */
- dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
+ *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
break;
case QED_ILT:
- dflt_resc_num = PXP_NUM_ILT_RECORDS_BB / num_funcs;
+ *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
+ PXP_NUM_ILT_RECORDS_BB) / num_funcs;
break;
case QED_LL2_QUEUE:
- dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+ *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
break;
case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS:
/* CNQ/CMDQS are the same resource */
- dflt_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
+ *p_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
break;
case QED_RDMA_STATS_QUEUE:
- dflt_resc_num = RDMA_NUM_STATISTIC_COUNTERS_BB / num_funcs;
+ *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
+ RDMA_NUM_STATISTIC_COUNTERS_BB) / num_funcs;
break;
- default:
+ case QED_BDQ:
+ if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
+ p_hwfn->hw_info.personality != QED_PCI_FCOE)
+ *p_resc_num = 0;
+ else
+ *p_resc_num = 1;
break;
+ case QED_SB:
+ memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+ qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+ *p_resc_num = sb_cnt_info.sb_cnt;
+ break;
+ default:
+ return -EINVAL;
}
- return dflt_resc_num;
-}
-
-static const char *qed_hw_get_resc_name(enum qed_resources res_id)
-{
switch (res_id) {
- case QED_SB:
- return "SB";
- case QED_L2_QUEUE:
- return "L2_QUEUE";
- case QED_VPORT:
- return "VPORT";
- case QED_RSS_ENG:
- return "RSS_ENG";
- case QED_PQ:
- return "PQ";
- case QED_RL:
- return "RL";
- case QED_MAC:
- return "MAC";
- case QED_VLAN:
- return "VLAN";
- case QED_RDMA_CNQ_RAM:
- return "RDMA_CNQ_RAM";
- case QED_ILT:
- return "ILT";
- case QED_LL2_QUEUE:
- return "LL2_QUEUE";
- case QED_CMDQS_CQS:
- return "CMDQS_CQS";
- case QED_RDMA_STATS_QUEUE:
- return "RDMA_STATS_QUEUE";
+ case QED_BDQ:
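+		/* BDQs are a per-port resource: on 4-port engines each PF
+		 * takes its own port's BDQ, while on 2-port engines iSCSI
+		 * PFs use BDQs 0-1 and FCoE PFs use BDQs 2-3.
+		 */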
+ if (!*p_resc_num)
+ *p_resc_start = 0;
+ else if (p_hwfn->cdev->num_ports_in_engines == 4)
+ *p_resc_start = p_hwfn->port_id;
+ else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+ *p_resc_start = p_hwfn->port_id;
+ else if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
+ *p_resc_start = p_hwfn->port_id + 2;
+ break;
default:
- return "UNKNOWN_RESOURCE";
+ *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
+ break;
}
+
+ return 0;
}
-static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
- enum qed_resources res_id)
+static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
+ enum qed_resources res_id)
{
- u32 dflt_resc_num = 0, dflt_resc_start = 0, mcp_resp, mcp_param;
- u32 *p_resc_num, *p_resc_start;
- struct resource_info resc_info;
+ u32 dflt_resc_num = 0, dflt_resc_start = 0;
+ u32 mcp_resp, *p_resc_num, *p_resc_start;
int rc;
p_resc_num = &RESC_NUM(p_hwfn, res_id);
p_resc_start = &RESC_START(p_hwfn, res_id);
- /* Default values assumes that each function received equal share */
- dflt_resc_num = qed_hw_get_dflt_resc_num(p_hwfn, res_id);
- if (!dflt_resc_num) {
+ rc = qed_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
+ &dflt_resc_start);
+ if (rc) {
DP_ERR(p_hwfn,
"Failed to get default amount for resource %d [%s]\n",
res_id, qed_hw_get_resc_name(res_id));
- return -EINVAL;
- }
- dflt_resc_start = dflt_resc_num * p_hwfn->enabled_func_idx;
-
- memset(&resc_info, 0, sizeof(resc_info));
- resc_info.res_id = qed_hw_get_mfw_res_id(res_id);
- if (resc_info.res_id == RESOURCE_NUM_INVALID) {
- DP_ERR(p_hwfn,
- "Failed to match resource %d [%s] with the MFW resources\n",
- res_id, qed_hw_get_resc_name(res_id));
- return -EINVAL;
+ return rc;
}
- rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, &resc_info,
- &mcp_resp, &mcp_param);
+ rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
+ &mcp_resp, p_resc_num, p_resc_start);
if (rc) {
DP_NOTICE(p_hwfn,
"MFW response failure for an allocation request for resource %d [%s]\n",
@@ -1740,13 +2300,12 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
* - There is an internal error in the MFW while processing the request
* - The resource ID is unknown to the MFW
*/
- if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK &&
- mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED) {
- DP_NOTICE(p_hwfn,
- "Resource %d [%s]: No allocation info was received [mcp_resp 0x%x]. Applying default values [num %d, start %d].\n",
- res_id,
- qed_hw_get_resc_name(res_id),
- mcp_resp, dflt_resc_num, dflt_resc_start);
+ if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n",
+ res_id,
+ qed_hw_get_resc_name(res_id),
+ mcp_resp, dflt_resc_num, dflt_resc_start);
*p_resc_num = dflt_resc_num;
*p_resc_start = dflt_resc_start;
goto out;
@@ -1754,13 +2313,9 @@ static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
/* Special handling for status blocks; Would be revised in future */
if (res_id == QED_SB) {
- resc_info.size -= 1;
- resc_info.offset -= p_hwfn->enabled_func_idx;
+ *p_resc_num -= 1;
+ *p_resc_start -= p_hwfn->enabled_func_idx;
}
-
- *p_resc_num = resc_info.size;
- *p_resc_start = resc_info.offset;
-
out:
/* PQs have to divide by 8 [that's the HW granularity].
* Reduce number so it would fit.
@@ -1778,19 +2333,80 @@ out:
return 0;
}
-static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
+static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn)
{
- u8 res_id;
int rc;
+ u8 res_id;
for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
- rc = qed_hw_set_resc_info(p_hwfn, res_id);
+ rc = __qed_hw_set_resc_info(p_hwfn, res_id);
if (rc)
return rc;
}
+ return 0;
+}
+
+static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_resc_unlock_params resc_unlock_params;
+ struct qed_resc_lock_params resc_lock_params;
+ bool b_ah = QED_IS_AH(p_hwfn->cdev);
+ u8 res_id;
+ int rc;
+
+ /* Setting the max values of the soft resources and the following
+ * resources allocation queries should be atomic. Since several PFs can
+ * run in parallel - a resource lock is needed.
+ * If either the resource lock or resource set value commands are not
+	 * supported - skip the max values setting, release the lock if
+ * needed, and proceed to the queries. Other failures, including a
+ * failure to acquire the lock, will cause this function to fail.
+ */
+ qed_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
+ QED_RESC_LOCK_RESC_ALLOC, false);
+
+ rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
+ if (rc && rc != -EINVAL) {
+ return rc;
+ } else if (rc == -EINVAL) {
+ DP_INFO(p_hwfn,
+ "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
+ } else if (!rc && !resc_lock_params.b_granted) {
+ DP_NOTICE(p_hwfn,
+ "Failed to acquire the resource lock for the resource allocation commands\n");
+ return -EBUSY;
+ } else {
+ rc = qed_hw_set_soft_resc_size(p_hwfn, p_ptt);
+ if (rc && rc != -EINVAL) {
+ DP_NOTICE(p_hwfn,
+ "Failed to set the max values of the soft resources\n");
+ goto unlock_and_exit;
+ } else if (rc == -EINVAL) {
+ DP_INFO(p_hwfn,
+ "Skip the max values setting of the soft resources since it is not supported by the MFW\n");
+ rc = qed_mcp_resc_unlock(p_hwfn, p_ptt,
+ &resc_unlock_params);
+ if (rc)
+ DP_INFO(p_hwfn,
+ "Failed to release the resource lock for the resource allocation commands\n");
+ }
+ }
+
+ rc = qed_hw_set_resc_info(p_hwfn);
+ if (rc)
+ goto unlock_and_exit;
+
+ if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
+ rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
+ if (rc)
+ DP_INFO(p_hwfn,
+ "Failed to release the resource lock for the resource allocation commands\n");
+ }
+
/* Sanity for ILT */
- if ((RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB)) {
+ if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
+ (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
RESC_START(p_hwfn, QED_ILT),
RESC_END(p_hwfn, QED_ILT) - 1);
@@ -1799,8 +2415,6 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
qed_hw_set_feat(p_hwfn);
- DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
- "The numbers for each resource are:\n");
for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n",
qed_hw_get_resc_name(res_id),
@@ -1808,6 +2422,11 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
RESC_START(p_hwfn, res_id));
return 0;
+
+unlock_and_exit:
+ if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
+ qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
+ return rc;
}
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -1860,9 +2479,15 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G;
+ break;
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
+ p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G;
+ break;
default:
DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
break;
@@ -1976,8 +2601,9 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
+ struct qed_dev *cdev = p_hwfn->cdev;
- num_funcs = MAX_NUM_PFS_BB;
+ num_funcs = QED_IS_AH(cdev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;
/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
* in the other bits are selected.
@@ -1990,12 +2616,17 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
if (reg_function_hide & 0x1) {
- if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
- num_funcs = 0;
- eng_mask = 0xaaaa;
+ if (QED_IS_BB(cdev)) {
+ if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) {
+ num_funcs = 0;
+ eng_mask = 0xaaaa;
+ } else {
+ num_funcs = 1;
+ eng_mask = 0x5554;
+ }
} else {
num_funcs = 1;
- eng_mask = 0x5554;
+ eng_mask = 0xfffe;
}
/* Get the number of the enabled functions on the engine */
@@ -2027,24 +2658,12 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
}
-static int
-qed_get_hw_info(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- enum qed_pci_personality personality)
+static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
{
u32 port_mode;
- int rc;
-
- /* Since all information is common, only first hwfns should do this */
- if (IS_LEAD_HWFN(p_hwfn)) {
- rc = qed_iov_hw_info(p_hwfn);
- if (rc)
- return rc;
- }
- /* Read the port mode */
- port_mode = qed_rd(p_hwfn, p_ptt,
- CNIG_REG_NW_PORT_MODE_BB_B0);
+ port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0);
if (port_mode < 3) {
p_hwfn->cdev->num_ports_in_engines = 1;
@@ -2057,6 +2676,54 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
/* Default num_ports_in_engines to something */
p_hwfn->cdev->num_ports_in_engines = 1;
}
+}
+
+static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 port;
+ int i;
+
+ p_hwfn->cdev->num_ports_in_engines = 0;
+
+ for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
+ port = qed_rd(p_hwfn, p_ptt,
+ CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
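+		/* Bit 0 of each per-port NIG config register indicates
+		 * whether the port is enabled.
+		 */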
+ if (port & 1)
+ p_hwfn->cdev->num_ports_in_engines++;
+ }
+
+ if (!p_hwfn->cdev->num_ports_in_engines) {
+ DP_NOTICE(p_hwfn, "All NIG ports are inactive\n");
+
+		/* Default num_ports_in_engines to something */
+ p_hwfn->cdev->num_ports_in_engines = 1;
+ }
+}
+
+static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ if (QED_IS_BB(p_hwfn->cdev))
+ qed_hw_info_port_num_bb(p_hwfn, p_ptt);
+ else
+ qed_hw_info_port_num_ah(p_hwfn, p_ptt);
+}
+
+static int
+qed_get_hw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_pci_personality personality)
+{
+ int rc;
+
+ /* Since all information is common, only first hwfns should do this */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_iov_hw_info(p_hwfn);
+ if (rc)
+ return rc;
+ }
+
+ qed_hw_info_port_num(p_hwfn, p_ptt);
qed_hw_get_nvm_info(p_hwfn, p_ptt);
@@ -2085,33 +2752,48 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.personality = protocol;
}
+ p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
+ p_hwfn->hw_info.num_active_tc = 1;
+
qed_get_num_funcs(p_hwfn, p_ptt);
if (qed_mcp_is_init(p_hwfn))
p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;
- return qed_hw_get_resc(p_hwfn);
+ return qed_hw_get_resc(p_hwfn, p_ptt);
}
-static int qed_get_dev_info(struct qed_dev *cdev)
+static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u16 device_id_mask;
u32 tmp;
/* Read Vendor Id / Device Id */
pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);
- cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_NUM);
- cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_REV);
+ /* Determine type */
+ device_id_mask = cdev->device_id & QED_DEV_ID_MASK;
+ switch (device_id_mask) {
+ case QED_DEV_ID_MASK_BB:
+ cdev->type = QED_DEV_TYPE_BB;
+ break;
+ case QED_DEV_ID_MASK_AH:
+ cdev->type = QED_DEV_TYPE_AH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id);
+ return -EBUSY;
+ }
+
+ cdev->chip_num = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM);
+ cdev->chip_rev = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
+
MASK_FIELD(CHIP_REV, cdev->chip_rev);
- cdev->type = QED_DEV_TYPE_BB;
/* Learn number of HW-functions */
- tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CMT_ENABLED_FOR_PAIR);
+ tmp = qed_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR);
if (tmp & (1 << p_hwfn->rel_pf_id)) {
DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
@@ -2120,15 +2802,17 @@ static int qed_get_dev_info(struct qed_dev *cdev)
cdev->num_hwfns = 1;
}
- cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
+ cdev->chip_bond_id = qed_rd(p_hwfn, p_ptt,
MISCS_REG_CHIP_TEST_REG) >> 4;
MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
- cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_METAL);
+ cdev->chip_metal = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
MASK_FIELD(CHIP_METAL, cdev->chip_metal);
DP_INFO(cdev->hwfns,
- "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+ "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+ QED_IS_BB(cdev) ? "BB" : "AH",
+ 'A' + cdev->chip_rev,
+ (int)cdev->chip_metal,
cdev->chip_num, cdev->chip_rev,
cdev->chip_bond_id, cdev->chip_metal);
@@ -2174,7 +2858,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
/* First hwfn learns basic information, e.g., number of hwfns */
if (!p_hwfn->my_id) {
- rc = qed_get_dev_info(p_hwfn->cdev);
+ rc = qed_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
if (rc)
goto err1;
}
@@ -2195,6 +2879,15 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
goto err2;
}
+ /* Sending a mailbox to the MFW should be done after qed_get_hw_info()
+ * is called as it sets the ports number in an engine.
+ */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n");
+ }
+
/* Allocate the init RT array and initialize the init-ops engine */
rc = qed_init_alloc(p_hwfn);
if (rc)
@@ -2236,11 +2929,14 @@ int qed_hw_prepare(struct qed_dev *cdev,
u8 __iomem *addr;
/* adjust bar offset for second engine */
- addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
+ addr = cdev->regview +
+ qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+ BAR_ID_0) / 2;
p_regview = addr;
- /* adjust doorbell bar offset for second engine */
- addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
+ addr = cdev->doorbells +
+ qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+ BAR_ID_1) / 2;
p_doorbell = addr;
/* prepare second hw function */
@@ -3363,3 +4059,22 @@ void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
memset(p_hwfn->qm_info.wfq_data, 0,
sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
}
+
+int qed_device_num_engines(struct qed_dev *cdev)
+{
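+	/* BB devices have two engines per chip; AH devices have one */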
+ return QED_IS_BB(cdev) ? 2 : 1;
+}
+
+static int qed_device_num_ports(struct qed_dev *cdev)
+{
+ /* in CMT always only one port */
+ if (cdev->num_hwfns > 1)
+ return 1;
+
+ return cdev->num_ports_in_engines * qed_device_num_engines(cdev);
+}
+
+int qed_device_get_port_id(struct qed_dev *cdev)
+{
+ return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index 6812003411cd..cefe3ee9064a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -82,26 +82,63 @@ int qed_resc_alloc(struct qed_dev *cdev);
*/
void qed_resc_setup(struct qed_dev *cdev);
+enum qed_override_force_load {
+ QED_OVERRIDE_FORCE_LOAD_NONE,
+ QED_OVERRIDE_FORCE_LOAD_ALWAYS,
+ QED_OVERRIDE_FORCE_LOAD_NEVER,
+};
+
+struct qed_drv_load_params {
+ /* Indicates whether the driver is running over a crash kernel.
+ * As part of the load request, this will be used for providing the
+ * driver role to the MFW.
+ * In case of a crash kernel over PDA - this should be set to false.
+ */
+ bool is_crash_kernel;
+
+ /* The timeout value that the MFW should use when locking the engine for
+ * the driver load process.
+ * A value of '0' means the default value, and '255' means no timeout.
+ */
+ u8 mfw_timeout_val;
+#define QED_LOAD_REQ_LOCK_TO_DEFAULT 0
+#define QED_LOAD_REQ_LOCK_TO_NONE 255
+
+ /* Avoid engine reset when first PF loads on it */
+ bool avoid_eng_reset;
+
+ /* Allow overriding the default force load behavior */
+ enum qed_override_force_load override_force_load;
+};
+
+struct qed_hw_init_params {
+ /* Tunneling parameters */
+ struct qed_tunnel_info *p_tunn;
+
+ bool b_hw_start;
+
+ /* Interrupt mode [msix, inta, etc.] to use */
+ enum qed_int_mode int_mode;
+
+ /* NPAR tx switching to be used for vports for tx-switching */
+ bool allow_npar_tx_switch;
+
+ /* Binary fw data pointer in binary fw file */
+ const u8 *bin_fw_data;
+
+ /* Driver load parameters */
+ struct qed_drv_load_params *p_drv_load_params;
+};
+
/**
* @brief qed_hw_init -
*
* @param cdev
- * @param p_tunn
- * @param b_hw_start
- * @param int_mode - interrupt mode [msix, inta, etc.] to use.
- * @param allow_npar_tx_switch - npar tx switching to be used
- * for vports configured for tx-switching.
- * @param bin_fw_data - binary fw data pointer in binary fw file.
- * Pass NULL if not using binary fw file.
+ * @param p_params
*
* @return int
*/
-int qed_hw_init(struct qed_dev *cdev,
- struct qed_tunn_start_params *p_tunn,
- bool b_hw_start,
- enum qed_int_mode int_mode,
- bool allow_npar_tx_switch,
- const u8 *bin_fw_data);
+int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params);
/**
* @brief qed_hw_timers_stop_all - stop the timers HW block
@@ -128,26 +165,20 @@ int qed_hw_stop(struct qed_dev *cdev);
*
* @param cdev
*
+ * @return int
*/
-void qed_hw_stop_fastpath(struct qed_dev *cdev);
+int qed_hw_stop_fastpath(struct qed_dev *cdev);
/**
* @brief qed_hw_start_fastpath -restart fastpath traffic,
* only if hw_stop_fastpath was called
*
- * @param cdev
- *
- */
-void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
-
-/**
- * @brief qed_hw_reset -
- *
- * @param cdev
+ * @param p_hwfn
*
* @return int
*/
-int qed_hw_reset(struct qed_dev *cdev);
+int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
+
/**
* @brief qed_hw_prepare -
@@ -441,4 +472,6 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
*/
int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u16 coalesce, u8 qid, u16 sb_id);
+
+const char *qed_hw_get_resc_name(enum qed_resources res_id);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index cbc81412174f..21a58fffd02b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -191,7 +191,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi;
p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi;
- p_data->q_params.bdq_resource_id = FCOE_BDQ_ID(p_hwfn->port_id);
+ p_data->q_params.bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ],
fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
@@ -241,7 +241,7 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
struct fcoe_conn_offload_ramrod_data *p_data;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- u16 pq_id = 0, tmp;
+ u16 physical_q0, tmp;
int rc;
/* Get SPQ entry */
@@ -261,9 +261,9 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
p_data = &p_ramrod->offload_ramrod_data;
/* Transmission PQ is the first of the PF */
- pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_FCOE, NULL);
- p_conn->physical_q0 = cpu_to_le16(pq_id);
- p_data->physical_q0 = cpu_to_le16(pq_id);
+ physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ p_conn->physical_q0 = cpu_to_le16(physical_q0);
+ p_data->physical_q0 = cpu_to_le16(physical_q0);
p_data->conn_id = cpu_to_le16(p_conn->conn_id);
DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr);
@@ -340,10 +340,10 @@ qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn,
static int
qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr)
{
- struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
u32 active_segs = 0;
@@ -512,19 +512,31 @@ static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
u8 bdq_id)
{
- u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id);
-
- return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id);
+ if (RESC_NUM(p_hwfn, QED_BDQ)) {
+ return (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
+ QED_BDQ),
+ bdq_id);
+ } else {
+ DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+ return NULL;
+ }
}
static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
u8 bdq_id)
{
- u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id);
-
- return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM +
- TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id);
+ if (RESC_NUM(p_hwfn, QED_BDQ)) {
+ return (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
+ QED_BDQ),
+ bdq_id);
+ } else {
+ DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+ return NULL;
+ }
}
struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
@@ -753,6 +765,7 @@ static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev,
static int qed_fcoe_stop(struct qed_dev *cdev)
{
+ struct qed_ptt *p_ptt;
int rc;
if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
@@ -766,10 +779,15 @@ static int qed_fcoe_stop(struct qed_dev *cdev)
return -EINVAL;
}
+ p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (!p_ptt)
+ return -EAGAIN;
+
/* Stop the fcoe */
- rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev),
+ rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev), p_ptt,
QED_SPQ_MODE_EBLOCK, NULL);
cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
+ qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 37c2bfb663bb..858a57a73589 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -574,6 +574,7 @@ enum core_event_opcode {
CORE_EVENT_TX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_START,
CORE_EVENT_RX_QUEUE_STOP,
+ CORE_EVENT_RX_QUEUE_FLUSH,
MAX_CORE_EVENT_OPCODE
};
@@ -625,6 +626,7 @@ enum core_ramrod_cmd_id {
CORE_RAMROD_TX_QUEUE_START,
CORE_RAMROD_RX_QUEUE_STOP,
CORE_RAMROD_TX_QUEUE_STOP,
+ CORE_RAMROD_RX_QUEUE_FLUSH,
MAX_CORE_RAMROD_CMD_ID
};
@@ -698,7 +700,8 @@ struct core_rx_slow_path_cqe {
u8 type;
u8 ramrod_cmd_id;
__le16 echo;
- __le32 reserved1[7];
+ struct core_rx_cqe_opaque_data opaque_data;
+ __le32 reserved1[5];
};
union core_rx_cqe_union {
@@ -735,45 +738,46 @@ struct core_rx_stop_ramrod_data {
__le16 reserved2[2];
};
-struct core_tx_bd_flags {
- u8 as_bitfield;
-#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
-#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0
-#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK 0x1
-#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT 1
-#define CORE_TX_BD_FLAGS_START_BD_MASK 0x1
-#define CORE_TX_BD_FLAGS_START_BD_SHIFT 2
-#define CORE_TX_BD_FLAGS_IP_CSUM_MASK 0x1
-#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT 3
-#define CORE_TX_BD_FLAGS_L4_CSUM_MASK 0x1
-#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT 4
-#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK 0x1
-#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT 5
-#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK 0x1
-#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6
-#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1
-#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
+struct core_tx_bd_data {
+ __le16 as_bitfield;
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
+#define CORE_TX_BD_DATA_START_BD_MASK 0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT 2
+#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
+#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_DATA_NBDS_MASK 0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT 8
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
+#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
+#define CORE_TX_BD_DATA_RESERVED0_MASK 0x3
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT 14
};
struct core_tx_bd {
struct regpair addr;
__le16 nbytes;
__le16 nw_vlan_or_lb_echo;
- u8 bitfield0;
-#define CORE_TX_BD_NBDS_MASK 0xF
-#define CORE_TX_BD_NBDS_SHIFT 0
-#define CORE_TX_BD_ROCE_FLAV_MASK 0x1
-#define CORE_TX_BD_ROCE_FLAV_SHIFT 4
-#define CORE_TX_BD_RESERVED0_MASK 0x7
-#define CORE_TX_BD_RESERVED0_SHIFT 5
- struct core_tx_bd_flags bd_flags;
+ struct core_tx_bd_data bd_data;
__le16 bitfield1;
#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
#define CORE_TX_BD_TX_DST_MASK 0x1
#define CORE_TX_BD_TX_DST_SHIFT 14
-#define CORE_TX_BD_RESERVED1_MASK 0x1
-#define CORE_TX_BD_RESERVED1_SHIFT 15
+#define CORE_TX_BD_RESERVED_MASK 0x1
+#define CORE_TX_BD_RESERVED_SHIFT 15
};
enum core_tx_dest {
@@ -800,6 +804,14 @@ struct core_tx_stop_ramrod_data {
__le32 reserved0[2];
};
+enum dcb_dhcp_update_flag {
+ DONT_UPDATE_DCB_DHCP,
+ UPDATE_DCB,
+ UPDATE_DSCP,
+ UPDATE_DCB_DSCP,
+ MAX_DCB_DHCP_UPDATE_FLAG
+};
+
struct eth_mstorm_per_pf_stat {
struct regpair gre_discard_pkts;
struct regpair vxlan_discard_pkts;
@@ -893,6 +905,12 @@ union event_ring_element {
struct event_ring_next_addr next_addr;
};
+enum fw_flow_ctrl_mode {
+ flow_ctrl_pause,
+ flow_ctrl_pfc,
+ MAX_FW_FLOW_CTRL_MODE
+};
+
/* Major and Minor hsi Versions */
struct hsi_fp_ver_struct {
u8 minor_ver_arr[2];
@@ -921,6 +939,7 @@ enum malicious_vf_error_id {
ETH_EDPM_OUT_OF_SYNC,
ETH_TUNN_IPV6_EXT_NBD_ERR,
ETH_CONTROL_PACKET_VIOLATION,
+ ETH_ANTI_SPOOFING_ERR,
MAX_MALICIOUS_VF_ERROR_ID
};
@@ -1106,8 +1125,9 @@ struct tstorm_per_port_stat {
struct regpair ll2_mac_filter_discard;
struct regpair ll2_conn_disabled_discard;
struct regpair iscsi_irregular_pkt;
- struct regpair reserved;
+ struct regpair fcoe_irregular_pkt;
struct regpair roce_irregular_pkt;
+ struct regpair reserved;
struct regpair eth_irregular_pkt;
struct regpair reserved1;
struct regpair preroce_irregular_pkt;
@@ -1648,6 +1668,11 @@ enum block_addr {
GRCBASE_MS = 0x6a0000,
GRCBASE_PHY_PCIE = 0x620000,
GRCBASE_LED = 0x6b8000,
+ GRCBASE_AVS_WRAP = 0x6b0000,
+ GRCBASE_RGFS = 0x19d0000,
+ GRCBASE_TGFS = 0x19e0000,
+ GRCBASE_PTLD = 0x19f0000,
+ GRCBASE_YPLD = 0x1a10000,
GRCBASE_MISC_AEU = 0x8000,
GRCBASE_BAR0_MAP = 0x1c00000,
MAX_BLOCK_ADDR
@@ -1732,6 +1757,11 @@ enum block_id {
BLOCK_MS,
BLOCK_PHY_PCIE,
BLOCK_LED,
+ BLOCK_AVS_WRAP,
+ BLOCK_RGFS,
+ BLOCK_TGFS,
+ BLOCK_PTLD,
+ BLOCK_YPLD,
BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
MAX_BLOCK_ID
@@ -1783,9 +1813,9 @@ struct dbg_attn_reg_result {
__le32 data;
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
-#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_MASK 0xFF
-#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_SHIFT 24
- __le16 attn_idx_offset;
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
+ __le16 block_attn_offset;
__le16 reserved;
__le32 sts_val;
__le32 mask_val;
@@ -1815,12 +1845,12 @@ struct dbg_mode_hdr {
/* Attention register */
struct dbg_attn_reg {
struct dbg_mode_hdr mode;
- __le16 attn_idx_offset;
+ __le16 block_attn_offset;
__le32 data;
#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
-#define DBG_ATTN_REG_NUM_ATTN_IDX_MASK 0xFF
-#define DBG_ATTN_REG_NUM_ATTN_IDX_SHIFT 24
+#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
__le32 sts_clr_address;
__le32 mask_address;
};
@@ -2001,6 +2031,20 @@ enum dbg_bus_clients {
MAX_DBG_BUS_CLIENTS
};
+enum dbg_bus_constraint_ops {
+ DBG_BUS_CONSTRAINT_OP_EQ,
+ DBG_BUS_CONSTRAINT_OP_NE,
+ DBG_BUS_CONSTRAINT_OP_LT,
+ DBG_BUS_CONSTRAINT_OP_LTC,
+ DBG_BUS_CONSTRAINT_OP_LE,
+ DBG_BUS_CONSTRAINT_OP_LEC,
+ DBG_BUS_CONSTRAINT_OP_GT,
+ DBG_BUS_CONSTRAINT_OP_GTC,
+ DBG_BUS_CONSTRAINT_OP_GE,
+ DBG_BUS_CONSTRAINT_OP_GEC,
+ MAX_DBG_BUS_CONSTRAINT_OPS
+};
+
/* Debug Bus memory address */
struct dbg_bus_mem_addr {
__le32 lo;
@@ -2092,10 +2136,18 @@ struct dbg_bus_data {
* DBG_BUS_TARGET_ID_PCI.
*/
__le16 reserved;
- struct dbg_bus_block_data blocks[80];/* Debug Bus data for each block */
+ struct dbg_bus_block_data blocks[88];/* Debug Bus data for each block */
struct dbg_bus_storm_data storms[6]; /* Debug Bus data for each block */
};
+enum dbg_bus_filter_types {
+ DBG_BUS_FILTER_TYPE_OFF,
+ DBG_BUS_FILTER_TYPE_PRE,
+ DBG_BUS_FILTER_TYPE_POST,
+ DBG_BUS_FILTER_TYPE_ON,
+ MAX_DBG_BUS_FILTER_TYPES
+};
+
/* Debug bus frame modes */
enum dbg_bus_frame_modes {
DBG_BUS_FRAME_MODE_0HW_4ST = 0, /* 0 HW dwords, 4 Storm dwords */
@@ -2104,6 +2156,40 @@ enum dbg_bus_frame_modes {
MAX_DBG_BUS_FRAME_MODES
};
+enum dbg_bus_input_types {
+ DBG_BUS_INPUT_TYPE_STORM,
+ DBG_BUS_INPUT_TYPE_BLOCK,
+ MAX_DBG_BUS_INPUT_TYPES
+};
+
+enum dbg_bus_other_engine_modes {
+ DBG_BUS_OTHER_ENGINE_MODE_NONE,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX,
+ MAX_DBG_BUS_OTHER_ENGINE_MODES
+};
+
+enum dbg_bus_post_trigger_types {
+ DBG_BUS_POST_TRIGGER_RECORD,
+ DBG_BUS_POST_TRIGGER_DROP,
+ MAX_DBG_BUS_POST_TRIGGER_TYPES
+};
+
+enum dbg_bus_pre_trigger_types {
+ DBG_BUS_PRE_TRIGGER_START_FROM_ZERO,
+ DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
+ DBG_BUS_PRE_TRIGGER_DROP,
+ MAX_DBG_BUS_PRE_TRIGGER_TYPES
+};
+
+enum dbg_bus_semi_frame_modes {
+ DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
+ DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
+ MAX_DBG_BUS_SEMI_FRAME_MODES
+};
+
/* Debug bus states */
enum dbg_bus_states {
DBG_BUS_STATE_IDLE, /* debug bus idle state (not recording) */
@@ -2115,6 +2201,19 @@ enum dbg_bus_states {
MAX_DBG_BUS_STATES
};
+enum dbg_bus_storm_modes {
+ DBG_BUS_STORM_MODE_PRINTF,
+ DBG_BUS_STORM_MODE_PRAM_ADDR,
+ DBG_BUS_STORM_MODE_DRA_RW,
+ DBG_BUS_STORM_MODE_DRA_W,
+ DBG_BUS_STORM_MODE_LD_ST_ADDR,
+ DBG_BUS_STORM_MODE_DRA_FSM,
+ DBG_BUS_STORM_MODE_RH,
+ DBG_BUS_STORM_MODE_FOC,
+ DBG_BUS_STORM_MODE_EXT_STORE,
+ MAX_DBG_BUS_STORM_MODES
+};
+
/* Debug bus target IDs */
enum dbg_bus_targets {
/* records debug bus to DBG block internal buffer */
@@ -2128,13 +2227,10 @@ enum dbg_bus_targets {
/* GRC Dump data */
struct dbg_grc_data {
- __le32 param_val[40]; /* Value of each GRC parameter. Array size must
- * match the enum dbg_grc_params.
- */
- u8 param_set_by_user[40]; /* Indicates for each GRC parameter if it was
- * set by the user (0/1). Array size must
- * match the enum dbg_grc_params.
- */
+ u8 params_initialized;
+ u8 reserved1;
+ __le16 reserved2;
+ __le32 param_val[48];
};
/* Debug GRC params */
@@ -2181,6 +2277,8 @@ enum dbg_grc_params {
DBG_GRC_PARAM_PARITY_SAFE,
DBG_GRC_PARAM_DUMP_CM, /* dump CM memories (0/1) */
DBG_GRC_PARAM_DUMP_PHY, /* dump PHY memories (0/1) */
+ DBG_GRC_PARAM_NO_MCP,
+ DBG_GRC_PARAM_NO_FW_VER,
MAX_DBG_GRC_PARAMS
};
@@ -2280,7 +2378,7 @@ struct dbg_tools_data {
struct dbg_bus_data bus; /* Debug Bus data */
struct idle_chk_data idle_chk; /* Idle Check data */
u8 mode_enable[40]; /* Indicates if a mode is enabled (0/1) */
- u8 block_in_reset[80]; /* Indicates if a block is in reset state (0/1).
+ u8 block_in_reset[88]; /* Indicates if a block is in reset state (0/1).
*/
u8 chip_id; /* Chip ID (from enum chip_ids) */
u8 platform_id; /* Platform ID (from enum platform_ids) */
@@ -2404,7 +2502,7 @@ struct fw_info_location {
enum init_modes {
MODE_RESERVED,
- MODE_BB_B0,
+ MODE_BB,
MODE_K2,
MODE_ASIC,
MODE_RESERVED2,
@@ -2418,7 +2516,6 @@ enum init_modes {
MODE_PORTS_PER_ENG_2,
MODE_PORTS_PER_ENG_4,
MODE_100G,
- MODE_40G,
MODE_RESERVED6,
MAX_INIT_MODES
};
@@ -2686,6 +2783,13 @@ struct iro {
*/
enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
/**
+ * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
+ * default value.
+ *
+ * @param p_hwfn - HW device data
+ */
+void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
+/**
* @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for
* GRC Dump.
*
@@ -3369,6 +3473,11 @@ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool eth_geneve_enable, bool ip_geneve_enable);
+void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 pf_id);
+void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 pf_id, bool tcp, bool udp,
+ bool ipv4, bool ipv6);
#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
@@ -3418,7 +3527,7 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
(IRO[22].base + ((pf_id) * IRO[22].m1))
-#define MSTORM_ETH_PF_STAT_SIZE (IRO[21].size)
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size)
#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
(IRO[23].base + ((stat_counter_id) * IRO[23].m1))
#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
@@ -3482,7 +3591,7 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
static const struct iro iro_arr[47] = {
{0x0, 0x0, 0x0, 0x0, 0x8},
- {0x4cb0, 0x78, 0x0, 0x0, 0x78},
+ {0x4cb0, 0x80, 0x0, 0x0, 0x80},
{0x6318, 0x20, 0x0, 0x0, 0x20},
{0xb00, 0x8, 0x0, 0x0, 0x4},
{0xa80, 0x8, 0x0, 0x0, 0x4},
@@ -3521,13 +3630,13 @@ static const struct iro iro_arr[47] = {
{0xd888, 0x38, 0x0, 0x0, 0x24},
{0x12c38, 0x10, 0x0, 0x0, 0x8},
{0x11aa0, 0x38, 0x0, 0x0, 0x18},
- {0xa8c0, 0x30, 0x0, 0x0, 0x10},
- {0x86f8, 0x28, 0x0, 0x0, 0x18},
+ {0xa8c0, 0x38, 0x0, 0x0, 0x10},
+ {0x86f8, 0x30, 0x0, 0x0, 0x18},
{0x101f8, 0x10, 0x0, 0x0, 0x10},
{0xdd08, 0x48, 0x0, 0x0, 0x38},
{0x10660, 0x20, 0x0, 0x0, 0x20},
{0x2b80, 0x80, 0x0, 0x0, 0x10},
- {0x5000, 0x10, 0x0, 0x0, 0x10},
+ {0x5020, 0x10, 0x0, 0x0, 0x10},
};
/* Runtime array offsets */
@@ -4595,6 +4704,12 @@ enum eth_ipv4_frag_type {
MAX_ETH_IPV4_FRAG_TYPE
};
+enum eth_ip_type {
+ ETH_IPV4,
+ ETH_IPV6,
+ MAX_ETH_IP_TYPE
+};
+
enum eth_ramrod_cmd_id {
ETH_RAMROD_UNUSED,
ETH_RAMROD_VPORT_START,
@@ -4752,6 +4867,18 @@ struct eth_vport_tx_mode {
__le16 reserved2[3];
};
+enum gft_filter_update_action {
+ GFT_ADD_FILTER,
+ GFT_DELETE_FILTER,
+ MAX_GFT_FILTER_UPDATE_ACTION
+};
+
+enum gft_logic_filter_type {
+ GFT_FILTER_TYPE,
+ RFS_FILTER_TYPE,
+ MAX_GFT_LOGIC_FILTER_TYPE
+};
+
/* Ramrod data for rx queue start ramrod */
struct rx_queue_start_ramrod_data {
__le16 rx_queue_id;
@@ -4822,6 +4949,16 @@ struct rx_udp_filter_data {
__le32 tenant_id;
};
+struct rx_update_gft_filter_data {
+ struct regpair pkt_hdr_addr;
+ __le16 pkt_hdr_length;
+ __le16 rx_qid_or_action_icid;
+ u8 vport_id;
+ u8 filter_type;
+ u8 filter_action;
+ u8 reserved;
+};
+
/* Ramrod data for rx queue start ramrod */
struct tx_queue_start_ramrod_data {
__le16 sb_id;
@@ -4944,7 +5081,10 @@ struct vport_update_ramrod_data_cmn {
u8 update_mtu_flg;
__le16 mtu;
- u8 reserved[2];
+ u8 update_ctl_frame_checks_en_flg;
+ u8 ctl_frame_mac_check_en;
+ u8 ctl_frame_ethtype_check_en;
+ u8 reserved[15];
};
struct vport_update_ramrod_mcast {
@@ -4962,6 +5102,652 @@ struct vport_update_ramrod_data {
struct eth_vport_rss_config rss_config;
};
+struct gft_cam_line {
+ __le32 camline;
+#define GFT_CAM_LINE_VALID_MASK 0x1
+#define GFT_CAM_LINE_VALID_SHIFT 0
+#define GFT_CAM_LINE_DATA_MASK 0x3FFF
+#define GFT_CAM_LINE_DATA_SHIFT 1
+#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF
+#define GFT_CAM_LINE_MASK_BITS_SHIFT 15
+#define GFT_CAM_LINE_RESERVED1_MASK 0x7
+#define GFT_CAM_LINE_RESERVED1_SHIFT 29
+};
+
+struct gft_cam_line_mapped {
+ __le32 camline;
+#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT 25
+#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK 0x7
+#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29
+};
+
+union gft_cam_line_union {
+ struct gft_cam_line cam_line;
+ struct gft_cam_line_mapped cam_line_mapped;
+};
+
+enum gft_profile_ip_version {
+ GFT_PROFILE_IPV4 = 0,
+ GFT_PROFILE_IPV6 = 1,
+ MAX_GFT_PROFILE_IP_VERSION
+};
+
+enum gft_profile_upper_protocol_type {
+ GFT_PROFILE_ROCE_PROTOCOL = 0,
+ GFT_PROFILE_RROCE_PROTOCOL = 1,
+ GFT_PROFILE_FCOE_PROTOCOL = 2,
+ GFT_PROFILE_ICMP_PROTOCOL = 3,
+ GFT_PROFILE_ARP_PROTOCOL = 4,
+ GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5,
+ GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6,
+ GFT_PROFILE_TCP_PROTOCOL = 7,
+ GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8,
+ GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9,
+ GFT_PROFILE_UDP_PROTOCOL = 10,
+ GFT_PROFILE_USER_IP_1_INNER = 11,
+ GFT_PROFILE_USER_IP_2_OUTER = 12,
+ GFT_PROFILE_USER_ETH_1_INNER = 13,
+ GFT_PROFILE_USER_ETH_2_OUTER = 14,
+ GFT_PROFILE_RAW = 15,
+ MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
+};
+
+struct gft_ram_line {
+ __le32 low32bits;
+#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3
+#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT 2
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT 3
+#define GFT_RAM_LINE_TUNNEL_TTL_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT 4
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT 5
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT 6
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT 7
+#define GFT_RAM_LINE_TUNNEL_DSCP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT 8
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9
+#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT 10
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT 11
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT 12
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT 13
+#define GFT_RAM_LINE_TUNNEL_VLAN_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT 14
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT 15
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT 16
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK 0x1
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT 17
+#define GFT_RAM_LINE_TTL_MASK 0x1
+#define GFT_RAM_LINE_TTL_SHIFT 18
+#define GFT_RAM_LINE_ETHERTYPE_MASK 0x1
+#define GFT_RAM_LINE_ETHERTYPE_SHIFT 19
+#define GFT_RAM_LINE_RESERVED0_MASK 0x1
+#define GFT_RAM_LINE_RESERVED0_SHIFT 20
+#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT 21
+#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT 22
+#define GFT_RAM_LINE_TCP_FLAG_RST_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT 23
+#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT 24
+#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT 25
+#define GFT_RAM_LINE_TCP_FLAG_URG_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT 26
+#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT 27
+#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT 28
+#define GFT_RAM_LINE_TCP_FLAG_NS_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29
+#define GFT_RAM_LINE_DST_PORT_MASK 0x1
+#define GFT_RAM_LINE_DST_PORT_SHIFT 30
+#define GFT_RAM_LINE_SRC_PORT_MASK 0x1
+#define GFT_RAM_LINE_SRC_PORT_SHIFT 31
+ __le32 high32bits;
+#define GFT_RAM_LINE_DSCP_MASK 0x1
+#define GFT_RAM_LINE_DSCP_SHIFT 0
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT 1
+#define GFT_RAM_LINE_DST_IP_MASK 0x1
+#define GFT_RAM_LINE_DST_IP_SHIFT 2
+#define GFT_RAM_LINE_SRC_IP_MASK 0x1
+#define GFT_RAM_LINE_SRC_IP_SHIFT 3
+#define GFT_RAM_LINE_PRIORITY_MASK 0x1
+#define GFT_RAM_LINE_PRIORITY_SHIFT 4
+#define GFT_RAM_LINE_PROVIDER_VLAN_MASK 0x1
+#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT 5
+#define GFT_RAM_LINE_VLAN_MASK 0x1
+#define GFT_RAM_LINE_VLAN_SHIFT 6
+#define GFT_RAM_LINE_DST_MAC_MASK 0x1
+#define GFT_RAM_LINE_DST_MAC_SHIFT 7
+#define GFT_RAM_LINE_SRC_MAC_MASK 0x1
+#define GFT_RAM_LINE_SRC_MAC_SHIFT 8
+#define GFT_RAM_LINE_TENANT_ID_MASK 0x1
+#define GFT_RAM_LINE_TENANT_ID_SHIFT 9
+#define GFT_RAM_LINE_RESERVED1_MASK 0x3FFFFF
+#define GFT_RAM_LINE_RESERVED1_SHIFT 10
+};
+
+struct mstorm_eth_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct xstorm_eth_conn_agctxdq_ext_ldpart {
+ u8 reserved0;
+ u8 eth_state;
+ u8 flags0;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id;
+ __le16 physical_q0;
+ __le16 quota;
+ __le16 edpm_num_bds;
+ __le16 tx_bd_cons;
+ __le16 tx_bd_prod;
+ __le16 tx_class;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+};
+
+struct xstorm_eth_hw_conn_ag_ctx {
+ u8 reserved0;
+ u8 eth_state;
+ u8 flags0;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id;
+ __le16 physical_q0;
+ __le16 quota;
+ __le16 edpm_num_bds;
+ __le16 tx_bd_cons;
+ __le16 tx_bd_prod;
+ __le16 tx_class;
+ __le16 conn_dpi;
+};
+
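Editorial note: the aggregation-context structures above pack many sub-byte flag fields into plain u8 members, each described only by a *_MASK/*_SHIFT define pair. Below is a minimal userspace sketch of how such a pair is read and written; the GET_FIELD/SET_FIELD helpers here are simplified stand-ins for the driver's own macros (not the exact qed_hsi.h definitions), and the CF1 values are copied from mstorm_eth_conn_ag_ctx above.

/* Illustrative only: simplified equivalents of the driver's field helpers. */
#include <stdint.h>
#include <stdio.h>

#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK   0x3
#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT  4

#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(value, name, val)					\
	do {								\
		(value) &= ~(name##_MASK << name##_SHIFT);		\
		(value) |= ((val) & name##_MASK) << name##_SHIFT;	\
	} while (0)

int main(void)
{
	uint8_t flags0 = 0;

	SET_FIELD(flags0, MSTORM_ETH_CONN_AG_CTX_CF1, 2);
	printf("flags0=0x%02x CF1=%u\n", flags0,
	       (unsigned int)GET_FIELD(flags0, MSTORM_ETH_CONN_AG_CTX_CF1));
	return 0;
}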
struct mstorm_rdma_task_st_ctx {
struct regpair temp[4];
};
@@ -6165,7 +6951,7 @@ struct ystorm_roce_conn_st_ctx {
};
struct xstorm_roce_conn_st_ctx {
- struct regpair temp[22];
+ struct regpair temp[24];
};
struct tstorm_roce_conn_st_ctx {
@@ -6220,7 +7006,7 @@ struct roce_create_qp_req_ramrod_data {
__le16 mtu;
__le16 pd;
__le16 sq_num_pages;
- __le16 reseved2;
+ __le16 low_latency_phy_queue;
struct regpair sq_pbl_addr;
struct regpair orq_pbl_addr;
__le16 local_mac_addr[3];
@@ -6234,7 +7020,7 @@ struct roce_create_qp_req_ramrod_data {
u8 stats_counter_id;
u8 reserved3[7];
__le32 cq_cid;
- __le16 physical_queue0;
+ __le16 regular_latency_phy_queue;
__le16 dpi;
};
@@ -6282,15 +7068,16 @@ struct roce_create_qp_resp_ramrod_data {
__le32 dst_gid[4];
struct regpair qp_handle_for_cqe;
struct regpair qp_handle_for_async;
- __le32 reserved2[2];
+ __le16 low_latency_phy_queue;
+ u8 reserved2[6];
__le32 cq_cid;
- __le16 physical_queue0;
+ __le16 regular_latency_phy_queue;
__le16 dpi;
};
struct roce_destroy_qp_req_output_params {
__le32 num_bound_mw;
- __le32 reserved;
+ __le32 cq_prod;
};
struct roce_destroy_qp_req_ramrod_data {
@@ -6299,7 +7086,7 @@ struct roce_destroy_qp_req_ramrod_data {
struct roce_destroy_qp_resp_output_params {
__le32 num_invalidated_mw;
- __le32 reserved;
+ __le32 cq_prod;
};
struct roce_destroy_qp_resp_ramrod_data {
@@ -7426,6 +8213,7 @@ struct ystorm_fcoe_conn_st_ctx {
u8 fcp_rsp_size;
__le16 mss;
struct regpair reserved;
+ __le16 min_frame_size;
u8 protection_info_flags;
#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1
#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0
@@ -7444,7 +8232,6 @@ struct ystorm_fcoe_conn_st_ctx {
#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F
#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2
u8 fcp_xfer_size;
- u8 reserved3[2];
};
struct fcoe_vlan_fields {
@@ -8273,10 +9060,10 @@ struct xstorm_iscsi_conn_ag_ctx {
#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
u8 flags7;
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2
#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
@@ -8322,10 +9109,10 @@ struct xstorm_iscsi_conn_ag_ctx {
#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3
#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
@@ -8335,8 +9122,8 @@ struct xstorm_iscsi_conn_ag_ctx {
#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
u8 flags11;
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
@@ -8440,7 +9227,7 @@ struct xstorm_iscsi_conn_ag_ctx {
__le32 reg10;
__le32 reg11;
__le32 exp_stat_sn;
- __le32 reg13;
+ __le32 ongoing_fast_rxmit_seq;
__le32 reg14;
__le32 reg15;
__le32 reg16;
@@ -8466,10 +9253,10 @@ struct tstorm_iscsi_conn_ag_ctx {
#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2
#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
@@ -8490,10 +9277,10 @@ struct tstorm_iscsi_conn_ag_ctx {
#define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2
#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6
#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
@@ -8539,7 +9326,7 @@ struct tstorm_iscsi_conn_ag_ctx {
__le32 reg6;
__le32 reg7;
__le32 reg8;
- u8 byte2;
+ u8 cid_offload_cnt;
u8 byte3;
__le16 word0;
};
@@ -8831,11 +9618,24 @@ struct eth_stats {
u64 r511;
u64 r1023;
u64 r1518;
- u64 r1522;
- u64 r2047;
- u64 r4095;
- u64 r9216;
- u64 r16383;
+
+ union {
+ struct {
+ u64 r1522;
+ u64 r2047;
+ u64 r4095;
+ u64 r9216;
+ u64 r16383;
+ } bb0;
+ struct {
+ u64 unused1;
+ u64 r1519_to_max;
+ u64 unused2;
+ u64 unused3;
+ u64 unused4;
+ } ah0;
+ } u0;
+
u64 rfcs;
u64 rxcf;
u64 rxpf;
@@ -8852,14 +9652,36 @@ struct eth_stats {
u64 t511;
u64 t1023;
u64 t1518;
- u64 t2047;
- u64 t4095;
- u64 t9216;
- u64 t16383;
+
+ union {
+ struct {
+ u64 t2047;
+ u64 t4095;
+ u64 t9216;
+ u64 t16383;
+ } bb1;
+ struct {
+ u64 t1519_to_max;
+ u64 unused6;
+ u64 unused7;
+ u64 unused8;
+ } ah1;
+ } u1;
+
u64 txpf;
u64 txpp;
- u64 tlpiec;
- u64 tncl;
+
+ union {
+ struct {
+ u64 tlpiec;
+ u64 tncl;
+ } bb2;
+ struct {
+ u64 unused9;
+ u64 unused10;
+ } ah2;
+ } u2;
+
u64 rbyte;
u64 rxuca;
u64 rxmca;
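Editorial note: the eth_stats change above replaces several fixed frame-size counters with per-chip unions; the bb* views keep the legacy BB buckets while the ah* views expose the AH r1519_to_max/t1519_to_max counters over the same storage. A rough standalone sketch of that aliasing is shown here, trimmed to the u0 union only and with the driver's chip-family check reduced to a plain flag (an assumption for illustration).

#include <stdint.h>
#include <stdio.h>

/* Trimmed copy of the u0 union from eth_stats, for illustration only. */
union rx_range_stats {
	struct {
		uint64_t r1522;
		uint64_t r2047;
		uint64_t r4095;
		uint64_t r9216;
		uint64_t r16383;
	} bb0;
	struct {
		uint64_t unused1;
		uint64_t r1519_to_max;
		uint64_t unused2;
		uint64_t unused3;
		uint64_t unused4;
	} ah0;
};

int main(void)
{
	union rx_range_stats u = { 0 };
	int is_ah = 1;	/* stand-in for the driver's chip-family check */

	u.ah0.r1519_to_max = 12345;	/* occupies the same storage as bb0.r2047 */

	if (is_ah)
		printf("rx 1519..max: %llu\n",
		       (unsigned long long)u.ah0.r1519_to_max);
	else
		printf("rx 1519..2047: %llu\n",
		       (unsigned long long)u.bb0.r2047);
	return 0;
}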
@@ -8943,12 +9765,12 @@ struct dcbx_ets_feature {
#define DCBX_ETS_CBS_SHIFT 3
#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
#define DCBX_ETS_MAX_TCS_SHIFT 4
-#define DCBX_ISCSI_OOO_TC_MASK 0x00000f00
-#define DCBX_ISCSI_OOO_TC_SHIFT 8
+#define DCBX_OOO_TC_MASK 0x00000f00
+#define DCBX_OOO_TC_SHIFT 8
u32 pri_tc_tbl[1];
-#define DCBX_ISCSI_OOO_TC (4)
+#define DCBX_TCP_OOO_TC (4)
-#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1)
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1)
#define DCBX_CEE_STRICT_PRIORITY 0xf
u32 tc_bw_tbl[2];
u32 tc_tsa_tbl[2];
@@ -8957,6 +9779,9 @@ struct dcbx_ets_feature {
#define DCBX_ETS_TSA_ETS 2
};
+#define DCBX_TCP_OOO_TC (4)
+#define DCBX_TCP_OOO_K2_4PORT_TC (3)
+
struct dcbx_app_priority_entry {
u32 entry;
#define DCBX_APP_PRI_MAP_MASK 0x000000ff
@@ -9067,6 +9892,10 @@ struct dcb_dscp_map {
struct public_global {
u32 max_path;
u32 max_ports;
+#define MODE_1P 1
+#define MODE_2P 2
+#define MODE_3P 3
+#define MODE_4P 4
u32 debug_mb_offset;
u32 phymod_dbg_mb_offset;
struct couple_mode_teaming cmt;
@@ -9248,9 +10077,11 @@ struct public_func {
#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
#define DRV_ID_PDA_COMP_VER_SHIFT 0
+#define LOAD_REQ_HSI_VERSION 2
#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
#define DRV_ID_MCP_HSI_VER_SHIFT 16
-#define DRV_ID_MCP_HSI_VER_CURRENT (1 << DRV_ID_MCP_HSI_VER_SHIFT)
+#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \
+ DRV_ID_MCP_HSI_VER_SHIFT)
#define DRV_ID_DRV_TYPE_MASK 0x7f000000
#define DRV_ID_DRV_TYPE_SHIFT 24
@@ -9345,6 +10176,7 @@ enum resource_id_enum {
RESOURCE_NUM_RSS_ENGINES_E = 14,
RESOURCE_LL2_QUEUE_E = 15,
RESOURCE_RDMA_STATS_QUEUE_E = 16,
+ RESOURCE_BDQ_E = 17,
RESOURCE_MAX_NUM,
RESOURCE_NUM_INVALID = 0xFFFFFFFF
};
@@ -9362,6 +10194,46 @@ struct resource_info {
#define RESOURCE_ELEMENT_STRICT (1 << 0)
};
+#define DRV_ROLE_NONE 0
+#define DRV_ROLE_PREBOOT 1
+#define DRV_ROLE_OS 2
+#define DRV_ROLE_KDUMP 3
+
+struct load_req_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_REQ_ROLE_MASK 0x000000FF
+#define LOAD_REQ_ROLE_SHIFT 0
+#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
+#define LOAD_REQ_LOCK_TO_SHIFT 8
+#define LOAD_REQ_LOCK_TO_DEFAULT 0
+#define LOAD_REQ_LOCK_TO_NONE 255
+#define LOAD_REQ_FORCE_MASK 0x000F0000
+#define LOAD_REQ_FORCE_SHIFT 16
+#define LOAD_REQ_FORCE_NONE 0
+#define LOAD_REQ_FORCE_PF 1
+#define LOAD_REQ_FORCE_ALL 2
+#define LOAD_REQ_FLAGS0_MASK 0x00F00000
+#define LOAD_REQ_FLAGS0_SHIFT 20
+#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
+};
+
+struct load_rsp_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_RSP_ROLE_MASK 0x000000FF
+#define LOAD_RSP_ROLE_SHIFT 0
+#define LOAD_RSP_HSI_MASK 0x0000FF00
+#define LOAD_RSP_HSI_SHIFT 8
+#define LOAD_RSP_FLAGS0_MASK 0x000F0000
+#define LOAD_RSP_FLAGS0_SHIFT 16
+#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
+};
+
union drv_union_data {
u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
struct mcp_mac wol_mac;
@@ -9393,6 +10265,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_LOAD_REQ 0x10000000
#define DRV_MSG_CODE_LOAD_DONE 0x11000000
#define DRV_MSG_CODE_INIT_HW 0x12000000
+#define DRV_MSG_CODE_CANCEL_LOAD_REQ 0x13000000
#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
#define DRV_MSG_CODE_INIT_PHY 0x22000000
@@ -9405,12 +10278,14 @@ struct public_drv_mb {
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000
+#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
+#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000
#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
-#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
+#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
@@ -9436,6 +10311,33 @@ struct public_drv_mb {
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
+#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
+
+#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
+#define RESOURCE_CMD_REQ_RESC_SHIFT 0
+#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
+#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5
+#define RESOURCE_OPCODE_REQ 1
+#define RESOURCE_OPCODE_REQ_WO_AGING 2
+#define RESOURCE_OPCODE_REQ_W_AGING 3
+#define RESOURCE_OPCODE_RELEASE 4
+#define RESOURCE_OPCODE_FORCE_RELEASE 5
+#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
+#define RESOURCE_CMD_REQ_AGE_SHIFT 8
+
+#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
+#define RESOURCE_CMD_RSP_OWNER_SHIFT 0
+#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
+#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8
+#define RESOURCE_OPCODE_GNT 1
+#define RESOURCE_OPCODE_BUSY 2
+#define RESOURCE_OPCODE_RELEASED 3
+#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4
+#define RESOURCE_OPCODE_WRONG_OWNER 5
+#define RESOURCE_OPCODE_UNKNOWN_CMD 255
+
+#define RESOURCE_DUMP 0
+
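Editorial note: DRV_MSG_CODE_RESOURCE_CMD carries a request word built from the RESOURCE_CMD_REQ_* fields and returns a response word decoded with the RESOURCE_CMD_RSP_* fields. A small packing/unpacking sketch follows; masks and opcode values are copied from the defines above, while the response value used here is made up purely for illustration.

#include <stdint.h>
#include <stdio.h>

#define RESOURCE_CMD_REQ_RESC_MASK	0x0000001F
#define RESOURCE_CMD_REQ_RESC_SHIFT	0
#define RESOURCE_CMD_REQ_OPCODE_MASK	0x000000E0
#define RESOURCE_CMD_REQ_OPCODE_SHIFT	5
#define RESOURCE_OPCODE_REQ_W_AGING	3
#define RESOURCE_CMD_REQ_AGE_MASK	0x0000FF00
#define RESOURCE_CMD_REQ_AGE_SHIFT	8

#define RESOURCE_CMD_RSP_OWNER_MASK	0x000000FF
#define RESOURCE_CMD_RSP_OPCODE_MASK	0x00000700
#define RESOURCE_CMD_RSP_OPCODE_SHIFT	8
#define RESOURCE_OPCODE_GNT		1

#define RESOURCE_DUMP			0

int main(void)
{
	/* Request ownership of the DUMP resource with a 10-second age. */
	uint32_t req = 0, rsp;

	req |= (RESOURCE_DUMP << RESOURCE_CMD_REQ_RESC_SHIFT) &
	       RESOURCE_CMD_REQ_RESC_MASK;
	req |= (RESOURCE_OPCODE_REQ_W_AGING << RESOURCE_CMD_REQ_OPCODE_SHIFT) &
	       RESOURCE_CMD_REQ_OPCODE_MASK;
	req |= (10 << RESOURCE_CMD_REQ_AGE_SHIFT) & RESOURCE_CMD_REQ_AGE_MASK;
	printf("request word: 0x%08x\n", req);

	/* A made-up response: opcode GNT, granted to PF 3. */
	rsp = (RESOURCE_OPCODE_GNT << RESOURCE_CMD_RSP_OPCODE_SHIFT) | 3;
	if (((rsp & RESOURCE_CMD_RSP_OPCODE_MASK) >>
	     RESOURCE_CMD_RSP_OPCODE_SHIFT) == RESOURCE_OPCODE_GNT)
		printf("granted, owner PF %u\n",
		       rsp & RESOURCE_CMD_RSP_OWNER_MASK);
	return 0;
}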
#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000
#define DRV_MSG_CODE_OS_WOL 0x002e0000
@@ -9524,12 +10426,16 @@ struct public_drv_mb {
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
+#define FW_MSG_CODE_UNSUPPORTED 0x00000000
#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 0x10210000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10230000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT 0x10310000
#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
@@ -9549,6 +10455,10 @@ struct public_drv_mb {
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 fw_mb_param;
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
	/* get pf rdma protocol command response */
#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0
@@ -9659,6 +10569,8 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xC
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xD
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xE
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xF
+
u32 e_lane_cfg1;
u32 e_lane_cfg2;
u32 f_lane_cfg1;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index 899cad7f97ea..a05feb38c6ee 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -58,6 +58,7 @@ struct qed_ptt {
struct list_head list_entry;
unsigned int idx;
struct pxp_ptt_entry pxp;
+ u8 hwfn_id;
};
struct qed_ptt_pool {
@@ -79,6 +80,7 @@ int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
p_pool->ptts[i].idx = i;
p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
p_pool->ptts[i].pxp.pretend.control = 0;
+ p_pool->ptts[i].hwfn_id = p_hwfn->my_id;
if (i >= RESERVED_PTT_MAX)
list_add(&p_pool->ptts[i].list_entry,
&p_pool->free_list);
@@ -193,6 +195,11 @@ static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
offset = hw_addr - win_hw_addr;
+ if (p_ptt->hwfn_id != p_hwfn->my_id)
+ DP_NOTICE(p_hwfn,
+ "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
+ p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);
+
/* Verify the address is within the window */
if (hw_addr < win_hw_addr ||
offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
@@ -800,55 +807,3 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
return rc;
}
-u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
- enum protocol_type proto, union qed_qm_pq_params *p_params)
-{
- u16 pq_id = 0;
-
- if ((proto == PROTOCOLID_CORE ||
- proto == PROTOCOLID_ETH ||
- proto == PROTOCOLID_ISCSI ||
- proto == PROTOCOLID_ROCE) && !p_params) {
- DP_NOTICE(p_hwfn,
- "Protocol %d received NULL PQ params\n", proto);
- return 0;
- }
-
- switch (proto) {
- case PROTOCOLID_CORE:
- if (p_params->core.tc == LB_TC)
- pq_id = p_hwfn->qm_info.pure_lb_pq;
- else if (p_params->core.tc == OOO_LB_TC)
- pq_id = p_hwfn->qm_info.ooo_pq;
- else
- pq_id = p_hwfn->qm_info.offload_pq;
- break;
- case PROTOCOLID_ETH:
- pq_id = p_params->eth.tc;
- if (p_params->eth.is_vf)
- pq_id += p_hwfn->qm_info.vf_queues_offset +
- p_params->eth.vf_id;
- break;
- case PROTOCOLID_ISCSI:
- if (p_params->iscsi.q_idx == 1)
- pq_id = p_hwfn->qm_info.pure_ack_pq;
- break;
- case PROTOCOLID_ROCE:
- if (p_params->roce.dcqcn)
- pq_id = p_params->roce.qpid;
- else
- pq_id = p_hwfn->qm_info.offload_pq;
- if (pq_id > p_hwfn->qm_info.num_pf_rls)
- pq_id = p_hwfn->qm_info.offload_pq;
- break;
- case PROTOCOLID_FCOE:
- pq_id = p_hwfn->qm_info.offload_pq;
- break;
- default:
- pq_id = 0;
- }
-
- pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);
-
- return pq_id;
-}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index 9277264d2e65..f2505c691c26 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -297,9 +297,6 @@ union qed_qm_pq_params {
} roce;
};
-u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
- enum protocol_type proto, union qed_qm_pq_params *params);
-
int qed_init_fw_data(struct qed_dev *cdev,
const u8 *fw_data);
#endif
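Editorial note: with qed_get_qm_pq() removed from qed_hw.c and its prototype dropped from qed_hw.h, callers pick their PQ through the new qed_get_cm_pq_idx*() helpers instead (see the qed_iscsi.c and qed_l2.c hunks further down, which switch to PQ_FLAGS_OFLD/PQ_FLAGS_ACK and qed_get_cm_pq_idx_mcos()). As a reminder of what the removed helper computed for the CORE protocol, here is a small standalone re-creation of just that branch; the struct and the TC values are simplified stand-ins, not the driver's own, and the final CM_TX_PQ_BASE/RESC_START offset is omitted.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the qm_info fields the removed helper read. */
struct qm_info_stub {
	uint16_t pure_lb_pq;
	uint16_t ooo_pq;
	uint16_t offload_pq;
};

enum { LB_TC = 0, OOO_LB_TC = 1 };	/* illustrative TC values */

/* Mirrors only the PROTOCOLID_CORE branch of the removed qed_get_qm_pq(). */
static uint16_t core_pq(const struct qm_info_stub *qm, uint8_t tc)
{
	if (tc == LB_TC)
		return qm->pure_lb_pq;
	if (tc == OOO_LB_TC)
		return qm->ooo_pq;
	return qm->offload_pq;
}

int main(void)
{
	struct qm_info_stub qm = { .pure_lb_pq = 1, .ooo_pq = 2, .offload_pq = 3 };

	printf("LB=%u OOO=%u other=%u\n",
	       core_pq(&qm, LB_TC), core_pq(&qm, OOO_LB_TC), core_pq(&qm, 5));
	return 0;
}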
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index d891a6852695..67200c5498ab 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -215,13 +215,6 @@ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
{
u32 qm_line_crd;
- /* In A0 - Limit the size of pbf queue so that only 511 commands with
- * the minimum size of 4 (FCoE minimum size)
- */
- bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
-
- if (is_bb_a0)
- cmdq_lines = min_t(u32, cmdq_lines, 1022);
qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
(u32)cmdq_lines);
@@ -343,13 +336,11 @@ static void qed_tx_pq_map_rt_init(
u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
QM_PF_QUEUE_GROUP_SIZE;
- bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
u16 i, pq_id, pq_group;
/* a bit per Tx PQ indicating if the PQ is associated with a VF */
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
- u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
- u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
+ u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
u32 mem_addr_4kb = base_mem_addr_4kb;
@@ -371,6 +362,10 @@ static void qed_tx_pq_map_rt_init(
bool is_vf_pq = (i >= p_params->num_pf_pqs);
struct qm_rf_pq_map tx_pq_map;
+ bool rl_valid = p_params->pq_params[i].rl_valid &&
+ (p_params->pq_params[i].vport_id <
+ MAX_QM_GLOBAL_RLS);
+
/* update first Tx PQ of VPORT/TC */
u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
p_params->start_vport;
@@ -389,14 +384,18 @@ static void qed_tx_pq_map_rt_init(
(p_params->pf_id <<
QM_WFQ_VP_PQ_PF_SHIFT));
}
+
+ if (p_params->pq_params[i].rl_valid && !rl_valid)
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT ID for rate limiter configuration");
/* fill PQ map entry */
memset(&tx_pq_map, 0, sizeof(tx_pq_map));
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
- p_params->pq_params[i].rl_valid ? 1 : 0);
+ SET_FIELD(tx_pq_map.reg,
+ QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
- p_params->pq_params[i].rl_valid ?
+ rl_valid ?
p_params->pq_params[i].vport_id : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
@@ -413,8 +412,9 @@ static void qed_tx_pq_map_rt_init(
/* if PQ is associated with a VF, add indication
* to PQ VF mask
*/
- tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
- (1 << (pq_id % tx_pq_vf_mask_width));
+ tx_pq_vf_mask[pq_id /
+ QM_PF_QUEUE_GROUP_SIZE] |=
+ BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
mem_addr_4kb += vport_pq_mem_4kb;
} else {
mem_addr_4kb += pq_mem_4kb;
@@ -480,8 +480,8 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
if (p_params->pf_id < MAX_NUM_PFS_BB)
crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
else
- crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
- (p_params->pf_id % MAX_NUM_PFS_BB);
+ crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
+ crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;
inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
@@ -498,11 +498,11 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
QM_WFQ_CRD_REG_SIGN_BIT);
}
- STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
- inc_val);
STORE_RT_REG(p_hwfn,
QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
+ inc_val);
return 0;
}
@@ -576,6 +576,12 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
{
u8 i, vport_id;
+ if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT ID for rate limiter configuration");
+ return -1;
+ }
+
/* go over all PF VPORTs */
for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
@@ -785,6 +791,12 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
{
u32 inc_val = QM_RL_INC_VAL(vport_rl);
+ if (vport_id >= MAX_QM_GLOBAL_RLS) {
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT ID for rate limiter configuration");
+ return -1;
+ }
+
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
return -1;
@@ -940,12 +952,6 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
eth_geneve_enable ? 1 : 0);
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
- /* comp ver */
- reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
- qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
- qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
- qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
-
/* EDPM with geneve tunnel not supported in BB_B0 */
if (QED_IS_BB_B0(p_hwfn->cdev))
return;
@@ -955,3 +961,132 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
ip_geneve_enable ? 1 : 0);
}
+
+#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
+#define PARSER_ETH_CONN_CM_HDR (0x0)
+#define CAM_LINE_SIZE sizeof(u32)
+#define RAM_LINE_SIZE sizeof(u64)
+#define REG_SIZE sizeof(u32)
+
+void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 pf_id)
+{
+ union gft_cam_line_union camline;
+ struct gft_ram_line ramline;
+ u32 *p_ramline, i;
+
+ p_ramline = (u32 *)&ramline;
+
+	/* stop using gft logic */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
+ memset(&camline, 0, sizeof(union gft_cam_line_union));
+ qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
+ camline.cam_line_mapped.camline);
+	memset(&ramline, 0, sizeof(ramline));
+
+ for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++) {
+ u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM;
+
+ hw_addr += (RAM_LINE_SIZE * pf_id + i * REG_SIZE);
+
+ qed_wr(p_hwfn, p_ptt, hw_addr, *(p_ramline + i));
+ }
+}
+
+void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 pf_id, bool tcp, bool udp,
+ bool ipv4, bool ipv6)
+{
+ u32 rfs_cm_hdr_event_id, *p_ramline;
+ union gft_cam_line_union camline;
+ struct gft_ram_line ramline;
+ int i;
+
+ rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
+ p_ramline = (u32 *)&ramline;
+
+ if (!ipv6 && !ipv4)
+ DP_NOTICE(p_hwfn,
+			  "set_rfs_mode_enable: must accept at least one of - ipv4 or ipv6");
+ if (!tcp && !udp)
+ DP_NOTICE(p_hwfn,
+			  "set_rfs_mode_enable: must accept at least one of - udp or tcp");
+
+ rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
+ PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+ rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
+ PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+ qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+
+ /* Configure Registers for RFS mode */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
+ camline.cam_line_mapped.camline = 0;
+
+ /* cam line is now valid!! */
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_VALID, 1);
+
+ /* filters are per PF!! */
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1);
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+ if (!(tcp && udp)) {
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, 1);
+ if (tcp)
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
+ GFT_PROFILE_TCP_PROTOCOL);
+ else
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
+ GFT_PROFILE_UDP_PROTOCOL);
+ }
+
+ if (!(ipv4 && ipv6)) {
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
+ if (ipv4)
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_IP_VERSION,
+ GFT_PROFILE_IPV4);
+ else
+ SET_FIELD(camline.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_IP_VERSION,
+ GFT_PROFILE_IPV6);
+ }
+
+ /* write characteristics to cam */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
+ camline.cam_line_mapped.camline);
+ camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt,
+ PRS_REG_GFT_CAM +
+ CAM_LINE_SIZE * pf_id);
+
+ /* write line to RAM - compare to filter 4 tuple */
+ ramline.low32bits = 0;
+ ramline.high32bits = 0;
+ SET_FIELD(ramline.high32bits, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ramline.high32bits, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ramline.low32bits, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(ramline.low32bits, GFT_RAM_LINE_DST_PORT, 1);
+
+ /* each iteration write to reg */
+ for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
+ qed_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
+ i * REG_SIZE, *(p_ramline + i));
+
+ /* set default profile so that no filter match will happen */
+ ramline.low32bits = 0xffff;
+ ramline.high32bits = 0xffff;
+
+ for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
+ qed_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+ PRS_GFT_CAM_LINES_NO_MATCH + i * REG_SIZE,
+ *(p_ramline + i));
+}
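Editorial note: both RFS helpers above write a profile mask one register at a time, at PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + i * REG_SIZE. The address arithmetic is easy to get wrong, so here is a tiny standalone sketch that only prints the register offsets the loop would touch for a given pf_id; the base address used below is a placeholder, not the real register value.

#include <stdint.h>
#include <stdio.h>

#define RAM_LINE_SIZE	sizeof(uint64_t)
#define REG_SIZE	sizeof(uint32_t)

/* Placeholder base; the real PRS_REG_GFT_PROFILE_MASK_RAM value differs. */
#define GFT_PROFILE_MASK_RAM_BASE	0x1f1000

int main(void)
{
	unsigned int pf_id = 2, i;

	for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
		printf("pf %u, reg %u -> 0x%lx\n", pf_id, i,
		       (unsigned long)(GFT_PROFILE_MASK_RAM_BASE +
				       RAM_LINE_SIZE * pf_id + i * REG_SIZE));
	return 0;
}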
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 243b64e0d4dc..4a2e7be5bf72 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -554,7 +554,7 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
}
/* First Dword contains metadata and should be skipped */
- buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));
+ buf_hdr = (struct bin_buffer_hdr *)data;
offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 84310b60849b..0ed24d6e6c65 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -2500,8 +2500,9 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
/* Configure pi coalescing if set */
if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+ u8 num_tc = p_hwfn->hw_info.num_hw_tc;
u8 timeset, timer_res;
- u8 num_tc = 1, i;
+ u8 i;
/* timeset = (coalesce >> timer-res), timeset is 7bit wide */
if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 098766f7fe88..339c91dfa658 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -181,6 +181,15 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_params = &p_hwfn->pf_params.iscsi_pf_params;
p_queue = &p_init->q_params;
+ /* Sanity */
+ if (p_params->num_queues > p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]) {
+ DP_ERR(p_hwfn,
+ "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
+ p_params->num_queues,
+		       p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]);
+ return -EINVAL;
+ }
+
SET_FIELD(p_init->hdr.flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, ISCSI_SLOW_PATH_LAYER_CODE);
p_init->hdr.op_code = ISCSI_RAMROD_CMD_ID_INIT_FUNC;
@@ -216,7 +225,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
}
- p_queue->bdq_resource_id = ISCSI_BDQ_ID(p_hwfn->port_id);
+ p_queue->bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_RQ],
p_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
@@ -270,11 +279,10 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
struct tcp_offload_params *p_tcp = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- union qed_qm_pq_params pq_params;
- u16 pq0_id = 0, pq1_id = 0;
dma_addr_t r2tq_pbl_addr;
dma_addr_t xhq_pbl_addr;
dma_addr_t uhq_pbl_addr;
+ u16 physical_q;
int rc = 0;
u32 dval;
u16 wval;
@@ -297,16 +305,14 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.iscsi_conn_offload;
/* Transmission PQ is the first of the PF */
- memset(&pq_params, 0, sizeof(pq_params));
- pq0_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ISCSI, &pq_params);
- p_conn->physical_q0 = cpu_to_le16(pq0_id);
- p_ramrod->iscsi.physical_q0 = cpu_to_le16(pq0_id);
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ p_conn->physical_q0 = cpu_to_le16(physical_q);
+ p_ramrod->iscsi.physical_q0 = cpu_to_le16(physical_q);
/* iSCSI Pure-ACK PQ */
- pq_params.iscsi.q_idx = 1;
- pq1_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ISCSI, &pq_params);
- p_conn->physical_q1 = cpu_to_le16(pq1_id);
- p_ramrod->iscsi.physical_q1 = cpu_to_le16(pq1_id);
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
+ p_conn->physical_q1 = cpu_to_le16(physical_q);
+ p_ramrod->iscsi.physical_q1 = cpu_to_le16(physical_q);
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN;
SET_FIELD(p_ramrod->hdr.flags, ISCSI_SLOW_PATH_HDR_LAYER_CODE,
@@ -593,21 +599,31 @@ static void __iomem *qed_iscsi_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
u8 bdq_id)
{
- u8 bdq_function_id = ISCSI_BDQ_ID(p_hwfn->port_id);
-
- return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id,
- bdq_id);
+ if (RESC_NUM(p_hwfn, QED_BDQ)) {
+ return (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
+ QED_BDQ),
+ bdq_id);
+ } else {
+ DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+ return NULL;
+ }
}
static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
u8 bdq_id)
{
- u8 bdq_function_id = ISCSI_BDQ_ID(p_hwfn->port_id);
-
- return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM +
- TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id,
- bdq_id);
+ if (RESC_NUM(p_hwfn, QED_BDQ)) {
+ return (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
+ QED_BDQ),
+ bdq_id);
+ } else {
+ DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+ return NULL;
+ }
}
static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn,
@@ -857,6 +873,8 @@ static void _qed_iscsi_get_tstats(struct qed_hwfn *p_hwfn,
HILO_64_REGPAIR(tstats.iscsi_rx_bytes_cnt);
p_stats->iscsi_rx_packet_cnt =
HILO_64_REGPAIR(tstats.iscsi_rx_packet_cnt);
+ p_stats->iscsi_rx_new_ooo_isle_events_cnt =
+ HILO_64_REGPAIR(tstats.iscsi_rx_new_ooo_isle_events_cnt);
p_stats->iscsi_cmdq_threshold_cnt =
le32_to_cpu(tstats.iscsi_cmdq_threshold_cnt);
p_stats->iscsi_rq_threshold_cnt =
@@ -1003,6 +1021,8 @@ static int qed_fill_iscsi_dev_info(struct qed_dev *cdev,
info->secondary_bdq_rq_addr =
qed_iscsi_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);
+ info->num_cqs = FEAT_NUM(hwfn, QED_ISCSI_CQ);
+
return rc;
}
@@ -1304,6 +1324,26 @@ static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats)
return qed_iscsi_get_stats(QED_LEADING_HWFN(cdev), stats);
}
+void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+ struct qed_mcp_iscsi_stats *stats)
+{
+ struct qed_iscsi_stats proto_stats;
+
+ /* Retrieve FW statistics */
+ memset(&proto_stats, 0, sizeof(proto_stats));
+ if (qed_iscsi_stats(cdev, &proto_stats)) {
+ DP_VERBOSE(cdev, QED_MSG_STORAGE,
+ "Failed to collect ISCSI statistics\n");
+ return;
+ }
+
+ /* Translate FW statistics into struct */
+ stats->rx_pdus = proto_stats.iscsi_rx_total_pdu_cnt;
+ stats->tx_pdus = proto_stats.iscsi_tx_total_pdu_cnt;
+ stats->rx_bytes = proto_stats.iscsi_rx_bytes_cnt;
+ stats->tx_bytes = proto_stats.iscsi_tx_bytes_cnt;
+}
+
static const struct qed_iscsi_ops qed_iscsi_ops_pass = {
.common = &qed_common_ops_pass,
.ll2 = &qed_ll2_ops_pass,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
index 20c187f4ed0b..ae98f772cbc0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
@@ -64,13 +64,25 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
void qed_iscsi_free(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info);
+
+/**
+ * @brief - Fills provided statistics struct with statistics.
+ *
+ * @param cdev
+ * @param stats - points to struct that will be filled with statistics.
+ */
+void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+ struct qed_mcp_iscsi_stats *stats);
#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
static inline struct qed_iscsi_info *qed_iscsi_alloc(
struct qed_hwfn *p_hwfn) { return NULL; }
static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info) {}
static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn,
- struct qed_iscsi_info *p_iscsi_info) {}
+ struct qed_iscsi_info *p_iscsi_info) {}
+static inline void
+qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+ struct qed_mcp_iscsi_stats *stats) {}
#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index df932be5a4e5..746fed4099c8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -938,15 +938,12 @@ qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
dma_addr_t pbl_addr,
u16 pbl_size, void __iomem **pp_doorbell)
{
- union qed_qm_pq_params pq_params;
int rc;
- memset(&pq_params, 0, sizeof(pq_params));
rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
pbl_addr, pbl_size,
- qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH,
- &pq_params));
+ qed_get_cm_pq_idx_mcos(p_hwfn, tc));
if (rc)
return rc;
@@ -1470,13 +1467,20 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
memset(&pstats, 0, sizeof(pstats));
qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
- p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
- p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
- p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
- p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
- p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
- p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
- p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
+ p_stats->common.tx_ucast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->common.tx_mcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->common.tx_bcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->common.tx_ucast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->common.tx_mcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->common.tx_bcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ p_stats->common.tx_err_drop_pkts +=
+ HILO_64_REGPAIR(pstats.error_drop_pkts);
}
static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
@@ -1502,10 +1506,10 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
memset(&tstats, 0, sizeof(tstats));
qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
- p_stats->mftag_filter_discards +=
- HILO_64_REGPAIR(tstats.mftag_filter_discard);
- p_stats->mac_filter_discards +=
- HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+ p_stats->common.mftag_filter_discards +=
+ HILO_64_REGPAIR(tstats.mftag_filter_discard);
+ p_stats->common.mac_filter_discards +=
+ HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}
static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
@@ -1539,12 +1543,15 @@ static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
memset(&ustats, 0, sizeof(ustats));
qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
- p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
- p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
- p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
- p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
- p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
- p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+ p_stats->common.rx_ucast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->common.rx_mcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->common.rx_bcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
@@ -1578,23 +1585,26 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
memset(&mstats, 0, sizeof(mstats));
qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
- p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
- p_stats->packet_too_big_discard +=
- HILO_64_REGPAIR(mstats.packet_too_big_discard);
- p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
- p_stats->tpa_coalesced_pkts +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
- p_stats->tpa_coalesced_events +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_events);
- p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
- p_stats->tpa_coalesced_bytes +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+ p_stats->common.no_buff_discards +=
+ HILO_64_REGPAIR(mstats.no_buff_discard);
+ p_stats->common.packet_too_big_discard +=
+ HILO_64_REGPAIR(mstats.packet_too_big_discard);
+ p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
+ p_stats->common.tpa_coalesced_pkts +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+ p_stats->common.tpa_coalesced_events +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+ p_stats->common.tpa_aborts_num +=
+ HILO_64_REGPAIR(mstats.tpa_aborts_num);
+ p_stats->common.tpa_coalesced_bytes +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_eth_stats *p_stats)
{
+ struct qed_eth_stats_common *p_common = &p_stats->common;
struct port_stats port_stats;
int j;
@@ -1605,54 +1615,75 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
offsetof(struct public_port, stats),
sizeof(port_stats));
- p_stats->rx_64_byte_packets += port_stats.eth.r64;
- p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
- p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
- p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
- p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
- p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
- p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
- p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
- p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
- p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
- p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
- p_stats->rx_crc_errors += port_stats.eth.rfcs;
- p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
- p_stats->rx_pause_frames += port_stats.eth.rxpf;
- p_stats->rx_pfc_frames += port_stats.eth.rxpp;
- p_stats->rx_align_errors += port_stats.eth.raln;
- p_stats->rx_carrier_errors += port_stats.eth.rfcr;
- p_stats->rx_oversize_packets += port_stats.eth.rovr;
- p_stats->rx_jabbers += port_stats.eth.rjbr;
- p_stats->rx_undersize_packets += port_stats.eth.rund;
- p_stats->rx_fragments += port_stats.eth.rfrg;
- p_stats->tx_64_byte_packets += port_stats.eth.t64;
- p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
- p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
- p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
- p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
- p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
- p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
- p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
- p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
- p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
- p_stats->tx_pause_frames += port_stats.eth.txpf;
- p_stats->tx_pfc_frames += port_stats.eth.txpp;
- p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
- p_stats->tx_total_collisions += port_stats.eth.tncl;
- p_stats->rx_mac_bytes += port_stats.eth.rbyte;
- p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
- p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
- p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
- p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
- p_stats->tx_mac_bytes += port_stats.eth.tbyte;
- p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
- p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
- p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
- p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
+ p_common->rx_64_byte_packets += port_stats.eth.r64;
+ p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
+ p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
+ p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
+ p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
+ p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
+ p_common->rx_crc_errors += port_stats.eth.rfcs;
+ p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
+ p_common->rx_pause_frames += port_stats.eth.rxpf;
+ p_common->rx_pfc_frames += port_stats.eth.rxpp;
+ p_common->rx_align_errors += port_stats.eth.raln;
+ p_common->rx_carrier_errors += port_stats.eth.rfcr;
+ p_common->rx_oversize_packets += port_stats.eth.rovr;
+ p_common->rx_jabbers += port_stats.eth.rjbr;
+ p_common->rx_undersize_packets += port_stats.eth.rund;
+ p_common->rx_fragments += port_stats.eth.rfrg;
+ p_common->tx_64_byte_packets += port_stats.eth.t64;
+ p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
+ p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
+ p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
+ p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
+ p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
+ p_common->tx_pause_frames += port_stats.eth.txpf;
+ p_common->tx_pfc_frames += port_stats.eth.txpp;
+ p_common->rx_mac_bytes += port_stats.eth.rbyte;
+ p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
+ p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
+ p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
+ p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
+ p_common->tx_mac_bytes += port_stats.eth.tbyte;
+ p_common->tx_mac_uc_packets += port_stats.eth.txuca;
+ p_common->tx_mac_mc_packets += port_stats.eth.txmca;
+ p_common->tx_mac_bc_packets += port_stats.eth.txbca;
+ p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
for (j = 0; j < 8; j++) {
- p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
- p_stats->brb_discards += port_stats.brb.brb_discard[j];
+ p_common->brb_truncates += port_stats.brb.brb_truncate[j];
+ p_common->brb_discards += port_stats.brb.brb_discard[j];
+ }
+
+ if (QED_IS_BB(p_hwfn->cdev)) {
+ struct qed_eth_stats_bb *p_bb = &p_stats->bb;
+
+ p_bb->rx_1519_to_1522_byte_packets +=
+ port_stats.eth.u0.bb0.r1522;
+ p_bb->rx_1519_to_2047_byte_packets +=
+ port_stats.eth.u0.bb0.r2047;
+ p_bb->rx_2048_to_4095_byte_packets +=
+ port_stats.eth.u0.bb0.r4095;
+ p_bb->rx_4096_to_9216_byte_packets +=
+ port_stats.eth.u0.bb0.r9216;
+ p_bb->rx_9217_to_16383_byte_packets +=
+ port_stats.eth.u0.bb0.r16383;
+ p_bb->tx_1519_to_2047_byte_packets +=
+ port_stats.eth.u1.bb1.t2047;
+ p_bb->tx_2048_to_4095_byte_packets +=
+ port_stats.eth.u1.bb1.t4095;
+ p_bb->tx_4096_to_9216_byte_packets +=
+ port_stats.eth.u1.bb1.t9216;
+ p_bb->tx_9217_to_16383_byte_packets +=
+ port_stats.eth.u1.bb1.t16383;
+ p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
+ p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
+ } else {
+ struct qed_eth_stats_ah *p_ah = &p_stats->ah;
+
+ p_ah->rx_1519_to_max_byte_packets +=
+ port_stats.eth.u0.ah0.r1519_to_max;
+		p_ah->tx_1519_to_max_byte_packets +=
+ port_stats.eth.u1.ah1.t1519_to_max;
}
}
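The hunk above splits the per-port counters into a common block plus BB-only or AH-only counters selected by QED_IS_BB(). A self-contained sketch of the same layout idea, with invented field names rather than the driver's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct eth_stats_common { uint64_t rx_ucast_pkts; };	/* valid on every family */
struct eth_stats_bb { uint64_t tx_total_collisions; };	/* BB-only MAC counter */
struct eth_stats_ah { uint64_t rx_1519_to_max; };	/* AH merges the large-frame bins */

struct eth_stats {
	struct eth_stats_common common;
	union {
		struct eth_stats_bb bb;
		struct eth_stats_ah ah;
	};
};

static void print_stats(const struct eth_stats *s, bool is_bb)
{
	printf("rx_ucast=%llu\n", (unsigned long long)s->common.rx_ucast_pkts);
	if (is_bb)	/* family-specific counters only exist on one chip type */
		printf("collisions=%llu\n", (unsigned long long)s->bb.tx_total_collisions);
	else
		printf("rx>1518=%llu\n", (unsigned long long)s->ah.rx_1519_to_max);
}

int main(void)
{
	struct eth_stats s = { .common = { .rx_ucast_pkts = 10 },
			       .ah = { .rx_1519_to_max = 3 } };

	print_stats(&s, false);
	return 0;
}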
@@ -1768,6 +1799,84 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
_qed_get_vport_stats(cdev, cdev->reset_stats);
}
+static void
+qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_arfs_config_params *p_cfg_params)
+{
+ if (p_cfg_params->arfs_enable) {
+ qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_cfg_params->tcp, p_cfg_params->udp,
+ p_cfg_params->ipv4, p_cfg_params->ipv6);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+			   "tcp = %s, udp = %s, ipv4 = %s, ipv6 = %s\n",
+ p_cfg_params->tcp ? "Enable" : "Disable",
+ p_cfg_params->udp ? "Enable" : "Disable",
+ p_cfg_params->ipv4 ? "Enable" : "Disable",
+ p_cfg_params->ipv6 ? "Enable" : "Disable");
+ } else {
+ qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode : %s\n",
+ p_cfg_params->arfs_enable ? "Enable" : "Disable");
+}
+
+static int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_spq_comp_cb *p_cb,
+ dma_addr_t p_addr, u16 length, u16 qid,
+ u8 vport_id, bool b_is_add)
+{
+ struct rx_update_gft_filter_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ u16 abs_rx_q_id = 0;
+ u8 abs_vport_id = 0;
+ int rc = -EINVAL;
+
+ rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc)
+ return rc;
+
+ rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
+ if (rc)
+ return rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ if (p_cb) {
+ init_data.comp_mode = QED_SPQ_MODE_CB;
+ init_data.p_comp_data = p_cb;
+ } else {
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+ }
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_GFT_UPDATE_FILTER,
+ PROTOCOLID_ETH, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_update_gft;
+ DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
+ p_ramrod->pkt_hdr_length = cpu_to_le16(length);
+ p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id);
+ p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->filter_type = RFS_FILTER_TYPE;
+ p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
+ abs_vport_id, abs_rx_q_id,
+ b_is_add ? "Adding" : "Removing", (u64)p_addr, length);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
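qed_configure_rfs_ntuple_filter() above takes an optional completion callback: with one it posts the GFT ramrod in QED_SPQ_MODE_CB, otherwise it falls back to the blocking QED_SPQ_MODE_EBLOCK mode. A stripped-down illustration of that callback-or-block request pattern; the types and names below are invented for the sketch:

#include <stdio.h>

typedef void (*completion_cb_t)(void *cookie, int status);

/* Post a request; run the callback if one was given, otherwise act synchronously. */
static int post_request(int filter_id, completion_cb_t cb, void *cookie)
{
	int status = 0;	/* would come from the firmware completion */

	if (cb) {
		cb(cookie, status);	/* asynchronous-style completion */
		return 0;
	}
	/* no callback: caller wants blocking semantics */
	printf("filter %d completed synchronously\n", filter_id);
	return status;
}

static void on_done(void *cookie, int status)
{
	printf("filter %s done, status %d\n", (const char *)cookie, status);
}

int main(void)
{
	post_request(1, on_done, "arfs-flow-1");
	post_request(2, NULL, NULL);
	return 0;
}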
static int qed_fill_eth_dev_info(struct qed_dev *cdev,
struct qed_dev_eth_info *info)
{
@@ -1898,7 +2007,11 @@ static int qed_start_vport(struct qed_dev *cdev,
return rc;
}
- qed_hw_start_fastpath(p_hwfn);
+ rc = qed_hw_start_fastpath(p_hwfn);
+ if (rc) {
+ DP_ERR(cdev, "Failed to start VPORT fastpath\n");
+ return rc;
+ }
DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
"Started V-PORT %d with MTU %d\n",
@@ -2141,7 +2254,13 @@ static int qed_start_txq(struct qed_dev *cdev,
#define QED_HW_STOP_RETRY_LIMIT (10)
static int qed_fastpath_stop(struct qed_dev *cdev)
{
- qed_hw_stop_fastpath(cdev);
+ int rc;
+
+ rc = qed_hw_stop_fastpath(cdev);
+ if (rc) {
+ DP_ERR(cdev, "Failed to stop Fastpath\n");
+ return rc;
+ }
return 0;
}
@@ -2166,31 +2285,46 @@ static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
static int qed_tunn_configure(struct qed_dev *cdev,
struct qed_tunn_params *tunn_params)
{
- struct qed_tunn_update_params tunn_info;
+ struct qed_tunnel_info tunn_info;
int i, rc;
- if (IS_VF(cdev))
- return 0;
-
memset(&tunn_info, 0, sizeof(tunn_info));
- if (tunn_params->update_vxlan_port == 1) {
- tunn_info.update_vxlan_udp_port = 1;
- tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
+ if (tunn_params->update_vxlan_port) {
+ tunn_info.vxlan_port.b_update_port = true;
+ tunn_info.vxlan_port.port = tunn_params->vxlan_port;
}
- if (tunn_params->update_geneve_port == 1) {
- tunn_info.update_geneve_udp_port = 1;
- tunn_info.geneve_udp_port = tunn_params->geneve_port;
+ if (tunn_params->update_geneve_port) {
+ tunn_info.geneve_port.b_update_port = true;
+ tunn_info.geneve_port.port = tunn_params->geneve_port;
}
for_each_hwfn(cdev, i) {
struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_tunnel_info *tun;
+
+ tun = &hwfn->cdev->tunnel;
rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
QED_SPQ_MODE_EBLOCK, NULL);
-
if (rc)
return rc;
+
+ if (IS_PF_SRIOV(hwfn)) {
+ u16 vxlan_port, geneve_port;
+ int j;
+
+ vxlan_port = tun->vxlan_port.port;
+ geneve_port = tun->geneve_port.port;
+
+ qed_for_each_vf(hwfn, j) {
+ qed_iov_bulletin_set_udp_ports(hwfn, j,
+ vxlan_port,
+ geneve_port);
+ }
+
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ }
}
return 0;
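The reworked qed_tunn_configure() above keys every tunnel type on a b_update_port flag plus a port value, so an update only touches the protocols the caller asked about. A minimal standalone sketch of that flag-plus-value pattern (struct names are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct udp_port_update {
	bool b_update_port;	/* only apply the value below when set */
	uint16_t port;
};

struct tunnel_update {
	struct udp_port_update vxlan_port;
	struct udp_port_update geneve_port;
};

static void apply_update(const struct tunnel_update *upd,
			 uint16_t *vxlan, uint16_t *geneve)
{
	if (upd->vxlan_port.b_update_port)
		*vxlan = upd->vxlan_port.port;
	if (upd->geneve_port.b_update_port)
		*geneve = upd->geneve_port.port;
}

int main(void)
{
	uint16_t vxlan = 4789, geneve = 6081;
	struct tunnel_update upd = {
		.vxlan_port = { .b_update_port = true, .port = 8472 },
	};

	apply_update(&upd, &vxlan, &geneve);	/* geneve left untouched */
	printf("vxlan=%u geneve=%u\n", vxlan, geneve);
	return 0;
}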
@@ -2315,6 +2449,59 @@ static int qed_configure_filter(struct qed_dev *cdev,
}
}
+static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_arfs_config_params arfs_config_params;
+
+ memset(&arfs_config_params, 0, sizeof(arfs_config_params));
+ arfs_config_params.tcp = true;
+ arfs_config_params.udp = true;
+ arfs_config_params.ipv4 = true;
+ arfs_config_params.ipv6 = true;
+ arfs_config_params.arfs_enable = en_searcher;
+
+ qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
+ &arfs_config_params);
+ return 0;
+}
+
+static void
+qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
+ void *cookie, union event_ring_data *data,
+ u8 fw_return_code)
+{
+ struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
+ void *dev = p_hwfn->cdev->ops_cookie;
+
+ op->arfs_filter_op(dev, cookie, fw_return_code);
+}
+
+static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
+ dma_addr_t mapping, u16 length,
+ u16 vport_id, u16 rx_queue_id,
+ bool add_filter)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_spq_comp_cb cb;
+ int rc = -EINVAL;
+
+ cb.function = qed_arfs_sp_response_handler;
+ cb.cookie = cookie;
+
+ rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt,
+ &cb, mapping, length, rx_queue_id,
+ vport_id, add_filter);
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "Failed to issue a-RFS filter configuration\n");
+ else
+ DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
+ "Successfully issued a-RFS filter configuration\n");
+
+ return rc;
+}
+
static int qed_fp_cqe_completion(struct qed_dev *dev,
u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
{
@@ -2356,6 +2543,8 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
.eth_cqe_completion = &qed_fp_cqe_completion,
.get_vport_stats = &qed_get_vport_stats,
.tunn_config = &qed_tunn_configure,
+ .ntuple_filter_config = &qed_ntuple_arfs_filter_config,
+ .configure_arfs_searcher = &qed_configure_arfs_searcher,
};
const struct qed_eth_ops *qed_get_eth_ops(void)
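The two new entries in qed_eth_ops above let the L2 client enable the aRFS searcher once and then add or remove n-tuple filters per flow. A toy, self-contained ops table showing that calling order; the real callbacks take the qed_dev, a DMA-mapped packet header, vport and rx-queue ids rather than these simplified parameters:

#include <stdbool.h>
#include <stdio.h>

struct eth_ops {
	int (*configure_arfs_searcher)(bool enable);
	int (*ntuple_filter_config)(int rx_queue, bool add);
};

static int demo_searcher(bool enable)
{
	printf("searcher %s\n", enable ? "on" : "off");
	return 0;
}

static int demo_filter(int rx_queue, bool add)
{
	printf("%s filter -> rxq %d\n", add ? "add" : "del", rx_queue);
	return 0;
}

static const struct eth_ops ops = {
	.configure_arfs_searcher = demo_searcher,
	.ntuple_filter_config = demo_filter,
};

int main(void)
{
	/* typical ordering: enable the searcher once, then steer flows */
	ops.configure_arfs_searcher(true);
	ops.ntuple_filter_config(3, true);
	ops.ntuple_filter_config(3, false);
	ops.configure_arfs_searcher(false);
	return 0;
}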
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index e763abd334f6..6f44229899eb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -185,6 +185,14 @@ struct qed_filter_accept_flags {
#define QED_ACCEPT_BCAST 0x20
};
+struct qed_arfs_config_params {
+ bool tcp;
+ bool udp;
+ bool ipv4;
+ bool ipv6;
+ bool arfs_enable;
+};
+
struct qed_sp_vport_update_params {
u16 opaque_fid;
u8 vport_id;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 0d3cef409c96..09c86411918c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -597,7 +597,7 @@ static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
u8 bd_flags = 0;
if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
- SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);
+ SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);
return bd_flags;
}
@@ -758,8 +758,8 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
p_buffer->placement_offset;
parse_flags = p_buffer->parse_flags;
bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
- SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
- SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);
+ SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
+ SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
p_buffer->vlan, bd_flags,
@@ -1090,7 +1090,6 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
struct core_tx_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
- union qed_qm_pq_params pq_params;
u16 pq_id = 0, pbl_size;
int rc = -EINVAL;
@@ -1127,9 +1126,18 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
p_ramrod->pbl_size = cpu_to_le16(pbl_size);
- memset(&pq_params, 0, sizeof(pq_params));
- pq_params.core.tc = p_ll2_conn->conn.tx_tc;
- pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+ switch (p_ll2_conn->conn.tx_tc) {
+ case LB_TC:
+ pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
+ break;
+ case OOO_LB_TC:
+ pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
+ break;
+ default:
+ pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ break;
+ }
+
p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
switch (conn_type) {
@@ -1400,13 +1408,21 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
struct qed_ll2_info *p_ll2_conn;
struct qed_ll2_rx_queue *p_rx;
struct qed_ll2_tx_queue *p_tx;
+ struct qed_ptt *p_ptt;
int rc = -EINVAL;
u32 i, capacity;
u8 qid;
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
+
p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
- if (!p_ll2_conn)
- return -EINVAL;
+ if (!p_ll2_conn) {
+ rc = -EINVAL;
+ goto out;
+ }
+
p_rx = &p_ll2_conn->rx_queue;
p_tx = &p_ll2_conn->tx_queue;
@@ -1439,7 +1455,9 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
p_tx->cur_completing_frag_num = 0;
*p_tx->p_fw_cons = 0;
- qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
+ if (rc)
+ goto out;
qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
p_ll2_conn->queue_id = qid;
@@ -1453,26 +1471,28 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
if (rc)
- return rc;
+ goto out;
rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
if (rc)
- return rc;
+ goto out;
if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
- qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);
qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
- qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
+ qed_llh_add_protocol_filter(p_hwfn, p_ptt,
0x8906, 0,
QED_LLH_FILTER_ETHERTYPE);
- qed_llh_add_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
+ qed_llh_add_protocol_filter(p_hwfn, p_ptt,
0x8914, 0,
QED_LLH_FILTER_ETHERTYPE);
}
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
return rc;
}
@@ -1591,33 +1611,34 @@ static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
p_tx->cur_send_frag_num++;
}
-static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_ll2,
- struct qed_ll2_tx_packet *p_curp,
- u8 num_of_bds,
- enum core_tx_dest tx_dest,
- u16 vlan,
- u8 bd_flags,
- u16 l4_hdr_offset_w,
- enum core_roce_flavor_type type,
- dma_addr_t first_frag,
- u16 first_frag_len)
+static void
+qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2,
+ struct qed_ll2_tx_packet *p_curp,
+ u8 num_of_bds,
+ enum core_tx_dest tx_dest,
+ u16 vlan,
+ u8 bd_flags,
+ u16 l4_hdr_offset_w,
+ enum core_roce_flavor_type roce_flavor,
+ dma_addr_t first_frag,
+ u16 first_frag_len)
{
struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
struct core_tx_bd *start_bd = NULL;
- u16 frag_idx;
+ u16 bd_data = 0, frag_idx;
start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
cpu_to_le16(l4_hdr_offset_w));
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
- start_bd->bd_flags.as_bitfield = bd_flags;
- start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
- CORE_TX_BD_FLAGS_START_BD_SHIFT;
- SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
- SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
+ bd_data |= bd_flags;
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, num_of_bds);
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
+ start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
DMA_REGPAIR_LE(start_bd->addr, first_frag);
start_bd->nbytes = cpu_to_le16(first_frag_len);
@@ -1642,9 +1663,8 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
- (*p_bd)->bd_flags.as_bitfield = 0;
+ (*p_bd)->bd_data.as_bitfield = 0;
(*p_bd)->bitfield1 = 0;
- (*p_bd)->bitfield0 = 0;
p_curp->bds_set[frag_idx].tx_frag = 0;
p_curp->bds_set[frag_idx].frag_len = 0;
}
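The BD rework above folds the old per-flag bytes into one 16-bit bd_data word that is assembled with SET_FIELD() and stored once with cpu_to_le16(). A self-contained sketch of the mask/shift packing idiom; the macros here are simplified stand-ins for the driver's SET_FIELD/GET_FIELD and the field names are invented:

#include <stdint.h>
#include <stdio.h>

/* name##_MASK / name##_SHIFT pairs describe each field inside the word */
#define DEMO_BD_START_BD_MASK	0x1
#define DEMO_BD_START_BD_SHIFT	0
#define DEMO_BD_NBDS_MASK	0xF
#define DEMO_BD_NBDS_SHIFT	1

#define SET_FIELD(value, name, flag) \
	((value) |= (((flag) & name##_MASK) << name##_SHIFT))
#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)

int main(void)
{
	uint16_t bd_data = 0;

	SET_FIELD(bd_data, DEMO_BD_START_BD, 1);	/* first BD of the packet */
	SET_FIELD(bd_data, DEMO_BD_NBDS, 3);		/* packet spans three BDs */
	printf("bd_data=0x%04x nbds=%u\n",
	       (unsigned int)bd_data, (unsigned int)GET_FIELD(bd_data, DEMO_BD_NBDS));
	return 0;
}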
@@ -1823,23 +1843,30 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
struct qed_ll2_info *p_ll2_conn = NULL;
int rc = -EINVAL;
+ struct qed_ptt *p_ptt;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
- if (!p_ll2_conn)
- return -EINVAL;
+ if (!p_ll2_conn) {
+ rc = -EINVAL;
+ goto out;
+ }
/* Stop Tx & Rx of connection, if needed */
if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
if (rc)
- return rc;
+ goto out;
qed_ll2_txq_flush(p_hwfn, connection_handle);
}
if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
if (rc)
- return rc;
+ goto out;
qed_ll2_rxq_flush(p_hwfn, connection_handle);
}
@@ -1847,14 +1874,16 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) {
- qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
+ qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
0x8906, 0,
QED_LLH_FILTER_ETHERTYPE);
- qed_llh_remove_protocol_filter(p_hwfn, p_hwfn->p_main_ptt,
+ qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
0x8914, 0,
QED_LLH_FILTER_ETHERTYPE);
}
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
return rc;
}
@@ -2241,11 +2270,11 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
/* Request HW to calculate IP csum */
if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
- flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+ flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
if (skb_vlan_tag_present(skb)) {
vlan = skb_vlan_tag_get(skb);
- flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
+ flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
}
rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index eef30a598b40..59992cf20d42 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -45,6 +45,7 @@
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
@@ -54,6 +55,8 @@
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
+#include "qed_iscsi.h"
+
#include "qed_mcp.h"
#include "qed_hw.h"
#include "qed_selftest.h"
@@ -227,10 +230,25 @@ err0:
int qed_fill_dev_info(struct qed_dev *cdev,
struct qed_dev_info *dev_info)
{
+ struct qed_tunnel_info *tun = &cdev->tunnel;
struct qed_ptt *ptt;
memset(dev_info, 0, sizeof(struct qed_dev_info));
+ if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
+ tun->vxlan.b_mode_enabled)
+ dev_info->vxlan_enable = true;
+
+ if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
+ tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
+ tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
+ dev_info->gre_enable = true;
+
+ if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
+ tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
+ tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
+ dev_info->geneve_enable = true;
+
dev_info->num_hwfns = cdev->num_hwfns;
dev_info->pci_mem_start = cdev->pci_params.mem_start;
dev_info->pci_mem_end = cdev->pci_params.mem_end;
@@ -238,6 +256,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
QED_PCI_ETH_ROCE);
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
+ dev_info->dev_type = cdev->type;
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
if (IS_PF(cdev)) {
@@ -588,6 +607,19 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
return rc;
}
+void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u8 id = p_hwfn->my_id;
+ u32 int_mode;
+
+ int_mode = cdev->int_params.out.int_mode;
+ if (int_mode == QED_INT_MODE_MSIX)
+ synchronize_irq(cdev->int_params.msix_table[id].vector);
+ else
+ synchronize_irq(cdev->pdev->irq);
+}
+
static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
int i;
@@ -630,19 +662,6 @@ static int qed_nic_stop(struct qed_dev *cdev)
return rc;
}
-static int qed_nic_reset(struct qed_dev *cdev)
-{
- int rc;
-
- rc = qed_hw_reset(cdev);
- if (rc)
- return rc;
-
- qed_resc_free(cdev);
-
- return 0;
-}
-
static int qed_nic_setup(struct qed_dev *cdev)
{
int rc, i;
@@ -743,7 +762,8 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
cdev->num_hwfns;
- if (!IS_ENABLED(CONFIG_QED_RDMA))
+ if (!IS_ENABLED(CONFIG_QED_RDMA) ||
+ QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH_ROCE)
return 0;
for_each_hwfn(cdev, i)
@@ -875,10 +895,12 @@ static void qed_update_pf_params(struct qed_dev *cdev,
params->rdma_pf_params.num_qps = QED_ROCE_QPS;
params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
/* divide by 3 the MRs to avoid MF ILT overflow */
- params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
}
+ if (cdev->num_hwfns > 1 || IS_VF(cdev))
+ params->eth_pf_params.num_arfs_filters = 0;
+
/* In case we might support RDMA, don't allow qede to be greedy
* with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
*/
@@ -900,11 +922,15 @@ static void qed_update_pf_params(struct qed_dev *cdev,
static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_slowpath_params *params)
{
- struct qed_tunn_start_params tunn_info;
+ struct qed_drv_load_params drv_load_params;
+ struct qed_hw_init_params hw_init_params;
struct qed_mcp_drv_version drv_version;
+ struct qed_tunnel_info tunn_info;
const u8 *data = NULL;
struct qed_hwfn *hwfn;
+#ifdef CONFIG_RFS_ACCEL
struct qed_ptt *p_ptt;
+#endif
int rc = -EINVAL;
if (qed_iov_wq_start(cdev))
@@ -920,13 +946,18 @@ static int qed_slowpath_start(struct qed_dev *cdev,
goto err;
}
- p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
- if (p_ptt) {
- QED_LEADING_HWFN(cdev)->p_ptp_ptt = p_ptt;
- } else {
- DP_NOTICE(cdev, "Failed to acquire PTT for PTP\n");
- goto err;
+#ifdef CONFIG_RFS_ACCEL
+ if (cdev->num_hwfns == 1) {
+ p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (p_ptt) {
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
+ } else {
+ DP_NOTICE(cdev,
+ "Failed to acquire PTT for aRFS\n");
+ goto err;
+ }
}
+#endif
}
cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
@@ -953,27 +984,47 @@ static int qed_slowpath_start(struct qed_dev *cdev,
qed_dbg_pf_init(cdev);
}
- memset(&tunn_info, 0, sizeof(tunn_info));
- tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
- 1 << QED_MODE_L2GRE_TUNN |
- 1 << QED_MODE_IPGRE_TUNN |
- 1 << QED_MODE_L2GENEVE_TUNN |
- 1 << QED_MODE_IPGENEVE_TUNN;
-
- tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
- tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
- tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
-
/* Start the slowpath */
- rc = qed_hw_init(cdev, &tunn_info, true,
- cdev->int_params.out.int_mode,
- true, data);
+ memset(&hw_init_params, 0, sizeof(hw_init_params));
+ memset(&tunn_info, 0, sizeof(tunn_info));
+ tunn_info.vxlan.b_mode_enabled = true;
+ tunn_info.l2_gre.b_mode_enabled = true;
+ tunn_info.ip_gre.b_mode_enabled = true;
+ tunn_info.l2_geneve.b_mode_enabled = true;
+ tunn_info.ip_geneve.b_mode_enabled = true;
+ tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ hw_init_params.p_tunn = &tunn_info;
+ hw_init_params.b_hw_start = true;
+ hw_init_params.int_mode = cdev->int_params.out.int_mode;
+ hw_init_params.allow_npar_tx_switch = true;
+ hw_init_params.bin_fw_data = data;
+
+ memset(&drv_load_params, 0, sizeof(drv_load_params));
+ drv_load_params.is_crash_kernel = is_kdump_kernel();
+ drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
+ drv_load_params.avoid_eng_reset = false;
+ drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
+ hw_init_params.p_drv_load_params = &drv_load_params;
+
+ rc = qed_hw_init(cdev, &hw_init_params);
if (rc)
goto err2;
DP_INFO(cdev,
"HW initialization and function start completed successfully\n");
+ if (IS_PF(cdev)) {
+ cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
+ BIT(QED_MODE_L2GENEVE_TUNN) |
+ BIT(QED_MODE_IPGENEVE_TUNN) |
+ BIT(QED_MODE_L2GRE_TUNN) |
+ BIT(QED_MODE_IPGRE_TUNN));
+ }
+
/* Allocate LL2 interface if needed */
if (QED_LEADING_HWFN(cdev)->using_ll2) {
rc = qed_ll2_alloc_if(cdev);
@@ -1014,9 +1065,12 @@ err:
if (IS_PF(cdev))
release_firmware(cdev->firmware);
- if (IS_PF(cdev) && QED_LEADING_HWFN(cdev)->p_ptp_ptt)
+#ifdef CONFIG_RFS_ACCEL
+ if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt)
qed_ptt_release(QED_LEADING_HWFN(cdev),
- QED_LEADING_HWFN(cdev)->p_ptp_ptt);
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt);
+#endif
qed_iov_wq_stop(cdev, false);
@@ -1031,8 +1085,11 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
qed_ll2_dealloc_if(cdev);
if (IS_PF(cdev)) {
- qed_ptt_release(QED_LEADING_HWFN(cdev),
- QED_LEADING_HWFN(cdev)->p_ptp_ptt);
+#ifdef CONFIG_RFS_ACCEL
+ if (cdev->num_hwfns == 1)
+ qed_ptt_release(QED_LEADING_HWFN(cdev),
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt);
+#endif
qed_free_stream_mem(cdev);
if (IS_QED_ETH_IF(cdev))
qed_sriov_disable(cdev, true);
@@ -1042,7 +1099,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
}
qed_disable_msix(cdev);
- qed_nic_reset(cdev);
+
+ qed_resc_free(cdev);
qed_iov_wq_stop(cdev, true);
@@ -1653,13 +1711,18 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
switch (type) {
case QED_MCP_LAN_STATS:
qed_get_vport_stats(cdev, &eth_stats);
- stats->lan_stats.ucast_rx_pkts = eth_stats.rx_ucast_pkts;
- stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts;
+ stats->lan_stats.ucast_rx_pkts =
+ eth_stats.common.rx_ucast_pkts;
+ stats->lan_stats.ucast_tx_pkts =
+ eth_stats.common.tx_ucast_pkts;
stats->lan_stats.fcs_err = -1;
break;
case QED_MCP_FCOE_STATS:
qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
break;
+ case QED_MCP_ISCSI_STATS:
+ qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
+ break;
default:
DP_ERR(cdev, "Invalid protocol type = %d\n", type);
return;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 87fde205149f..7266b36a2655 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -111,12 +111,71 @@ void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
}
}
+struct qed_mcp_cmd_elem {
+ struct list_head list;
+ struct qed_mcp_mb_params *p_mb_params;
+ u16 expected_seq_num;
+ bool b_is_completed;
+};
+
+/* Must be called while cmd_lock is acquired */
+static struct qed_mcp_cmd_elem *
+qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_mb_params *p_mb_params,
+ u16 expected_seq_num)
+{
+ struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
+
+ p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
+ if (!p_cmd_elem)
+ goto out;
+
+ p_cmd_elem->p_mb_params = p_mb_params;
+ p_cmd_elem->expected_seq_num = expected_seq_num;
+ list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
+out:
+ return p_cmd_elem;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_cmd_elem *p_cmd_elem)
+{
+ list_del(&p_cmd_elem->list);
+ kfree(p_cmd_elem);
+}
+
+/* Must be called while cmd_lock is acquired */
+static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
+ u16 seq_num)
+{
+ struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
+
+ list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
+ if (p_cmd_elem->expected_seq_num == seq_num)
+ return p_cmd_elem;
+ }
+
+ return NULL;
+}
+
int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
if (p_hwfn->mcp_info) {
+ struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;
+
kfree(p_hwfn->mcp_info->mfw_mb_cur);
kfree(p_hwfn->mcp_info->mfw_mb_shadow);
+
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+ list_for_each_entry_safe(p_cmd_elem,
+ p_tmp,
+ &p_hwfn->mcp_info->cmd_list, list) {
+ qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ }
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
}
+
kfree(p_hwfn->mcp_info);
return 0;
@@ -160,7 +219,7 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
DRV_PULSE_SEQ_MASK;
- p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+ p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
return 0;
}
@@ -176,6 +235,12 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
goto err;
p_info = p_hwfn->mcp_info;
+ /* Initialize the MFW spinlock */
+ spin_lock_init(&p_info->cmd_lock);
+ spin_lock_init(&p_info->link_lock);
+
+ INIT_LIST_HEAD(&p_info->cmd_list);
+
if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
DP_NOTICE(p_hwfn, "MCP is not initialized\n");
/* Do not free mcp_info here, since public_base indicate that
@@ -190,10 +255,6 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
goto err;
- /* Initialize the MFW spinlock */
- spin_lock_init(&p_info->lock);
- spin_lock_init(&p_info->link_lock);
-
return 0;
err:
@@ -201,68 +262,39 @@ err:
return -ENOMEM;
}
-/* Locks the MFW mailbox of a PF to ensure a single access.
- * The lock is achieved in most cases by holding a spinlock, causing other
- * threads to wait till a previous access is done.
- * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single
- * access is achieved by setting a blocking flag, which will fail other
- * competing contexts to send their mailboxes.
- */
-static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd)
+static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
{
- spin_lock_bh(&p_hwfn->mcp_info->lock);
+ u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
- /* The spinlock shouldn't be acquired when the mailbox command is
- * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
- * pending [UN]LOAD_REQ command of another PF together with a spinlock
- * (i.e. interrupts are disabled) - can lead to a deadlock.
- * It is assumed that for a single PF, no other mailbox commands can be
- * sent from another context while sending LOAD_REQ, and that any
- * parallel commands to UNLOAD_REQ can be cancelled.
+ /* Use MCP history register to check if MCP reset occurred between init
+ * time and now.
*/
- if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
- p_hwfn->mcp_info->block_mb_sending = false;
-
- if (p_hwfn->mcp_info->block_mb_sending) {
- DP_NOTICE(p_hwfn,
- "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
- cmd);
- spin_unlock_bh(&p_hwfn->mcp_info->lock);
- return -EBUSY;
- }
+ if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
+ p_hwfn->mcp_info->mcp_hist, generic_por_0);
- if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
- p_hwfn->mcp_info->block_mb_sending = true;
- spin_unlock_bh(&p_hwfn->mcp_info->lock);
+ qed_load_mcp_offsets(p_hwfn, p_ptt);
+ qed_mcp_cmd_port_init(p_hwfn, p_ptt);
}
-
- return 0;
-}
-
-static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd)
-{
- if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
- spin_unlock_bh(&p_hwfn->mcp_info->lock);
}
int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
- u8 delay = CHIP_MCP_RESP_ITER_US;
- u32 org_mcp_reset_seq, cnt = 0;
+ u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
int rc = 0;
- /* Ensure that only a single thread is accessing the mailbox at a
- * certain time.
- */
- rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
- if (rc != 0)
- return rc;
+ /* Ensure that only a single thread is accessing the mailbox */
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
- /* Set drv command along with the updated sequence */
org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
- DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
- (DRV_MSG_CODE_MCP_RESET | seq));
+
+ /* Set drv command along with the updated sequence */
+ qed_mcp_reread_offsets(p_hwfn, p_ptt);
+ seq = ++p_hwfn->mcp_info->drv_mb_seq;
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
do {
/* Wait for MFW response */
@@ -281,72 +313,207 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
rc = -EAGAIN;
}
- qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
return rc;
}
-static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 cmd,
- u32 param,
- u32 *o_mcp_resp,
- u32 *o_mcp_param)
+/* Must be called while cmd_lock is acquired */
+static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
{
- u8 delay = CHIP_MCP_RESP_ITER_US;
- u32 seq, cnt = 1, actual_mb_seq;
- int rc = 0;
-
- /* Get actual driver mailbox sequence */
- actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK;
+ struct qed_mcp_cmd_elem *p_cmd_elem;
- /* Use MCP history register to check if MCP reset occurred between
- * init time and now.
+ /* There is at most one pending command at a certain time, and if it
+ * exists - it is placed at the HEAD of the list.
*/
- if (p_hwfn->mcp_info->mcp_hist !=
- qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
- DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
- qed_load_mcp_offsets(p_hwfn, p_ptt);
- qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+ if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
+ p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
+ struct qed_mcp_cmd_elem, list);
+ return !p_cmd_elem->b_is_completed;
}
- seq = ++p_hwfn->mcp_info->drv_mb_seq;
- /* Set drv param */
- DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
+ return false;
+}
- /* Set drv command along with the updated sequence */
- DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
+/* Must be called while cmd_lock is acquired */
+static int
+qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_mb_params *p_mb_params;
+ struct qed_mcp_cmd_elem *p_cmd_elem;
+ u32 mcp_resp;
+ u16 seq_num;
+
+ mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+ seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
+
+ /* Return if no new non-handled response has been received */
+ if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
+ return -EAGAIN;
+
+ p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
+ if (!p_cmd_elem) {
+ DP_ERR(p_hwfn,
+ "Failed to find a pending mailbox cmd that expects sequence number %d\n",
+ seq_num);
+ return -EINVAL;
+ }
+
+ p_mb_params = p_cmd_elem->p_mb_params;
+
+ /* Get the MFW response along with the sequence number */
+ p_mb_params->mcp_resp = mcp_resp;
+
+ /* Get the MFW param */
+ p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+
+ /* Get the union data */
+ if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
+ u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ offsetof(struct public_drv_mb,
+ union_data);
+ qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
+ union_data_addr, p_mb_params->data_dst_size);
+ }
+
+ p_cmd_elem->b_is_completed = true;
+
+ return 0;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_mb_params *p_mb_params,
+ u16 seq_num)
+{
+ union drv_union_data union_data;
+ u32 union_data_addr;
+
+ /* Set the union data */
+ union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ offsetof(struct public_drv_mb, union_data);
+ memset(&union_data, 0, sizeof(union_data));
+ if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
+ memcpy(&union_data, p_mb_params->p_data_src,
+ p_mb_params->data_src_size);
+ qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
+ sizeof(union_data));
+
+ /* Set the drv param */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
+
+ /* Set the drv command along with the sequence number */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
DP_VERBOSE(p_hwfn, QED_MSG_SP,
- "wrote command (%x) to MFW MB param 0x%08x\n",
- (cmd | seq), param);
+ "MFW mailbox: command 0x%08x param 0x%08x\n",
+ (p_mb_params->cmd | seq_num), p_mb_params->param);
+}
+static int
+_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_mb_params *p_mb_params,
+ u32 max_retries, u32 delay)
+{
+ struct qed_mcp_cmd_elem *p_cmd_elem;
+ u32 cnt = 0;
+ u16 seq_num;
+ int rc = 0;
+
+ /* Wait until the mailbox is non-occupied */
do {
- /* Wait for MFW response */
+ /* Exit the loop if there is no pending command, or if the
+ * pending command is completed during this iteration.
+ * The spinlock stays locked until the command is sent.
+ */
+
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ if (!qed_mcp_has_pending_cmd(p_hwfn))
+ break;
+
+ rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
+ if (!rc)
+ break;
+ else if (rc != -EAGAIN)
+ goto err;
+
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
udelay(delay);
- *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+ } while (++cnt < max_retries);
- /* Give the FW up to 5 second (500*10ms) */
- } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
- (cnt++ < QED_DRV_MB_MAX_RETRIES));
+ if (cnt >= max_retries) {
+ DP_NOTICE(p_hwfn,
+ "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+ return -EAGAIN;
+ }
- DP_VERBOSE(p_hwfn, QED_MSG_SP,
- "[after %d ms] read (%x) seq is (%x) from FW MB\n",
- cnt * delay, *o_mcp_resp, seq);
-
- /* Is this a reply to our command? */
- if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
- *o_mcp_resp &= FW_MSG_CODE_MASK;
- /* Get the MCP param */
- *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
- } else {
- /* FW BUG! */
- DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
- cmd, param);
- *o_mcp_resp = 0;
- rc = -EAGAIN;
+ /* Send the mailbox command */
+ qed_mcp_reread_offsets(p_hwfn, p_ptt);
+ seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
+ p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
+ if (!p_cmd_elem) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ __qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ /* Wait for the MFW response */
+ do {
+ /* Exit the loop if the command is already completed, or if the
+ * command is completed during this iteration.
+ * The spinlock stays locked until the list element is removed.
+ */
+
+ udelay(delay);
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ if (p_cmd_elem->b_is_completed)
+ break;
+
+ rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
+ if (!rc)
+ break;
+ else if (rc != -EAGAIN)
+ goto err;
+
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+ } while (++cnt < max_retries);
+
+ if (cnt >= max_retries) {
+ DP_NOTICE(p_hwfn,
+ "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+ qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ return -EAGAIN;
}
+
+ qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
+ p_mb_params->mcp_resp,
+ p_mb_params->mcp_param,
+ (cnt * delay) / 1000, (cnt * delay) % 1000);
+
+ /* Clear the sequence number from the MFW response */
+ p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
+
+ return 0;
+
+err:
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
return rc;
}
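The mailbox rework above keeps every in-flight MFW command on a list keyed by its sequence number, so a polled fw_mb_header value can be matched back to the request that produced it. A much-simplified, lock-free sketch of that sequence matching; the real code additionally serializes on cmd_lock, rereads the MCP offsets and copies the union data, and the mask value below is illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pending_cmd {
	uint16_t expected_seq_num;
	bool b_is_completed;
	uint32_t resp;
};

/* Match a firmware response word to the pending command expecting its sequence. */
static bool complete_pending(struct pending_cmd *cmds, int n, uint32_t fw_mb_header)
{
	uint16_t seq = fw_mb_header & 0x7F;	/* low bits carry the sequence (mask illustrative) */

	for (int i = 0; i < n; i++) {
		if (cmds[i].expected_seq_num == seq && !cmds[i].b_is_completed) {
			cmds[i].resp = fw_mb_header;
			cmds[i].b_is_completed = true;
			return true;
		}
	}
	return false;	/* stale or unexpected response */
}

int main(void)
{
	struct pending_cmd cmds[] = { { .expected_seq_num = 5 } };

	printf("matched=%d\n", complete_pending(cmds, 1, 0x10000005u));
	return 0;
}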
@@ -354,9 +521,9 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_mcp_mb_params *p_mb_params)
{
- u32 union_data_addr;
-
- int rc;
+ size_t union_data_size = sizeof(union drv_union_data);
+ u32 max_retries = QED_DRV_MB_MAX_RETRIES;
+ u32 delay = CHIP_MCP_RESP_ITER_US;
/* MCP not initialized */
if (!qed_mcp_is_init(p_hwfn)) {
@@ -364,33 +531,17 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
return -EBUSY;
}
- union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
- offsetof(struct public_drv_mb, union_data);
-
- /* Ensure that only a single thread is accessing the mailbox at a
- * certain time.
- */
- rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
- if (rc)
- return rc;
-
- if (p_mb_params->p_data_src != NULL)
- qed_memcpy_to(p_hwfn, p_ptt, union_data_addr,
- p_mb_params->p_data_src,
- sizeof(*p_mb_params->p_data_src));
-
- rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
- p_mb_params->param, &p_mb_params->mcp_resp,
- &p_mb_params->mcp_param);
-
- if (p_mb_params->p_data_dst != NULL)
- qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
- union_data_addr,
- sizeof(*p_mb_params->p_data_dst));
-
- qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
+ if (p_mb_params->data_src_size > union_data_size ||
+ p_mb_params->data_dst_size > union_data_size) {
+ DP_ERR(p_hwfn,
+ "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
+ p_mb_params->data_src_size,
+ p_mb_params->data_dst_size, union_data_size);
+ return -EINVAL;
+ }
- return rc;
+ return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
+ delay);
}
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
@@ -401,32 +552,12 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
u32 *o_mcp_param)
{
struct qed_mcp_mb_params mb_params;
- union drv_union_data data_src;
int rc;
memset(&mb_params, 0, sizeof(mb_params));
- memset(&data_src, 0, sizeof(data_src));
mb_params.cmd = cmd;
mb_params.param = param;
- /* In case of UNLOAD_DONE, set the primary MAC */
- if ((cmd == DRV_MSG_CODE_UNLOAD_DONE) &&
- (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED)) {
- u8 *p_mac = p_hwfn->cdev->wol_mac;
-
- data_src.wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
- data_src.wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
- p_mac[4] << 8 | p_mac[5];
-
- DP_VERBOSE(p_hwfn,
- (QED_MSG_SP | NETIF_MSG_IFDOWN),
- "Setting WoL MAC: %pM --> [%08x,%08x]\n",
- p_mac, data_src.wol_mac.mac_upper,
- data_src.wol_mac.mac_lower);
-
- mb_params.p_data_src = &data_src;
- }
-
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
return rc;
@@ -445,13 +576,17 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
{
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
int rc;
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = cmd;
mb_params.param = param;
- mb_params.p_data_dst = &union_data;
+ mb_params.p_data_dst = raw_data;
+
+ /* Use the maximal value since the actual one is part of the response */
+ mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
+
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
return rc;
@@ -460,55 +595,413 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
*o_mcp_param = mb_params.mcp_param;
*o_txn_size = *o_mcp_param;
- memcpy(o_buf, &union_data.raw_data, *o_txn_size);
+ memcpy(o_buf, raw_data, *o_txn_size);
return 0;
}
-int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *p_load_code)
+static bool
+qed_mcp_can_force_load(u8 drv_role,
+ u8 exist_drv_role,
+ enum qed_override_force_load override_force_load)
+{
+ bool can_force_load = false;
+
+ switch (override_force_load) {
+ case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
+ can_force_load = true;
+ break;
+ case QED_OVERRIDE_FORCE_LOAD_NEVER:
+ can_force_load = false;
+ break;
+ default:
+ can_force_load = (drv_role == DRV_ROLE_OS &&
+ exist_drv_role == DRV_ROLE_PREBOOT) ||
+ (drv_role == DRV_ROLE_KDUMP &&
+ exist_drv_role == DRV_ROLE_OS);
+ break;
+ }
+
+ return can_force_load;
+}
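qed_mcp_can_force_load() above encodes the default policy that an OS driver may force out a leftover preboot driver and a kdump kernel may force out an OS driver, unless the override knob says always or never. A tiny standalone restatement of that default decision (enum names illustrative):

#include <stdbool.h>
#include <stdio.h>

enum drv_role { ROLE_PREBOOT, ROLE_OS, ROLE_KDUMP };

static bool can_force_load(enum drv_role loading, enum drv_role existing)
{
	return (loading == ROLE_OS && existing == ROLE_PREBOOT) ||
	       (loading == ROLE_KDUMP && existing == ROLE_OS);
}

int main(void)
{
	printf("OS over PREBOOT: %d\n", can_force_load(ROLE_OS, ROLE_PREBOOT));	/* 1 */
	printf("OS over OS:      %d\n", can_force_load(ROLE_OS, ROLE_OS));		/* 0 */
	printf("KDUMP over OS:   %d\n", can_force_load(ROLE_KDUMP, ROLE_OS));	/* 1 */
	return 0;
}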
+
+static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
+ &resp, &param);
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "Failed to send cancel load request, rc = %d\n", rc);
+
+ return rc;
+}
+
+#define CONFIG_QEDE_BITMAP_IDX BIT(0)
+#define CONFIG_QED_SRIOV_BITMAP_IDX BIT(1)
+#define CONFIG_QEDR_BITMAP_IDX BIT(2)
+#define CONFIG_QEDF_BITMAP_IDX BIT(4)
+#define CONFIG_QEDI_BITMAP_IDX BIT(5)
+#define CONFIG_QED_LL2_BITMAP_IDX BIT(6)
+
+static u32 qed_get_config_bitmap(void)
+{
+ u32 config_bitmap = 0x0;
+
+ if (IS_ENABLED(CONFIG_QEDE))
+ config_bitmap |= CONFIG_QEDE_BITMAP_IDX;
+
+ if (IS_ENABLED(CONFIG_QED_SRIOV))
+ config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;
+
+ if (IS_ENABLED(CONFIG_QED_RDMA))
+ config_bitmap |= CONFIG_QEDR_BITMAP_IDX;
+
+ if (IS_ENABLED(CONFIG_QED_FCOE))
+ config_bitmap |= CONFIG_QEDF_BITMAP_IDX;
+
+ if (IS_ENABLED(CONFIG_QED_ISCSI))
+ config_bitmap |= CONFIG_QEDI_BITMAP_IDX;
+
+ if (IS_ENABLED(CONFIG_QED_LL2))
+ config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;
+
+ return config_bitmap;
+}
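qed_get_config_bitmap() above sets one bit per built-in companion driver, and the result is later reported to the MFW as drv_ver_1 in the load request. A quick standalone decode of an example value, using the bit positions from the defines above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cfg = 0x43;	/* example: qede + qed_sriov + qed_ll2 built in */

	printf("qede:  %u\n", (cfg >> 0) & 1);
	printf("sriov: %u\n", (cfg >> 1) & 1);
	printf("rdma:  %u\n", (cfg >> 2) & 1);
	printf("fcoe:  %u\n", (cfg >> 4) & 1);
	printf("iscsi: %u\n", (cfg >> 5) & 1);
	printf("ll2:   %u\n", (cfg >> 6) & 1);
	return 0;
}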
+
+struct qed_load_req_in_params {
+ u8 hsi_ver;
+#define QED_LOAD_REQ_HSI_VER_DEFAULT 0
+#define QED_LOAD_REQ_HSI_VER_1 1
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u8 drv_role;
+ u8 timeout_val;
+ u8 force_cmd;
+ bool avoid_eng_reset;
+};
+
+struct qed_load_req_out_params {
+ u32 load_code;
+ u32 exist_drv_ver_0;
+ u32 exist_drv_ver_1;
+ u32 exist_fw_ver;
+ u8 exist_drv_role;
+ u8 mfw_hsi_ver;
+ bool drv_exists;
+};
+
+static int
+__qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_load_req_in_params *p_in_params,
+ struct qed_load_req_out_params *p_out_params)
{
- struct qed_dev *cdev = p_hwfn->cdev;
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ struct load_req_stc load_req;
+ struct load_rsp_stc load_rsp;
+ u32 hsi_ver;
int rc;
+ memset(&load_req, 0, sizeof(load_req));
+ load_req.drv_ver_0 = p_in_params->drv_ver_0;
+ load_req.drv_ver_1 = p_in_params->drv_ver_1;
+ load_req.fw_ver = p_in_params->fw_ver;
+ QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
+ QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
+ p_in_params->timeout_val);
+ QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
+ p_in_params->force_cmd);
+ QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
+ p_in_params->avoid_eng_reset);
+
+ hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
+ DRV_ID_MCP_HSI_VER_CURRENT :
+ (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
+
memset(&mb_params, 0, sizeof(mb_params));
- /* Load Request */
mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
- mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
- cdev->drv_type;
- memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE);
- mb_params.p_data_src = &union_data;
- rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
+ mb_params.p_data_src = &load_req;
+ mb_params.data_src_size = sizeof(load_req);
+ mb_params.p_data_dst = &load_rsp;
+ mb_params.data_dst_size = sizeof(load_rsp);
- /* if mcp fails to respond we must abort */
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
+ mb_params.param,
+ QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
+ QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
+ QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
+ QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
+
+ if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
+ load_req.drv_ver_0,
+ load_req.drv_ver_1,
+ load_req.fw_ver,
+ load_req.misc0,
+ QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
+ QED_MFW_GET_FIELD(load_req.misc0,
+ LOAD_REQ_LOCK_TO),
+ QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
+ QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
+ }
+
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc) {
- DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
return rc;
}
- *p_load_code = mb_params.mcp_resp;
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
+ p_out_params->load_code = mb_params.mcp_resp;
+
+ if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
+ p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
+ load_rsp.drv_ver_0,
+ load_rsp.drv_ver_1,
+ load_rsp.fw_ver,
+ load_rsp.misc0,
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
+
+ p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
+ p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
+ p_out_params->exist_fw_ver = load_rsp.fw_ver;
+ p_out_params->exist_drv_role =
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
+ p_out_params->mfw_hsi_ver =
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
+ p_out_params->drv_exists =
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
+ LOAD_RSP_FLAGS0_DRV_EXISTS;
+ }
+
+ return 0;
+}
+
+static int qed_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
+ enum qed_drv_role drv_role,
+ u8 *p_mfw_drv_role)
+{
+ switch (drv_role) {
+ case QED_DRV_ROLE_OS:
+ *p_mfw_drv_role = DRV_ROLE_OS;
+ break;
+ case QED_DRV_ROLE_KDUMP:
+ *p_mfw_drv_role = DRV_ROLE_KDUMP;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+enum qed_load_req_force {
+ QED_LOAD_REQ_FORCE_NONE,
+ QED_LOAD_REQ_FORCE_PF,
+ QED_LOAD_REQ_FORCE_ALL,
+};
+
+static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
+				  enum qed_load_req_force force_cmd,
+ u8 *p_mfw_force_cmd)
+{
+ switch (force_cmd) {
+ case QED_LOAD_REQ_FORCE_NONE:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
+ break;
+ case QED_LOAD_REQ_FORCE_PF:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
+ break;
+ case QED_LOAD_REQ_FORCE_ALL:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
+ break;
+ }
+}
+
+int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_load_req_params *p_params)
+{
+ struct qed_load_req_out_params out_params;
+ struct qed_load_req_in_params in_params;
+ u8 mfw_drv_role, mfw_force_cmd;
+ int rc;
+
+ memset(&in_params, 0, sizeof(in_params));
+ in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
+ in_params.drv_ver_0 = QED_VERSION;
+ in_params.drv_ver_1 = qed_get_config_bitmap();
+ in_params.fw_ver = STORM_FW_VERSION;
+	rc = qed_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
+ if (rc)
+ return rc;
+
+ in_params.drv_role = mfw_drv_role;
+ in_params.timeout_val = p_params->timeout_val;
+ qed_get_mfw_force_cmd(p_hwfn,
+ QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
+
+ in_params.force_cmd = mfw_force_cmd;
+ in_params.avoid_eng_reset = p_params->avoid_eng_reset;
- /* If MFW refused (e.g. other port is in diagnostic mode) we
- * must abort. This can happen in the following cases:
- * - Other port is in diagnostic mode
- * - Previously loaded function on the engine is not compliant with
- * the requester.
- * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
- * -
+ memset(&out_params, 0, sizeof(out_params));
+ rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
+ if (rc)
+ return rc;
+
+ /* First handle cases where another load request should/might be sent:
+ * - MFW expects the old interface [HSI version = 1]
+ * - MFW responds that a force load request is required
*/
- if (!(*p_load_code) ||
- ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
- ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
- ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
- DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
+ if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+ DP_INFO(p_hwfn,
+ "MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");
+
+ in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
+ memset(&out_params, 0, sizeof(out_params));
+ rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
+ if (rc)
+ return rc;
+ } else if (out_params.load_code ==
+ FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
+ if (qed_mcp_can_force_load(in_params.drv_role,
+ out_params.exist_drv_role,
+ p_params->override_force_load)) {
+ DP_INFO(p_hwfn,
+ "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
+ in_params.drv_role, in_params.fw_ver,
+ in_params.drv_ver_0, in_params.drv_ver_1,
+ out_params.exist_drv_role,
+ out_params.exist_fw_ver,
+ out_params.exist_drv_ver_0,
+ out_params.exist_drv_ver_1);
+
+ qed_get_mfw_force_cmd(p_hwfn,
+ QED_LOAD_REQ_FORCE_ALL,
+ &mfw_force_cmd);
+
+ in_params.force_cmd = mfw_force_cmd;
+ memset(&out_params, 0, sizeof(out_params));
+ rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc)
+ return rc;
+ } else {
+ DP_NOTICE(p_hwfn,
+ "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
+ in_params.drv_role, in_params.fw_ver,
+ in_params.drv_ver_0, in_params.drv_ver_1,
+ out_params.exist_drv_role,
+ out_params.exist_fw_ver,
+ out_params.exist_drv_ver_0,
+ out_params.exist_drv_ver_1);
+ DP_NOTICE(p_hwfn,
+ "Avoid sending a force load request to prevent disruption of active PFs\n");
+
+ qed_mcp_cancel_load_req(p_hwfn, p_ptt);
+ return -EBUSY;
+ }
+ }
+
+ /* Now handle the other types of responses.
+ * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
+ * expected here after the additional revised load requests were sent.
+ */
+ switch (out_params.load_code) {
+ case FW_MSG_CODE_DRV_LOAD_ENGINE:
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
+ out_params.drv_exists) {
+ /* The role and fw/driver version match, but the PF is
+ * already loaded and has not been unloaded gracefully.
+ */
+ DP_NOTICE(p_hwfn, "PF is already loaded\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
+ out_params.load_code);
return -EBUSY;
}
+ p_params->load_code = out_params.load_code;
+
return 0;
}
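
As a rough illustration of the request/response flow above, a caller in the init path might use the new API as follows. This is a hedged sketch only: the function name and the 0 timeout value are placeholders and not part of the patch.

/* Hedged sketch only - not part of this patch. '0' stands in for the
 * default lock-aging timeout; error handling is trimmed for brevity.
 */
static int example_issue_load_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_load_req_params load_req_params;
	int rc;

	memset(&load_req_params, 0, sizeof(load_req_params));
	load_req_params.drv_role = QED_DRV_ROLE_OS;
	load_req_params.timeout_val = 0;
	load_req_params.avoid_eng_reset = false;

	rc = qed_mcp_load_req(p_hwfn, p_ptt, &load_req_params);
	if (rc)
		return rc;

	/* The response reports whether this PF is first on the engine/port */
	if (load_req_params.load_code == FW_MSG_CODE_DRV_LOAD_ENGINE)
		DP_VERBOSE(p_hwfn, QED_MSG_SP, "First PF on the engine\n");

	return 0;
}
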
+int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 wol_param, mcp_resp, mcp_param;
+
+ switch (p_hwfn->cdev->wol_config) {
+ case QED_OV_WOL_DISABLED:
+ wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
+ break;
+ case QED_OV_WOL_ENABLED:
+ wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Unknown WoL configuration %02x\n",
+ p_hwfn->cdev->wol_config);
+ /* Fallthrough */
+ case QED_OV_WOL_DEFAULT:
+ wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
+ }
+
+ return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
+ &mcp_resp, &mcp_param);
+}
+
+int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_mb_params mb_params;
+ struct mcp_mac wol_mac;
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
+
+ /* Set the primary MAC if WoL is enabled */
+ if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
+ u8 *p_mac = p_hwfn->cdev->wol_mac;
+
+ memset(&wol_mac, 0, sizeof(wol_mac));
+ wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
+ wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
+ p_mac[4] << 8 | p_mac[5];
+
+ DP_VERBOSE(p_hwfn,
+ (QED_MSG_SP | NETIF_MSG_IFDOWN),
+ "Setting WoL MAC: %pM --> [%08x,%08x]\n",
+ p_mac, wol_mac.mac_upper, wol_mac.mac_lower);
+
+ mb_params.p_data_src = &wol_mac;
+ mb_params.data_src_size = sizeof(wol_mac);
+ }
+
+ return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+}
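
To make the WoL MAC packing above concrete, here is how a sample address would land in struct mcp_mac; the address is purely illustrative.

/* Illustrative only: aa:bb:cc:dd:ee:ff packed with the shifts used in
 * qed_mcp_unload_done() above.
 */
u8 mac[6] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
struct mcp_mac wol_mac;

wol_mac.mac_upper = mac[0] << 8 | mac[1];		/* 0x0000aabb */
wol_mac.mac_lower = mac[2] << 24 | mac[3] << 16 |
		    mac[4] << 8 | mac[5];		/* 0xccddeeff */
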
+
static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -549,7 +1042,6 @@ int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
u32 func_addr = SECTION_ADDR(mfw_func_offsize,
MCP_PF_ID(p_hwfn));
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
int rc;
int i;
@@ -560,8 +1052,8 @@ int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
- memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = vfs_to_ack;
+ mb_params.data_src_size = VF_MAX_STATIC / 8;
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
@@ -744,33 +1236,31 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
- struct eth_phy_cfg *phy_cfg;
+ struct eth_phy_cfg phy_cfg;
int rc = 0;
u32 cmd;
/* Set the shmem configuration according to params */
- phy_cfg = &union_data.drv_phy_cfg;
- memset(phy_cfg, 0, sizeof(*phy_cfg));
+ memset(&phy_cfg, 0, sizeof(phy_cfg));
cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
if (!params->speed.autoneg)
- phy_cfg->speed = params->speed.forced_speed;
- phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
- phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
- phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
- phy_cfg->adv_speed = params->speed.advertised_speeds;
- phy_cfg->loopback_mode = params->loopback_mode;
+ phy_cfg.speed = params->speed.forced_speed;
+ phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
+ phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
+ phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
+ phy_cfg.adv_speed = params->speed.advertised_speeds;
+ phy_cfg.loopback_mode = params->loopback_mode;
p_hwfn->b_drv_link_init = b_up;
if (b_up) {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
- phy_cfg->speed,
- phy_cfg->pause,
- phy_cfg->adv_speed,
- phy_cfg->loopback_mode,
- phy_cfg->feature_config_flags);
+ phy_cfg.speed,
+ phy_cfg.pause,
+ phy_cfg.adv_speed,
+ phy_cfg.loopback_mode,
+ phy_cfg.feature_config_flags);
} else {
DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
"Resetting link\n");
@@ -778,7 +1268,8 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = cmd;
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = &phy_cfg;
+ mb_params.data_src_size = sizeof(phy_cfg);
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
/* if mcp fails to respond we must abort */
@@ -805,7 +1296,6 @@ static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
enum qed_mcp_protocol_type stats_type;
union qed_mcp_protocol_stats stats;
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
u32 hsi_param;
switch (type) {
@@ -835,8 +1325,8 @@ static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_GET_STATS;
mb_params.param = hsi_param;
- memcpy(&union_data, &stats, sizeof(stats));
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = &stats;
+ mb_params.data_src_size = sizeof(stats);
qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
@@ -963,7 +1453,7 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
qed_mcp_update_bw(p_hwfn, p_ptt);
break;
default:
- DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
+ DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
rc = -EINVAL;
}
}
@@ -1316,24 +1806,23 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_mcp_drv_version *p_ver)
{
- struct drv_version_stc *p_drv_version;
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ struct drv_version_stc drv_version;
__be32 val;
u32 i;
int rc;
- p_drv_version = &union_data.drv_version;
- p_drv_version->version = p_ver->version;
-
+ memset(&drv_version, 0, sizeof(drv_version));
+ drv_version.version = p_ver->version;
for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
- *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
+ *(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
}
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = &drv_version;
+ mb_params.data_src_size = sizeof(drv_version);
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
@@ -1450,7 +1939,7 @@ int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 *mac)
{
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ u32 mfw_mac[2];
int rc;
memset(&mb_params, 0, sizeof(mb_params));
@@ -1458,8 +1947,17 @@ int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
DRV_MSG_CODE_VMAC_TYPE_SHIFT;
mb_params.param |= MCP_PF_ID(p_hwfn);
- ether_addr_copy(&union_data.raw_data[0], mac);
- mb_params.p_data_src = &union_data;
+
+ /* MCP is BE, and on LE platforms PCI would swap access to SHMEM
+ * in 32-bit granularity.
+ * So the MAC has to be set in native order [and not byte order],
+ * otherwise it would be read incorrectly by MFW after swap.
+ */
+ mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
+ mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
+
+ mb_params.p_data_src = (u8 *)mfw_mac;
+ mb_params.data_src_size = 8;
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
@@ -1724,52 +2222,426 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
return rc;
}
-#define QED_RESC_ALLOC_VERSION_MAJOR 1
+static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
+{
+ enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
+
+ switch (res_id) {
+ case QED_SB:
+ mfw_res_id = RESOURCE_NUM_SB_E;
+ break;
+ case QED_L2_QUEUE:
+ mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
+ break;
+ case QED_VPORT:
+ mfw_res_id = RESOURCE_NUM_VPORT_E;
+ break;
+ case QED_RSS_ENG:
+ mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
+ break;
+ case QED_PQ:
+ mfw_res_id = RESOURCE_NUM_PQ_E;
+ break;
+ case QED_RL:
+ mfw_res_id = RESOURCE_NUM_RL_E;
+ break;
+ case QED_MAC:
+ case QED_VLAN:
+ /* Each VFC resource can accommodate both a MAC and a VLAN */
+ mfw_res_id = RESOURCE_VFC_FILTER_E;
+ break;
+ case QED_ILT:
+ mfw_res_id = RESOURCE_ILT_E;
+ break;
+ case QED_LL2_QUEUE:
+ mfw_res_id = RESOURCE_LL2_QUEUE_E;
+ break;
+ case QED_RDMA_CNQ_RAM:
+ case QED_CMDQS_CQS:
+ /* CNQ/CMDQS are the same resource */
+ mfw_res_id = RESOURCE_CQS_E;
+ break;
+ case QED_RDMA_STATS_QUEUE:
+ mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
+ break;
+ case QED_BDQ:
+ mfw_res_id = RESOURCE_BDQ_E;
+ break;
+ default:
+ break;
+ }
+
+ return mfw_res_id;
+}
+
+#define QED_RESC_ALLOC_VERSION_MAJOR 2
#define QED_RESC_ALLOC_VERSION_MINOR 0
#define QED_RESC_ALLOC_VERSION \
((QED_RESC_ALLOC_VERSION_MAJOR << \
DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
(QED_RESC_ALLOC_VERSION_MINOR << \
DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
-int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct resource_info *p_resc_info,
- u32 *p_mcp_resp, u32 *p_mcp_param)
+
+struct qed_resc_alloc_in_params {
+ u32 cmd;
+ enum qed_resources res_id;
+ u32 resc_max_val;
+};
+
+struct qed_resc_alloc_out_params {
+ u32 mcp_resp;
+ u32 mcp_param;
+ u32 resc_num;
+ u32 resc_start;
+ u32 vf_resc_num;
+ u32 vf_resc_start;
+ u32 flags;
+};
+
+static int
+qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_resc_alloc_in_params *p_in_params,
+ struct qed_resc_alloc_out_params *p_out_params)
{
struct qed_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ struct resource_info mfw_resc_info;
int rc;
+ memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));
+
+ mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
+ if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
+ DP_ERR(p_hwfn,
+ "Failed to match resource %d [%s] with the MFW resources\n",
+ p_in_params->res_id,
+ qed_hw_get_resc_name(p_in_params->res_id));
+ return -EINVAL;
+ }
+
+ switch (p_in_params->cmd) {
+ case DRV_MSG_SET_RESOURCE_VALUE_MSG:
+ mfw_resc_info.size = p_in_params->resc_max_val;
+ /* Fallthrough */
+ case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
+ break;
+ default:
+ DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
+ p_in_params->cmd);
+ return -EINVAL;
+ }
+
memset(&mb_params, 0, sizeof(mb_params));
- memset(&union_data, 0, sizeof(union_data));
- mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
+ mb_params.cmd = p_in_params->cmd;
mb_params.param = QED_RESC_ALLOC_VERSION;
+ mb_params.p_data_src = &mfw_resc_info;
+ mb_params.data_src_size = sizeof(mfw_resc_info);
+ mb_params.p_data_dst = mb_params.p_data_src;
+ mb_params.data_dst_size = mb_params.data_src_size;
- /* Need to have a sufficient large struct, as the cmd_and_union
- * is going to do memcpy from and to it.
- */
- memcpy(&union_data.resource, p_resc_info, sizeof(*p_resc_info));
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
+ p_in_params->cmd,
+ p_in_params->res_id,
+ qed_hw_get_resc_name(p_in_params->res_id),
+ QED_MFW_GET_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ QED_MFW_GET_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ p_in_params->resc_max_val);
- mb_params.p_data_src = &union_data;
- mb_params.p_data_dst = &union_data;
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
return rc;
- /* Copy the data back */
- memcpy(p_resc_info, &union_data.resource, sizeof(*p_resc_info));
- *p_mcp_resp = mb_params.mcp_resp;
- *p_mcp_param = mb_params.mcp_param;
+ p_out_params->mcp_resp = mb_params.mcp_resp;
+ p_out_params->mcp_param = mb_params.mcp_param;
+ p_out_params->resc_num = mfw_resc_info.size;
+ p_out_params->resc_start = mfw_resc_info.offset;
+ p_out_params->vf_resc_num = mfw_resc_info.vf_size;
+ p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
+ p_out_params->flags = mfw_resc_info.flags;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
+ QED_MFW_GET_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ QED_MFW_GET_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ p_out_params->resc_num,
+ p_out_params->resc_start,
+ p_out_params->vf_resc_num,
+ p_out_params->vf_resc_start, p_out_params->flags);
+
+ return 0;
+}
+
+int
+qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_resources res_id,
+ u32 resc_max_val, u32 *p_mcp_resp)
+{
+ struct qed_resc_alloc_out_params out_params;
+ struct qed_resc_alloc_in_params in_params;
+ int rc;
+
+ memset(&in_params, 0, sizeof(in_params));
+ in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
+ in_params.res_id = res_id;
+ in_params.resc_max_val = resc_max_val;
+ memset(&out_params, 0, sizeof(out_params));
+ rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc)
+ return rc;
+
+ *p_mcp_resp = out_params.mcp_resp;
+
+ return 0;
+}
+
+int
+qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_resources res_id,
+ u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
+{
+ struct qed_resc_alloc_out_params out_params;
+ struct qed_resc_alloc_in_params in_params;
+ int rc;
+
+ memset(&in_params, 0, sizeof(in_params));
+ in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
+ in_params.res_id = res_id;
+ memset(&out_params, 0, sizeof(out_params));
+ rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc)
+ return rc;
+
+ *p_mcp_resp = out_params.mcp_resp;
+
+ if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
+ *p_resc_num = out_params.resc_num;
+ *p_resc_start = out_params.resc_start;
+ }
+
+ return 0;
+}
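
A minimal sketch of how the two wrappers above could be used together during resource discovery; the function name, the choice of QED_SB and the value 512 are examples only and not taken from this patch.

/* Hedged sketch only - not part of this patch. */
static int example_query_sb_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp, resc_num = 0, resc_start = 0;
	int rc;

	/* Advertise the driver's max need, then read the MFW's allocation */
	rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, QED_SB, 512, &mcp_resp);
	if (rc)
		return rc;

	rc = qed_mcp_get_resc_info(p_hwfn, p_ptt, QED_SB,
				   &mcp_resp, &resc_num, &resc_start);
	if (rc)
		return rc;

	if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
		return -EINVAL;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "SB resources: num 0x%x, start 0x%x\n", resc_num, resc_start);
	return 0;
}
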
+
+int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 mcp_resp, mcp_param;
+
+ return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
+ &mcp_resp, &mcp_param);
+}
+
+static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
+{
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
+ p_mcp_resp, p_mcp_param);
+ if (rc)
+ return rc;
+
+ if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The resource command is unsupported by the MFW\n");
+ return -EINVAL;
+ }
+
+ if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
+ u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
+
+ DP_NOTICE(p_hwfn,
+ "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
+ param, opcode);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+int
+__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_resc_lock_params *p_params)
+{
+ u32 param = 0, mcp_resp, mcp_param;
+ u8 opcode;
+ int rc;
+
+ switch (p_params->timeout) {
+ case QED_MCP_RESC_LOCK_TO_DEFAULT:
+ opcode = RESOURCE_OPCODE_REQ;
+ p_params->timeout = 0;
+ break;
+ case QED_MCP_RESC_LOCK_TO_NONE:
+ opcode = RESOURCE_OPCODE_REQ_WO_AGING;
+ p_params->timeout = 0;
+ break;
+ default:
+ opcode = RESOURCE_OPCODE_REQ_W_AGING;
+ break;
+ }
+
+ QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+ QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
+ param, p_params->timeout, opcode, p_params->resource);
+
+ /* Attempt to acquire the resource */
+ rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
+ if (rc)
+ return rc;
+
+ /* Analyze the response */
+ p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
+ opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
DP_VERBOSE(p_hwfn,
QED_MSG_SP,
- "MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x, offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n",
- *p_mcp_param,
- p_resc_info->res_id,
- p_resc_info->size,
- p_resc_info->offset,
- p_resc_info->vf_size,
- p_resc_info->vf_offset, p_resc_info->flags);
+ "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
+ mcp_param, opcode, p_params->owner);
+
+ switch (opcode) {
+ case RESOURCE_OPCODE_GNT:
+ p_params->b_granted = true;
+ break;
+ case RESOURCE_OPCODE_BUSY:
+ p_params->b_granted = false;
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
+ mcp_param, opcode);
+ return -EINVAL;
+ }
return 0;
}
+
+int
+qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
+{
+ u32 retry_cnt = 0;
+ int rc;
+
+ do {
+ /* No need for an interval before the first iteration */
+ if (retry_cnt) {
+ if (p_params->sleep_b4_retry) {
+ u16 retry_interval_in_ms =
+ DIV_ROUND_UP(p_params->retry_interval,
+ 1000);
+
+ msleep(retry_interval_in_ms);
+ } else {
+ udelay(p_params->retry_interval);
+ }
+ }
+
+ rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
+ if (rc)
+ return rc;
+
+ if (p_params->b_granted)
+ break;
+ } while (retry_cnt++ < p_params->retry_num);
+
+ return 0;
+}
+
+int
+qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_resc_unlock_params *p_params)
+{
+ u32 param = 0, mcp_resp, mcp_param;
+ u8 opcode;
+ int rc;
+
+ opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
+ : RESOURCE_OPCODE_RELEASE;
+ QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
+ param, opcode, p_params->resource);
+
+ /* Attempt to release the resource */
+ rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
+ if (rc)
+ return rc;
+
+ /* Analyze the response */
+ opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
+ mcp_param, opcode);
+
+ switch (opcode) {
+ case RESOURCE_OPCODE_RELEASED_PREVIOUS:
+ DP_INFO(p_hwfn,
+ "Resource unlock request for an already released resource [%d]\n",
+ p_params->resource);
+ /* Fallthrough */
+ case RESOURCE_OPCODE_RELEASED:
+ p_params->b_released = true;
+ break;
+ case RESOURCE_OPCODE_WRONG_OWNER:
+ p_params->b_released = false;
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
+ mcp_param, opcode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
+ struct qed_resc_unlock_params *p_unlock,
+ enum qed_resc_lock resource,
+ bool b_is_permanent)
+{
+ if (p_lock) {
+ memset(p_lock, 0, sizeof(*p_lock));
+
+ /* Permanent resources don't require aging, and there's no
+ * point in trying to acquire them more than once since it's
+ * unexpected another entity would release them.
+ */
+ if (b_is_permanent) {
+ p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
+ } else {
+ p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
+ p_lock->retry_interval =
+ QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
+ p_lock->sleep_b4_retry = true;
+ }
+
+ p_lock->resource = resource;
+ }
+
+ if (p_unlock) {
+ memset(p_unlock, 0, sizeof(*p_unlock));
+ p_unlock->resource = resource;
+ }
+}
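
The intended acquire/release pattern built on the helpers above looks roughly like this; the function name and the choice of QED_RESC_LOCK_DBG_DUMP are illustrative, not part of the patch.

/* Hedged sketch only - not part of this patch. */
static int example_with_resc_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_resc_lock_params lock_params;
	struct qed_resc_unlock_params unlock_params;
	int rc;

	qed_mcp_resc_lock_default_init(&lock_params, &unlock_params,
				       QED_RESC_LOCK_DBG_DUMP, false);

	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
	if (rc || !lock_params.b_granted)
		return rc ? rc : -EBUSY;

	/* ... section protected against other PFs and the MFW ... */

	return qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
}
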
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 368e88de146c..5ae35d6cc7d1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -39,6 +39,7 @@
#include <linux/spinlock.h>
#include <linux/qed/qed_fcoe_if.h>
#include "qed_hsi.h"
+#include "qed_dev_api.h"
struct qed_mcp_link_speed_params {
bool autoneg;
@@ -479,14 +480,18 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
rel_pfid)
#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
-/* TODO - this is only correct as long as only BB is supported, and
- * no port-swapping is implemented; Afterwards we'll need to fix it.
- */
-#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
- ((_p_hwfn)->cdev->num_ports_in_engines * 2))
+#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
+ ((_p_hwfn)->cdev->num_ports_in_engines * \
+ qed_device_num_engines((_p_hwfn)->cdev)))
+
struct qed_mcp_info {
- /* Spinlock used for protecting the access to the MFW mailbox */
- spinlock_t lock;
+ /* List for mailbox commands which were sent and wait for a response */
+ struct list_head cmd_list;
+
+ /* Spinlock used for protecting the access to the mailbox commands list
+ * and the sending of the commands.
+ */
+ spinlock_t cmd_lock;
/* Spinlock used for syncing SW link-changes and link-changes
* originating from attention context.
@@ -506,14 +511,16 @@ struct qed_mcp_info {
u8 *mfw_mb_cur;
u8 *mfw_mb_shadow;
u16 mfw_mb_length;
- u16 mcp_hist;
+ u32 mcp_hist;
};
struct qed_mcp_mb_params {
u32 cmd;
u32 param;
- union drv_union_data *p_data_src;
- union drv_union_data *p_data_dst;
+ void *p_data_src;
+ u8 data_src_size;
+ void *p_data_dst;
+ u8 data_dst_size;
u32 mcp_resp;
u32 mcp_param;
};
@@ -564,27 +571,55 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn);
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
+enum qed_drv_role {
+ QED_DRV_ROLE_OS,
+ QED_DRV_ROLE_KDUMP,
+};
+
+struct qed_load_req_params {
+ /* Input params */
+ enum qed_drv_role drv_role;
+ u8 timeout_val;
+ bool avoid_eng_reset;
+ enum qed_override_force_load override_force_load;
+
+ /* Output params */
+ u32 load_code;
+};
+
/**
- * @brief Sends a LOAD_REQ to the MFW, and in case operation
- * succeed, returns whether this PF is the first on the
- * chip/engine/port or function. This function should be
- * called when driver is ready to accept MFW events after
- * Storms initializations are done.
+ * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
+ * returns whether this PF is the first on the engine/port or function.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param p_load_code - The MCP response param containing one
- * of the following:
- * FW_MSG_CODE_DRV_LOAD_ENGINE
- * FW_MSG_CODE_DRV_LOAD_PORT
- * FW_MSG_CODE_DRV_LOAD_FUNCTION
- * @return int -
- * 0 - Operation was successul.
- * -EBUSY - Operation failed
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return int - 0 - Operation was successful.
*/
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 *p_load_code);
+ struct qed_load_req_params *p_params);
+
+/**
+ * @brief Sends a UNLOAD_REQ message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int - 0 - Operation was successful.
+ */
+int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief Sends a UNLOAD_DONE message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int - 0 - Operation was successful.
+ */
+int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
* @brief Read the MFW mailbox into Current buffer.
@@ -708,6 +743,41 @@ int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 mask_parities);
/**
+ * @brief - Sets the MFW's max value for the given resource
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param res_id
+ * @param resc_max_val
+ * @param p_mcp_resp
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_resources res_id,
+ u32 resc_max_val, u32 *p_mcp_resp);
+
+/**
+ * @brief - Gets the MFW allocation info for the given resource
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param res_id
+ * @param p_mcp_resp
+ * @param p_resc_num
+ * @param p_resc_start
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_resources res_id,
+ u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start);
+
+/**
* @brief Send eswitch mode to MFW
*
* @param p_hwfn
@@ -720,19 +790,106 @@ int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_eswitch eswitch);
+#define QED_MCP_RESC_LOCK_MIN_VAL RESOURCE_DUMP
+#define QED_MCP_RESC_LOCK_MAX_VAL 31
+
+enum qed_resc_lock {
+ QED_RESC_LOCK_DBG_DUMP = QED_MCP_RESC_LOCK_MIN_VAL,
+ QED_RESC_LOCK_PTP_PORT0,
+ QED_RESC_LOCK_PTP_PORT1,
+ QED_RESC_LOCK_PTP_PORT2,
+ QED_RESC_LOCK_PTP_PORT3,
+ QED_RESC_LOCK_RESC_ALLOC = QED_MCP_RESC_LOCK_MAX_VAL,
+ QED_RESC_LOCK_RESC_INVALID
+};
+
/**
- * @brief - Gets the MFW allocation info for the given resource
+ * @brief - Initiates PF FLR
*
* @param p_hwfn
* @param p_ptt
- * @param p_resc_info - descriptor of requested resource
- * @param p_mcp_resp
- * @param p_mcp_param
*
* @return int - 0 - operation was successful.
*/
-int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct resource_info *p_resc_info,
- u32 *p_mcp_resp, u32 *p_mcp_param);
+int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+struct qed_resc_lock_params {
+ /* Resource number [valid values are 0..31] */
+ u8 resource;
+
+ /* Lock timeout value in seconds [default, none or 1..254] */
+ u8 timeout;
+#define QED_MCP_RESC_LOCK_TO_DEFAULT 0
+#define QED_MCP_RESC_LOCK_TO_NONE 255
+
+ /* Number of times to retry locking */
+ u8 retry_num;
+#define QED_MCP_RESC_LOCK_RETRY_CNT_DFLT 10
+
+ /* The interval in usec between retries */
+ u16 retry_interval;
+#define QED_MCP_RESC_LOCK_RETRY_VAL_DFLT 10000
+
+ /* Use sleep or delay between retries */
+ bool sleep_b4_retry;
+
+ /* Will be set as true if the resource is free and granted */
+ bool b_granted;
+
+ /* Will be filled with the resource owner.
+ * [0..15 = PF0-15, 16 = MFW]
+ */
+ u8 owner;
+};
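
For callers that want aging rather than the defaults, the struct can also be filled directly; the values below are illustrative only and show the units of the fields.

/* Illustrative only: a 5-second aging lock, retried up to 10 times with
 * a 10ms sleep between attempts (retry_interval is in usec).
 */
struct qed_resc_lock_params lock_params = {
	.resource = QED_RESC_LOCK_DBG_DUMP,
	.timeout = 5,
	.retry_num = 10,
	.retry_interval = 10000,
	.sleep_b4_retry = true,
};
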
+
+/**
+ * @brief Acquires MFW generic resource lock
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params);
+
+struct qed_resc_unlock_params {
+ /* Resource number [valid values are 0..31] */
+ u8 resource;
+
+ /* Allow to release a resource even if belongs to another PF */
+ bool b_force;
+
+ /* Will be set as true if the resource is released */
+ bool b_released;
+};
+
+/**
+ * @brief Releases MFW generic resource lock
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return int - 0 - operation was successful.
+ */
+int
+qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_resc_unlock_params *p_params);
+
+/**
+ * @brief - default initialization for lock/unlock resource structs
+ *
+ * @param p_lock - lock params struct to be initialized; Can be NULL
+ * @param p_unlock - unlock params struct to be initialized; Can be NULL
+ * @param resource - the requested resource
+ * @param b_is_permanent - disable retries & aging when set
+ */
+void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
+ struct qed_resc_unlock_params *p_unlock,
+ enum qed_resc_lock resource,
+ bool b_is_permanent);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index 378afce58b3f..db96670192c7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -41,6 +41,7 @@
#include "qed_iscsi.h"
#include "qed_ll2.h"
#include "qed_ooo.h"
+#include "qed_cxt.h"
static struct qed_ooo_archipelago
*qed_ooo_seek_archipelago(struct qed_hwfn *p_hwfn,
@@ -48,15 +49,18 @@ static struct qed_ooo_archipelago
*p_ooo_info,
u32 cid)
{
- struct qed_ooo_archipelago *p_archipelago = NULL;
+ u32 idx = (cid & 0xffff) - p_ooo_info->cid_base;
+ struct qed_ooo_archipelago *p_archipelago;
- list_for_each_entry(p_archipelago,
- &p_ooo_info->archipelagos_list, list_entry) {
- if (p_archipelago->cid == cid)
- return p_archipelago;
- }
+ if (idx >= p_ooo_info->max_num_archipelagos)
+ return NULL;
- return NULL;
+ p_archipelago = &p_ooo_info->p_archipelagos_mem[idx];
+
+ if (list_empty(&p_archipelago->isles_list))
+ return NULL;
+
+ return p_archipelago;
}
static struct qed_ooo_isle *qed_ooo_seek_isle(struct qed_hwfn *p_hwfn,
@@ -97,8 +101,8 @@ void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
{
+ u16 max_num_archipelagos = 0, cid_base;
struct qed_ooo_info *p_ooo_info;
- u16 max_num_archipelagos = 0;
u16 max_num_isles = 0;
u32 i;
@@ -110,6 +114,7 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
max_num_archipelagos = p_hwfn->pf_params.iscsi_pf_params.num_cons;
max_num_isles = QED_MAX_NUM_ISLES + max_num_archipelagos;
+ cid_base = (u16)qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ISCSI);
if (!max_num_archipelagos) {
DP_NOTICE(p_hwfn,
@@ -121,11 +126,12 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
if (!p_ooo_info)
return NULL;
+ p_ooo_info->cid_base = cid_base;
+ p_ooo_info->max_num_archipelagos = max_num_archipelagos;
+
INIT_LIST_HEAD(&p_ooo_info->free_buffers_list);
INIT_LIST_HEAD(&p_ooo_info->ready_buffers_list);
INIT_LIST_HEAD(&p_ooo_info->free_isles_list);
- INIT_LIST_HEAD(&p_ooo_info->free_archipelagos_list);
- INIT_LIST_HEAD(&p_ooo_info->archipelagos_list);
p_ooo_info->p_isles_mem = kcalloc(max_num_isles,
sizeof(struct qed_ooo_isle),
@@ -146,11 +152,8 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
if (!p_ooo_info->p_archipelagos_mem)
goto no_archipelagos_mem;
- for (i = 0; i < max_num_archipelagos; i++) {
+ for (i = 0; i < max_num_archipelagos; i++)
INIT_LIST_HEAD(&p_ooo_info->p_archipelagos_mem[i].isles_list);
- list_add_tail(&p_ooo_info->p_archipelagos_mem[i].list_entry,
- &p_ooo_info->free_archipelagos_list);
- }
p_ooo_info->ooo_history.p_cqes =
kcalloc(QED_MAX_NUM_OOO_HISTORY_ENTRIES,
@@ -178,21 +181,9 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_archipelago *p_archipelago;
struct qed_ooo_buffer *p_buffer;
struct qed_ooo_isle *p_isle;
- bool b_found = false;
-
- if (list_empty(&p_ooo_info->archipelagos_list))
- return;
- list_for_each_entry(p_archipelago,
- &p_ooo_info->archipelagos_list, list_entry) {
- if (p_archipelago->cid == cid) {
- list_del(&p_archipelago->list_entry);
- b_found = true;
- break;
- }
- }
-
- if (!b_found)
+ p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+ if (!p_archipelago)
return;
while (!list_empty(&p_archipelago->isles_list)) {
@@ -216,27 +207,21 @@ void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
list_add_tail(&p_isle->list_entry,
&p_ooo_info->free_isles_list);
}
-
- list_add_tail(&p_archipelago->list_entry,
- &p_ooo_info->free_archipelagos_list);
}
void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
struct qed_ooo_info *p_ooo_info)
{
- struct qed_ooo_archipelago *p_arch;
+ struct qed_ooo_archipelago *p_archipelago;
struct qed_ooo_buffer *p_buffer;
struct qed_ooo_isle *p_isle;
+ u32 i;
- while (!list_empty(&p_ooo_info->archipelagos_list)) {
- p_arch = list_first_entry(&p_ooo_info->archipelagos_list,
- struct qed_ooo_archipelago,
- list_entry);
-
- list_del(&p_arch->list_entry);
+ for (i = 0; i < p_ooo_info->max_num_archipelagos; i++) {
+ p_archipelago = &(p_ooo_info->p_archipelagos_mem[i]);
- while (!list_empty(&p_arch->isles_list)) {
- p_isle = list_first_entry(&p_arch->isles_list,
+ while (!list_empty(&p_archipelago->isles_list)) {
+ p_isle = list_first_entry(&p_archipelago->isles_list,
struct qed_ooo_isle,
list_entry);
@@ -258,8 +243,6 @@ void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
list_add_tail(&p_isle->list_entry,
&p_ooo_info->free_isles_list);
}
- list_add_tail(&p_arch->list_entry,
- &p_ooo_info->free_archipelagos_list);
}
if (!list_empty(&p_ooo_info->ready_buffers_list))
list_splice_tail_init(&p_ooo_info->ready_buffers_list,
@@ -378,12 +361,6 @@ void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
p_ooo_info->cur_isles_number--;
list_add(&p_isle->list_entry, &p_ooo_info->free_isles_list);
}
-
- if (list_empty(&p_archipelago->isles_list)) {
- list_del(&p_archipelago->list_entry);
- list_add(&p_archipelago->list_entry,
- &p_ooo_info->free_archipelagos_list);
- }
}
void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
@@ -426,28 +403,10 @@ void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
return;
}
- if (!p_archipelago &&
- !list_empty(&p_ooo_info->free_archipelagos_list)) {
- p_archipelago =
- list_first_entry(&p_ooo_info->free_archipelagos_list,
- struct qed_ooo_archipelago, list_entry);
+ if (!p_archipelago) {
+ u32 idx = (cid & 0xffff) - p_ooo_info->cid_base;
- list_del(&p_archipelago->list_entry);
- if (!list_empty(&p_archipelago->isles_list)) {
- DP_NOTICE(p_hwfn,
- "Free OOO connection is not empty\n");
- INIT_LIST_HEAD(&p_archipelago->isles_list);
- }
- p_archipelago->cid = cid;
- list_add(&p_archipelago->list_entry,
- &p_ooo_info->archipelagos_list);
- } else if (!p_archipelago) {
- DP_NOTICE(p_hwfn, "No more free OOO connections\n");
- list_add(&p_isle->list_entry,
- &p_ooo_info->free_isles_list);
- list_add(&p_buffer->list_entry,
- &p_ooo_info->free_buffers_list);
- return;
+ p_archipelago = &p_ooo_info->p_archipelagos_mem[idx];
}
list_add(&p_buffer->list_entry, &p_isle->buffers_list);
@@ -517,11 +476,6 @@ void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
} else {
list_splice_tail_init(&p_right_isle->buffers_list,
&p_ooo_info->ready_buffers_list);
- if (list_empty(&p_archipelago->isles_list)) {
- list_del(&p_archipelago->list_entry);
- list_add(&p_archipelago->list_entry,
- &p_ooo_info->free_archipelagos_list);
- }
}
list_add_tail(&p_right_isle->list_entry, &p_ooo_info->free_isles_list);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
index 4f138fb5f533..791ad0f8b759 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
@@ -60,9 +60,7 @@ struct qed_ooo_isle {
};
struct qed_ooo_archipelago {
- struct list_head list_entry;
struct list_head isles_list;
- u32 cid;
};
struct qed_ooo_history {
@@ -75,14 +73,14 @@ struct qed_ooo_info {
struct list_head free_buffers_list;
struct list_head ready_buffers_list;
struct list_head free_isles_list;
- struct list_head free_archipelagos_list;
- struct list_head archipelagos_list;
struct qed_ooo_archipelago *p_archipelagos_mem;
struct qed_ooo_isle *p_isles_mem;
struct qed_ooo_history ooo_history;
u32 cur_isles_number;
u32 max_isles_number;
u32 gen_isles_number;
+ u16 max_num_archipelagos;
+ u16 cid_base;
};
#if IS_ENABLED(CONFIG_QED_ISCSI)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
index d27aa85da23c..434a164a76ed 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
@@ -34,7 +34,7 @@
#include "qed_dev_api.h"
#include "qed_hw.h"
#include "qed_l2.h"
-#include "qed_ptp.h"
+#include "qed_mcp.h"
#include "qed_reg_addr.h"
/* 16 nano second time quantas to wait before making a Drift adjustment */
@@ -45,6 +45,82 @@
#define QED_DRIFT_CNTR_DIRECTION_SHIFT 31
#define QED_TIMESTAMP_MASK BIT(16)
+static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
+{
+ switch (qed_device_get_port_id(p_hwfn->cdev)) {
+ case 0:
+ return QED_RESC_LOCK_PTP_PORT0;
+ case 1:
+ return QED_RESC_LOCK_PTP_PORT1;
+ case 2:
+ return QED_RESC_LOCK_PTP_PORT2;
+ case 3:
+ return QED_RESC_LOCK_PTP_PORT3;
+ default:
+ return QED_RESC_LOCK_RESC_INVALID;
+ }
+}
+
+static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_resc_lock_params params;
+ enum qed_resc_lock resource;
+ int rc;
+
+ resource = qed_ptcdev_to_resc(p_hwfn);
+ if (resource == QED_RESC_LOCK_RESC_INVALID)
+ return -EINVAL;
+
+ qed_mcp_resc_lock_default_init(&params, NULL, resource, true);
+
+ rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &params);
+ if (rc && rc != -EINVAL) {
+ return rc;
+ } else if (rc == -EINVAL) {
+ /* MFW doesn't support resource locking, first PF on the port
+ * has lock ownership.
+ */
+ if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines)
+ return 0;
+
+ DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
+ return -EBUSY;
+ } else if (!rc && !params.b_granted) {
+ DP_INFO(p_hwfn, "Failed to acquire ptp resource lock\n");
+ return -EBUSY;
+ }
+
+ return rc;
+}
+
+static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_resc_unlock_params params;
+ enum qed_resc_lock resource;
+ int rc;
+
+ resource = qed_ptcdev_to_resc(p_hwfn);
+ if (resource == QED_RESC_LOCK_RESC_INVALID)
+ return -EINVAL;
+
+ qed_mcp_resc_lock_default_init(NULL, &params, resource, true);
+
+ rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &params);
+ if (rc == -EINVAL) {
+ /* MFW doesn't support locking, first PF has lock ownership */
+ if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines) {
+ rc = 0;
+ } else {
+ DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
+ return -EINVAL;
+ }
+ } else if (rc) {
+ DP_INFO(p_hwfn, "Failed to release the ptp resource lock\n");
+ }
+
+ return rc;
+}
+
/* Read Rx timestamp */
static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp)
{
@@ -112,39 +188,73 @@ static int qed_ptp_hw_read_cc(struct qed_dev *cdev, u64 *phc_cycles)
}
/* Filter PTP protocol packets that need to be timestamped */
-static int qed_ptp_hw_cfg_rx_filters(struct qed_dev *cdev,
- enum qed_ptp_filter_type type)
+static int qed_ptp_hw_cfg_filters(struct qed_dev *cdev,
+ enum qed_ptp_filter_type rx_type,
+ enum qed_ptp_hwtstamp_tx_type tx_type)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
- u32 rule_mask, parm_mask;
+ u32 rule_mask, enable_cfg = 0x0;
- switch (type) {
- case QED_PTP_FILTER_L2_IPV4_IPV6:
- parm_mask = 0x6AA;
- rule_mask = 0x3EEE;
+ switch (rx_type) {
+ case QED_PTP_FILTER_NONE:
+ enable_cfg = 0x0;
+ rule_mask = 0x3FFF;
break;
- case QED_PTP_FILTER_L2:
- parm_mask = 0x6BF;
- rule_mask = 0x3EFF;
+ case QED_PTP_FILTER_ALL:
+ enable_cfg = 0x7;
+ rule_mask = 0x3CAA;
break;
- case QED_PTP_FILTER_IPV4_IPV6:
- parm_mask = 0x7EA;
- rule_mask = 0x3FFE;
+ case QED_PTP_FILTER_V1_L4_EVENT:
+ enable_cfg = 0x3;
+ rule_mask = 0x3FFA;
break;
- case QED_PTP_FILTER_IPV4:
- parm_mask = 0x7EE;
+ case QED_PTP_FILTER_V1_L4_GEN:
+ enable_cfg = 0x3;
rule_mask = 0x3FFE;
break;
+ case QED_PTP_FILTER_V2_L4_EVENT:
+ enable_cfg = 0x5;
+ rule_mask = 0x3FAA;
+ break;
+ case QED_PTP_FILTER_V2_L4_GEN:
+ enable_cfg = 0x5;
+ rule_mask = 0x3FEE;
+ break;
+ case QED_PTP_FILTER_V2_L2_EVENT:
+ enable_cfg = 0x5;
+ rule_mask = 0x3CFF;
+ break;
+ case QED_PTP_FILTER_V2_L2_GEN:
+ enable_cfg = 0x5;
+ rule_mask = 0x3EFF;
+ break;
+ case QED_PTP_FILTER_V2_EVENT:
+ enable_cfg = 0x5;
+ rule_mask = 0x3CAA;
+ break;
+ case QED_PTP_FILTER_V2_GEN:
+ enable_cfg = 0x5;
+ rule_mask = 0x3EEE;
+ break;
default:
- DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", type);
+ DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", rx_type);
return -EINVAL;
}
- qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, parm_mask);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0);
qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, enable_cfg);
- qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_TO_HOST, 0x1);
+ if (tx_type == QED_PTP_HWTSTAMP_TX_OFF) {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
+ } else {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, enable_cfg);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, rule_mask);
+ }
/* Reset possibly old timestamps */
qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
@@ -248,7 +358,25 @@ static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb)
static int qed_ptp_hw_enable(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
- struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+ struct qed_ptt *p_ptt;
+ int rc;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn, "Failed to acquire PTT for PTP\n");
+ return -EBUSY;
+ }
+
+ p_hwfn->p_ptp_ptt = p_ptt;
+
+ rc = qed_ptp_res_lock(p_hwfn, p_ptt);
+ if (rc) {
+ DP_INFO(p_hwfn,
+ "Couldn't acquire the resource lock, skip ptp enable for this PF\n");
+ qed_ptt_release(p_hwfn, p_ptt);
+ p_hwfn->p_ptp_ptt = NULL;
+ return rc;
+ }
/* Reset PTP event detection rules - will be configured in the IOCTL */
qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
@@ -262,12 +390,20 @@ static int qed_ptp_hw_enable(struct qed_dev *cdev)
qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1);
/* Pause free running counter */
- qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
+ if (QED_IS_BB_B0(p_hwfn->cdev))
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
+ if (QED_IS_AH(p_hwfn->cdev))
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 2);
qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0);
qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0);
/* Resume free running counter */
- qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
+ if (QED_IS_BB_B0(p_hwfn->cdev))
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
+ if (QED_IS_AH(p_hwfn->cdev)) {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 4);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_PTP_LATCH_OSTS_PKT_TIME, 1);
+ }
/* Disable drift register */
qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0);
@@ -281,22 +417,13 @@ static int qed_ptp_hw_enable(struct qed_dev *cdev)
return 0;
}
-static int qed_ptp_hw_hwtstamp_tx_on(struct qed_dev *cdev)
-{
- struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
- struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
-
- qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x6AA);
- qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3EEE);
-
- return 0;
-}
-
static int qed_ptp_hw_disable(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+ qed_ptp_res_unlock(p_hwfn, p_ptt);
+
/* Reset PTP event detection rules */
qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
@@ -308,12 +435,14 @@ static int qed_ptp_hw_disable(struct qed_dev *cdev)
qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0);
qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
+ qed_ptt_release(p_hwfn, p_ptt);
+ p_hwfn->p_ptp_ptt = NULL;
+
return 0;
}
const struct qed_eth_ptp_ops qed_ptp_ops_pass = {
- .hwtstamp_tx_on = qed_ptp_hw_hwtstamp_tx_on,
- .cfg_rx_filters = qed_ptp_hw_cfg_rx_filters,
+ .cfg_filters = qed_ptp_hw_cfg_filters,
.read_rx_ts = qed_ptp_hw_read_rx_ts,
.read_tx_ts = qed_ptp_hw_read_tx_ts,
.read_cc = qed_ptp_hw_read_cc,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.h b/drivers/net/ethernet/qlogic/qed/qed_ptp.h
deleted file mode 100644
index 63c666d0b739..000000000000
--- a/drivers/net/ethernet/qlogic/qed/qed_ptp.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* QLogic qed NIC Driver
- * Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _QED_PTP_H
-#define _QED_PTP_H
-#include <linux/types.h>
-
-int qed_ptp_hwtstamp_tx_on(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-int qed_ptp_cfg_rx_filters(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- enum qed_ptp_filter_type type);
-int qed_ptp_read_rx_ts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 *ts);
-int qed_ptp_read_tx_ts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 *ts);
-int qed_ptp_read_cc(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u64 *cycles);
-int qed_ptp_adjfreq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, s32 ppb);
-int qed_ptp_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-int qed_ptp_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-
-#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index d59d9df60cd2..1ae73b2d6d1e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -160,13 +160,13 @@
0x2e0704UL
#define CCFC_REG_STRONG_ENABLE_PF \
0x2e0708UL
-#define PGLUE_B_REG_PGL_ADDR_88_F0 \
+#define PGLUE_B_REG_PGL_ADDR_88_F0_BB \
0x2aa404UL
-#define PGLUE_B_REG_PGL_ADDR_8C_F0 \
+#define PGLUE_B_REG_PGL_ADDR_8C_F0_BB \
0x2aa408UL
-#define PGLUE_B_REG_PGL_ADDR_90_F0 \
+#define PGLUE_B_REG_PGL_ADDR_90_F0_BB \
0x2aa40cUL
-#define PGLUE_B_REG_PGL_ADDR_94_F0 \
+#define PGLUE_B_REG_PGL_ADDR_94_F0_BB \
0x2aa410UL
#define PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
0x2aa138UL
@@ -356,6 +356,10 @@
0x238804UL
#define RDIF_REG_STOP_ON_ERROR \
0x300040UL
+#define RDIF_REG_DEBUG_ERROR_INFO \
+ 0x300400UL
+#define RDIF_REG_DEBUG_ERROR_INFO_SIZE \
+ 64
#define SRC_REG_SOFT_RST \
0x23874cUL
#define TCFC_REG_ACTIVITY_COUNTER \
@@ -370,6 +374,10 @@
0x1700004UL
#define TDIF_REG_STOP_ON_ERROR \
0x310040UL
+#define TDIF_REG_DEBUG_ERROR_INFO \
+ 0x310400UL
+#define TDIF_REG_DEBUG_ERROR_INFO_SIZE \
+ 64
#define UCM_REG_INIT \
0x1280000UL
#define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
@@ -1236,6 +1244,26 @@
0x1901534UL
#define USEM_REG_DBG_FORCE_FRAME \
0x1901538UL
+#define NWS_REG_DBG_SELECT \
+ 0x700128UL
+#define NWS_REG_DBG_DWORD_ENABLE \
+ 0x70012cUL
+#define NWS_REG_DBG_SHIFT \
+ 0x700130UL
+#define NWS_REG_DBG_FORCE_VALID \
+ 0x700134UL
+#define NWS_REG_DBG_FORCE_FRAME \
+ 0x700138UL
+#define MS_REG_DBG_SELECT \
+ 0x6a0228UL
+#define MS_REG_DBG_DWORD_ENABLE \
+ 0x6a022cUL
+#define MS_REG_DBG_SHIFT \
+ 0x6a0230UL
+#define MS_REG_DBG_FORCE_VALID \
+ 0x6a0234UL
+#define MS_REG_DBG_FORCE_FRAME \
+ 0x6a0238UL
#define PCIE_REG_DBG_COMMON_SELECT \
0x054398UL
#define PCIE_REG_DBG_COMMON_DWORD_ENABLE \
@@ -1448,6 +1476,8 @@
0x000b48UL
#define RSS_REG_RSS_RAM_DATA \
0x238c20UL
+#define RSS_REG_RSS_RAM_DATA_SIZE \
+ 4
#define MISC_REG_BLOCK_256B_EN \
0x008c14UL
#define NWS_REG_NWS_CMU \
@@ -1520,4 +1550,22 @@
#define NIG_REG_TIMESYNC_GEN_REG_BB 0x500d00UL
#define NIG_REG_TSGEN_FREE_CNT_VALUE_LSB 0x5088a8UL
#define NIG_REG_TSGEN_FREE_CNT_VALUE_MSB 0x5088acUL
+#define NIG_REG_PTP_LATCH_OSTS_PKT_TIME 0x509040UL
+#define PSWRQ2_REG_WR_MBS0 0x240400UL
+
+#define PGLUE_B_REG_PGL_ADDR_E8_F0_K2 0x2aaf98UL
+#define PGLUE_B_REG_PGL_ADDR_EC_F0_K2 0x2aaf9cUL
+#define PGLUE_B_REG_PGL_ADDR_F0_F0_K2 0x2aafa0UL
+#define PGLUE_B_REG_PGL_ADDR_F4_F0_K2 0x2aafa4UL
+#define NIG_REG_TSGEN_FREECNT_UPDATE_K2 0x509008UL
+#define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL
+
+#define PRS_REG_SEARCH_GFT 0x1f11bcUL
+#define PRS_REG_CM_HDR_GFT 0x1f11c8UL
+#define PRS_REG_GFT_CAM 0x1f1100UL
+#define PRS_REG_GFT_PROFILE_MASK_RAM 0x1f1000UL
+#define PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT 0
+#define PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT 8
+#define PRS_REG_LOAD_L2_FILTER 0x1f0198UL
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index d9ff6b28591c..56289d7cd306 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -66,17 +66,31 @@
#include "qed_roce.h"
#include "qed_ll2.h"
-void qed_async_roce_event(struct qed_hwfn *p_hwfn,
- struct event_ring_entry *p_eqe)
+static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
+
+void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code, union rdma_eqe_data *rdma_data)
{
- struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+ if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
+ u16 icid =
+ (u16)le32_to_cpu(rdma_data->rdma_destroy_qp_data.cid);
+
+ /* icid release in this async event can occur only if the icid
+ * was offloaded to the FW. In case it wasn't offloaded this is
+ * handled in qed_roce_sp_destroy_qp.
+ */
+ qed_roce_free_real_icid(p_hwfn, icid);
+ } else {
+ struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events;
- p_rdma_info->events.affiliated_event(p_rdma_info->events.context,
- p_eqe->opcode, &p_eqe->data);
+ events->affiliated_event(p_hwfn->p_rdma_info->events.context,
+ fw_event_code,
+ &rdma_data->async_handle);
+ }
}
static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
- struct qed_bmap *bmap, u32 max_count)
+ struct qed_bmap *bmap, u32 max_count, char *name)
{
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
@@ -90,43 +104,62 @@ static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
return -ENOMEM;
}
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocated bitmap %p\n",
- bmap->bitmap);
+ snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
return 0;
}
static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
struct qed_bmap *bmap, u32 *id_num)
{
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "bmap = %p\n", bmap);
-
*id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
-
- if (*id_num >= bmap->max_count) {
- DP_NOTICE(p_hwfn, "no id available max_count=%d\n",
- bmap->max_count);
+ if (*id_num >= bmap->max_count)
return -EINVAL;
- }
__set_bit(*id_num, bmap->bitmap);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n",
+ bmap->name, *id_num);
+
return 0;
}
+static void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ if (id_num >= bmap->max_count)
+ return;
+
+ __set_bit(id_num, bmap->bitmap);
+}
+
static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
struct qed_bmap *bmap, u32 id_num)
{
bool b_acquired;
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "id_num = %08x", id_num);
if (id_num >= bmap->max_count)
return;
b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
if (!b_acquired) {
- DP_NOTICE(p_hwfn, "ID %d already released\n", id_num);
+ DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n",
+ bmap->name, id_num);
return;
}
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n",
+ bmap->name, id_num);
+}
+
+static int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ if (id_num >= bmap->max_count)
+ return -1;
+
+ return test_bit(id_num, bmap->bitmap);
}
static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
@@ -170,7 +203,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
/* Queue zone lines are shared between RoCE and L2 in such a way that
* they can be used by each without obstructing the other.
*/
- p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE);
+ p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+ p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);
/* Allocate a struct with device params and fill it */
p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
@@ -191,7 +225,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
}
/* Allocate bit map for pd's */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS);
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
+ "PD");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate pd_map, rc = %d\n",
@@ -201,7 +236,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
/* Allocate DPI bitmap */
rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
- p_hwfn->dpi_count);
+ p_hwfn->dpi_count, "DPI");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate DPI bitmap, rc = %d\n", rc);
@@ -212,7 +247,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
* twice the number of QPs.
*/
rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
- p_rdma_info->num_qps * 2);
+ p_rdma_info->num_qps * 2, "CQ");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate cq bitmap, rc = %d\n", rc);
@@ -224,7 +259,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
* The maximum number of CQs is bounded to twice the number of QPs.
*/
rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
- p_rdma_info->num_qps * 2);
+ p_rdma_info->num_qps * 2, "Toggle");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate toogle bits, rc = %d\n", rc);
@@ -233,7 +268,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
/* Allocate bitmap for itids */
rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
- p_rdma_info->num_mrs);
+ p_rdma_info->num_mrs, "MR");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate itids bitmaps, rc = %d\n", rc);
@@ -241,16 +276,27 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
}
/* Allocate bitmap for cids used for qps. */
- rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons);
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
+ "CID");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"Failed to allocate cid bitmap, rc = %d\n", rc);
goto free_tid_map;
}
+ /* Allocate bitmap for cids used for responders/requesters. */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons,
+ "REAL_CID");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate real cid bitmap, rc = %d\n", rc);
+ goto free_cid_map;
+ }
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
return 0;
+free_cid_map:
+ kfree(p_rdma_info->cid_map.bitmap);
free_tid_map:
kfree(p_rdma_info->tid_map.bitmap);
free_toggle_map:
@@ -271,16 +317,79 @@ free_rdma_info:
return rc;
}
+static void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, bool check)
+{
+ int weight = bitmap_weight(bmap->bitmap, bmap->max_count);
+ int last_line = bmap->max_count / (64 * 8);
+ int last_item = last_line * 8 +
+ DIV_ROUND_UP(bmap->max_count % (64 * 8), 64);
+ u64 *pmap = (u64 *)bmap->bitmap;
+ int line, item, offset;
+ u8 str_last_line[200] = { 0 };
+
+ if (!weight || !check)
+ goto end;
+
+ DP_NOTICE(p_hwfn,
+ "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
+ bmap->name, bmap->max_count, weight);
+
+ /* print aligned non-zero lines, if any */
+ for (item = 0, line = 0; line < last_line; line++, item += 8)
+ if (bitmap_weight((unsigned long *)&pmap[item], 64 * 8))
+ DP_NOTICE(p_hwfn,
+ "line 0x%04x: 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ line,
+ pmap[item],
+ pmap[item + 1],
+ pmap[item + 2],
+ pmap[item + 3],
+ pmap[item + 4],
+ pmap[item + 5],
+ pmap[item + 6], pmap[item + 7]);
+
+ /* print last unaligned non-zero line, if any */
+ if ((bmap->max_count % (64 * 8)) &&
+ (bitmap_weight((unsigned long *)&pmap[item],
+ bmap->max_count - item * 64))) {
+ offset = sprintf(str_last_line, "line 0x%04x: ", line);
+ for (; item < last_item; item++)
+ offset += sprintf(str_last_line + offset,
+ "0x%016llx ", pmap[item]);
+ DP_NOTICE(p_hwfn, "%s\n", str_last_line);
+ }
+
+end:
+ kfree(bmap->bitmap);
+ bmap->bitmap = NULL;
+}
+
static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
+ struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+ int wait_count = 0;
- kfree(p_rdma_info->cid_map.bitmap);
- kfree(p_rdma_info->tid_map.bitmap);
- kfree(p_rdma_info->toggle_bits.bitmap);
- kfree(p_rdma_info->cq_map.bitmap);
- kfree(p_rdma_info->dpi_map.bitmap);
- kfree(p_rdma_info->pd_map.bitmap);
+	/* when destroying a RoCE QP the control is returned to the user after
+ * the synchronous part. The asynchronous part may take a little longer.
+ * We delay for a short while if an async destroy QP is still expected.
+ * Beyond the added delay we clear the bitmap anyway.
+ */
+ while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+ msleep(100);
+ if (wait_count++ > 20) {
+ DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
+ break;
+ }
+ }
+
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
kfree(p_rdma_info->port);
kfree(p_rdma_info->dev);
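
qed_rdma_resc_free() now waits a bounded time for asynchronous QP destruction to drain the real CID map before tearing it down. A small standalone sketch of that bounded-poll-then-proceed pattern, with a placeholder predicate instead of bitmap_weight():

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Placeholder predicate: returns true while asynchronous work is pending. */
static bool work_pending(int *outstanding)
{
	if (*outstanding > 0) {
		(*outstanding)--;	/* pretend one item completes per poll */
		return true;
	}
	return false;
}

/* Poll every 100 ms; give up (but continue tearing down) after ~2 seconds. */
static void wait_for_drain(int *outstanding)
{
	int wait_count = 0;

	while (work_pending(outstanding)) {
		usleep(100 * 1000);
		if (wait_count++ > 20) {
			fprintf(stderr, "drain wait timed out\n");
			break;
		}
	}
}

int main(void)
{
	int outstanding = 3;

	wait_for_drain(&outstanding);
	printf("proceeding with teardown, outstanding = %d\n", outstanding);
	return 0;
}
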
@@ -675,6 +784,7 @@ static int qed_rdma_add_user(void *rdma_cxt,
((out_params->dpi) * p_hwfn->dpi_size);
out_params->dpi_size = p_hwfn->dpi_size;
+ out_params->wid_count = p_hwfn->wid_count;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
return rc;
@@ -693,6 +803,8 @@ static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
+ p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;
+
return p_port;
}
@@ -724,6 +836,14 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
u32 addr;
p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
+ DP_NOTICE(p_hwfn,
+ "queue zone offset %d is too large (max is %d)\n",
+ qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
+ return;
+ }
+
qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
addr = GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
@@ -737,9 +857,12 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
struct qed_dev_rdma_info *info)
{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+
memset(info, 0, sizeof(*info));
info->rdma_type = QED_RDMA_TYPE_ROCE;
+ info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);
qed_fill_dev_info(cdev, &info->common);
@@ -887,8 +1010,7 @@ static int qed_rdma_create_cq(void *rdma_cxt,
/* Allocate icid */
spin_lock_bh(&p_info->lock);
- rc = qed_rdma_bmap_alloc_id(p_hwfn,
- &p_info->cq_map, &returned_id);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
spin_unlock_bh(&p_info->lock);
if (rc) {
@@ -1080,6 +1202,14 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
return flavor;
}
+void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
+{
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
@@ -1139,15 +1269,22 @@ err:
return rc;
}
+static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
+{
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp)
{
struct roce_create_qp_resp_ramrod_data *p_ramrod;
struct qed_sp_init_data init_data;
- union qed_qm_pq_params qm_params;
enum roce_flavor roce_flavor;
struct qed_spq_entry *p_ent;
- u16 physical_queue0 = 0;
+ u16 regular_latency_queue;
+ enum protocol_type proto;
int rc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -1229,15 +1366,16 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
- p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
qp->rq_cq_id);
- memset(&qm_params, 0, sizeof(qm_params));
- qm_params.roce.qpid = qp->icid >> 1;
- physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+ regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+
+ p_ramrod->regular_latency_phy_queue =
+ cpu_to_le16(regular_latency_queue);
+ p_ramrod->low_latency_phy_queue =
+ cpu_to_le16(regular_latency_queue);
- p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
p_ramrod->dpi = cpu_to_le16(qp->dpi);
qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
@@ -1253,13 +1391,19 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
rc = qed_spq_post(p_hwfn, p_ent, NULL);
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n",
- rc, physical_queue0);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "rc = %d regular physical queue = 0x%x\n", rc,
+ regular_latency_queue);
if (rc)
goto err;
qp->resp_offloaded = true;
+ qp->cq_prod = 0;
+
+ proto = p_hwfn->p_rdma_info->proto;
+ qed_roce_set_real_cid(p_hwfn, qp->icid -
+ qed_cxt_get_proto_cid_start(p_hwfn, proto));
return rc;
@@ -1277,10 +1421,10 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
{
struct roce_create_qp_req_ramrod_data *p_ramrod;
struct qed_sp_init_data init_data;
- union qed_qm_pq_params qm_params;
enum roce_flavor roce_flavor;
struct qed_spq_entry *p_ent;
- u16 physical_queue0 = 0;
+ u16 regular_latency_queue;
+ enum protocol_type proto;
int rc;
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -1351,15 +1495,16 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
- p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
- p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
- qp->sq_cq_id);
+ p_ramrod->cq_cid =
+ cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
- memset(&qm_params, 0, sizeof(qm_params));
- qm_params.roce.qpid = qp->icid >> 1;
- physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+ regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+
+ p_ramrod->regular_latency_phy_queue =
+ cpu_to_le16(regular_latency_queue);
+ p_ramrod->low_latency_phy_queue =
+ cpu_to_le16(regular_latency_queue);
- p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
p_ramrod->dpi = cpu_to_le16(qp->dpi);
qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
@@ -1378,6 +1523,10 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
goto err;
qp->req_offloaded = true;
+ proto = p_hwfn->p_rdma_info->proto;
+ qed_roce_set_real_cid(p_hwfn,
+ qp->icid + 1 -
+ qed_cxt_get_proto_cid_start(p_hwfn, proto));
return rc;
@@ -1577,7 +1726,8 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
struct qed_rdma_qp *qp,
- u32 *num_invalidated_mw)
+ u32 *num_invalidated_mw,
+ u32 *cq_prod)
{
struct roce_destroy_qp_resp_output_params *p_ramrod_res;
struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
@@ -1588,8 +1738,22 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
- if (!qp->resp_offloaded)
+ *num_invalidated_mw = 0;
+ *cq_prod = qp->cq_prod;
+
+ if (!qp->resp_offloaded) {
+		/* If a responder was never offloaded, we need to free the cids
+ * allocated in create_qp as a FW async event will never arrive
+ */
+ u32 cid;
+
+ cid = qp->icid -
+ qed_cxt_get_proto_cid_start(p_hwfn,
+ p_hwfn->p_rdma_info->proto);
+ qed_roce_free_cid_pair(p_hwfn, (u16)cid);
+
return 0;
+ }
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -1624,6 +1788,8 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
goto err;
*num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
+ *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
+ qp->cq_prod = *cq_prod;
/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
@@ -1827,10 +1993,8 @@ static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
out_params->draining = false;
- if (rq_err_state)
+ if (rq_err_state || sq_err_state)
qp->cur_state = QED_ROCE_QP_STATE_ERR;
- else if (sq_err_state)
- qp->cur_state = QED_ROCE_QP_STATE_SQE;
else if (sq_draining)
out_params->draining = true;
out_params->state = qp->cur_state;
@@ -1849,10 +2013,9 @@ err_resp:
static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
- struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
u32 num_invalidated_mw = 0;
u32 num_bound_mw = 0;
- u32 start_cid;
+ u32 cq_prod;
int rc;
/* Destroys the specified QP */
@@ -1866,7 +2029,8 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
- &num_invalidated_mw);
+ &num_invalidated_mw,
+ &cq_prod);
if (rc)
return rc;
@@ -1881,21 +2045,6 @@ static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
"number of invalidate memory windows is different from bounded ones\n");
return -EINVAL;
}
-
- spin_lock_bh(&p_rdma_info->lock);
-
- start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
- p_rdma_info->proto);
-
- /* Release responder's icid */
- qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
- qp->icid - start_cid);
-
- /* Release requester's icid */
- qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
- qp->icid + 1 - start_cid);
-
- spin_unlock_bh(&p_rdma_info->lock);
}
return 0;
@@ -2097,8 +2246,7 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
params->modify_flags);
return rc;
- } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR ||
- qp->cur_state == QED_ROCE_QP_STATE_SQE) {
+ } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) {
/* ->ERR */
rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
params->modify_flags);
@@ -2110,12 +2258,19 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
return rc;
} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
/* Any state -> RESET */
+ u32 cq_prod;
+
+ /* Send destroy responder ramrod */
+ rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
+ qp,
+ &num_invalidated_mw,
+ &cq_prod);
- rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
- &num_invalidated_mw);
if (rc)
return rc;
+ qp->cq_prod = cq_prod;
+
rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
&num_bound_mw);
@@ -2357,6 +2512,8 @@ qed_rdma_register_tid(void *rdma_cxt,
}
rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc)
+ return rc;
if (fw_return_code != RDMA_RETURN_OK) {
DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
@@ -2454,6 +2611,31 @@ static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
return rc;
}
+static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+ u32 start_cid, cid, xcid;
+
+ /* an even icid belongs to a responder while an odd icid belongs to a
+ * requester. The 'cid' received as an input can be either. We calculate
+	 * the "partner" icid and call it xcid. Only when both are free can the
+	 * "cid" map be cleared.
+ */
+ start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
+ cid = icid - start_cid;
+ xcid = cid ^ 1;
+
+ spin_lock_bh(&p_rdma_info->lock);
+
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
+ if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
+ }
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
{
return QED_LEADING_HWFN(cdev);
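
qed_roce_free_real_icid() releases the underlying CID pair only once both halves (even icid = responder, odd icid = requester) are gone. A user-space sketch of that pairing rule; array sizes and names are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define NUM_CIDS 8

static bool real_cid_used[NUM_CIDS];	/* per-half usage, like real_cid_map */
static bool cid_used[NUM_CIDS];		/* underlying pair, like cid_map */

static void free_real_cid(unsigned int cid)
{
	unsigned int xcid = cid ^ 1;	/* the partner half of the same QP */

	real_cid_used[cid] = false;

	/* Only when both halves are free can the pair itself be returned. */
	if (!real_cid_used[xcid]) {
		cid_used[cid] = false;
		cid_used[xcid] = false;
		printf("released pair (%u, %u)\n", cid & ~1u, (cid & ~1u) + 1);
	}
}

int main(void)
{
	/* A QP occupies cids 4 (responder) and 5 (requester). */
	cid_used[4] = cid_used[5] = true;
	real_cid_used[4] = real_cid_used[5] = true;

	free_real_cid(5);	/* requester destroyed first: pair kept */
	free_real_cid(4);	/* responder destroyed: pair released */
	return 0;
}
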
@@ -2773,7 +2955,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev,
: QED_LL2_RROCE;
if (pkt->roce_mode == ROCE_V2_IPV4)
- flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+ flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
/* Tx header */
rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
index 36cf4b2ab7fa..9742af516183 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -67,9 +67,11 @@ enum qed_rdma_toggle_bit {
QED_RDMA_TOGGLE_BIT_SET = 1
};
+#define QED_RDMA_MAX_BMAP_NAME (10)
struct qed_bmap {
unsigned long *bitmap;
u32 max_count;
+ char name[QED_RDMA_MAX_BMAP_NAME];
};
struct qed_rdma_info {
@@ -82,6 +84,7 @@ struct qed_rdma_info {
struct qed_bmap qp_map;
struct qed_bmap srq_map;
struct qed_bmap cid_map;
+ struct qed_bmap real_cid_map;
struct qed_bmap dpi_map;
struct qed_bmap toggle_bits;
struct qed_rdma_events events;
@@ -92,6 +95,7 @@ struct qed_rdma_info {
u32 num_qps;
u32 num_mrs;
u16 queue_zone_base;
+ u16 max_queue_zones;
enum protocol_type proto;
};
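
Storing max_queue_zones next to queue_zone_base lets qed_rdma_cnq_prod_update() reject out-of-range offsets before forming the absolute zone number. A tiny sketch of that base-plus-bounded-offset lookup (values are made up; the sketch uses a strict range check):

#include <stdint.h>
#include <stdio.h>

struct qz_range {
	uint16_t base;	/* like queue_zone_base: first zone owned by RDMA */
	uint16_t count;	/* like max_queue_zones: how many zones follow it */
};

/* Returns the absolute queue zone, or -1 if the offset is out of range. */
static int qz_lookup(const struct qz_range *r, uint8_t offset)
{
	if (offset >= r->count)
		return -1;
	return r->base + offset;
}

int main(void)
{
	struct qz_range r = { .base = 64, .count = 16 };

	printf("%d\n", qz_lookup(&r, 3));	/* 67 */
	printf("%d\n", qz_lookup(&r, 20));	/* -1: rejected */
	return 0;
}
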
@@ -153,6 +157,7 @@ struct qed_rdma_qp {
dma_addr_t irq_phys_addr;
u8 irq_num_pages;
bool resp_offloaded;
+ u32 cq_prod;
u8 remote_mac_addr[6];
u8 local_mac_addr[6];
@@ -163,8 +168,8 @@ struct qed_rdma_qp {
#if IS_ENABLED(CONFIG_QED_RDMA)
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-void qed_async_roce_event(struct qed_hwfn *p_hwfn,
- struct event_ring_entry *p_eqe);
+void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code, union rdma_eqe_data *rdma_data);
void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
@@ -187,7 +192,9 @@ void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
u16 src_mac_addr_lo, bool b_last_packet);
#else
static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
-static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {}
+static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+ u8 fw_event_code,
+ union rdma_eqe_data *rdma_data) {}
static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
u8 connection_handle,
void *cookie,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 30393ffaa8e5..3357bbefa445 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -84,6 +84,7 @@ union ramrod_data {
struct tx_queue_stop_ramrod_data tx_queue_stop;
struct vport_start_ramrod_data vport_start;
struct vport_stop_ramrod_data vport_stop;
+ struct rx_update_gft_filter_data rx_update_gft;
struct vport_update_ramrod_data vport_update;
struct core_rx_start_ramrod_data core_rx_queue_start;
struct core_rx_stop_ramrod_data core_rx_queue_stop;
@@ -408,7 +409,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
*/
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
- struct qed_tunn_start_params *p_tunn,
+ struct qed_tunnel_info *p_tunn,
enum qed_mf_mode mode, bool allow_npar_tx_switch);
/**
@@ -441,7 +442,7 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn);
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
- struct qed_tunn_update_params *p_tunn,
+ struct qed_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
/**
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 6fb80f9ef446..bc3694e91b85 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -111,7 +111,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
return 0;
}
-static enum tunnel_clss qed_tunn_get_clss_type(u8 type)
+static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
{
switch (type) {
case QED_TUNN_CLSS_MAC_VLAN:
@@ -122,206 +122,201 @@ static enum tunnel_clss qed_tunn_get_clss_type(u8 type)
return TUNNEL_CLSS_INNER_MAC_VLAN;
case QED_TUNN_CLSS_INNER_MAC_VNI:
return TUNNEL_CLSS_INNER_MAC_VNI;
+ case QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
+ return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
default:
return TUNNEL_CLSS_MAC_VLAN;
}
}
static void
-qed_tunn_set_pf_fix_tunn_mode(struct qed_hwfn *p_hwfn,
- struct qed_tunn_update_params *p_src,
- struct pf_update_tunnel_config *p_tunn_cfg)
+qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
+ struct qed_tunnel_info *p_src, bool b_pf_start)
{
- unsigned long cached_tunn_mode = p_hwfn->cdev->tunn_mode;
- unsigned long update_mask = p_src->tunn_mode_update_mask;
- unsigned long tunn_mode = p_src->tunn_mode;
- unsigned long new_tunn_mode = 0;
-
- if (test_bit(QED_MODE_L2GRE_TUNN, &update_mask)) {
- if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
- __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
- } else {
- if (test_bit(QED_MODE_L2GRE_TUNN, &cached_tunn_mode))
- __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
- }
-
- if (test_bit(QED_MODE_IPGRE_TUNN, &update_mask)) {
- if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
- __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
- } else {
- if (test_bit(QED_MODE_IPGRE_TUNN, &cached_tunn_mode))
- __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
- }
+ if (p_src->vxlan.b_update_mode || b_pf_start)
+ p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;
- if (test_bit(QED_MODE_VXLAN_TUNN, &update_mask)) {
- if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
- __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
- } else {
- if (test_bit(QED_MODE_VXLAN_TUNN, &cached_tunn_mode))
- __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
- }
-
- if (p_src->update_geneve_udp_port) {
- p_tunn_cfg->set_geneve_udp_port_flg = 1;
- p_tunn_cfg->geneve_udp_port =
- cpu_to_le16(p_src->geneve_udp_port);
- }
+ if (p_src->l2_gre.b_update_mode || b_pf_start)
+ p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;
- if (test_bit(QED_MODE_L2GENEVE_TUNN, &update_mask)) {
- if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
- __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
- } else {
- if (test_bit(QED_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
- __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
- }
+ if (p_src->ip_gre.b_update_mode || b_pf_start)
+ p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;
- if (test_bit(QED_MODE_IPGENEVE_TUNN, &update_mask)) {
- if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
- __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
- } else {
- if (test_bit(QED_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
- __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
- }
+ if (p_src->l2_geneve.b_update_mode || b_pf_start)
+ p_tun->l2_geneve.b_mode_enabled =
+ p_src->l2_geneve.b_mode_enabled;
- p_src->tunn_mode = new_tunn_mode;
+ if (p_src->ip_geneve.b_update_mode || b_pf_start)
+ p_tun->ip_geneve.b_mode_enabled =
+ p_src->ip_geneve.b_mode_enabled;
}
-static void
-qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
- struct qed_tunn_update_params *p_src,
- struct pf_update_tunnel_config *p_tunn_cfg)
+static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
+ struct qed_tunnel_info *p_src)
{
- unsigned long tunn_mode = p_src->tunn_mode;
enum tunnel_clss type;
- qed_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
- p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
- p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
-
- type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
- p_tunn_cfg->tunnel_clss_vxlan = type;
-
- type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
- p_tunn_cfg->tunnel_clss_l2gre = type;
+ p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
+ p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
+
+ type = qed_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
+ p_tun->vxlan.tun_cls = type;
+ type = qed_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
+ p_tun->l2_gre.tun_cls = type;
+ type = qed_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
+ p_tun->ip_gre.tun_cls = type;
+ type = qed_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
+ p_tun->l2_geneve.tun_cls = type;
+ type = qed_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
+ p_tun->ip_geneve.tun_cls = type;
+}
- type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
- p_tunn_cfg->tunnel_clss_ipgre = type;
+static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun,
+ struct qed_tunnel_info *p_src)
+{
+ p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
+ p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;
- if (p_src->update_vxlan_udp_port) {
- p_tunn_cfg->set_vxlan_udp_port_flg = 1;
- p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
- }
+ if (p_src->geneve_port.b_update_port)
+ p_tun->geneve_port.port = p_src->geneve_port.port;
- if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2gre = 1;
+ if (p_src->vxlan_port.b_update_port)
+ p_tun->vxlan_port.port = p_src->vxlan_port.port;
+}
- if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgre = 1;
+static void
+__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+ struct qed_tunn_update_type *tun_type)
+{
+ *p_tunn_cls = tun_type->tun_cls;
- if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_vxlan = 1;
+ if (tun_type->b_mode_enabled)
+ *p_enable_tx_clas = 1;
+}
- if (p_src->update_geneve_udp_port) {
- p_tunn_cfg->set_geneve_udp_port_flg = 1;
- p_tunn_cfg->geneve_udp_port =
- cpu_to_le16(p_src->geneve_udp_port);
+static void
+qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+ struct qed_tunn_update_type *tun_type,
+ u8 *p_update_port, __le16 *p_port,
+ struct qed_tunn_update_udp_port *p_udp_port)
+{
+ __qed_set_ramrod_tunnel_param(p_tunn_cls, p_enable_tx_clas, tun_type);
+ if (p_udp_port->b_update_port) {
+ *p_update_port = 1;
+ *p_port = cpu_to_le16(p_udp_port->port);
}
+}
- if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2geneve = 1;
-
- if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgeneve = 1;
-
- type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
- p_tunn_cfg->tunnel_clss_l2geneve = type;
-
- type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
- p_tunn_cfg->tunnel_clss_ipgeneve = type;
+static void
+qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_src,
+ struct pf_update_tunnel_config *p_tunn_cfg)
+{
+ struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
+
+ qed_set_pf_update_tunn_mode(p_tun, p_src, false);
+ qed_set_tunn_cls_info(p_tun, p_src);
+ qed_set_tunn_ports(p_tun, p_src);
+
+ qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+ &p_tunn_cfg->tx_enable_vxlan,
+ &p_tun->vxlan,
+ &p_tunn_cfg->set_vxlan_udp_port_flg,
+ &p_tunn_cfg->vxlan_udp_port,
+ &p_tun->vxlan_port);
+
+ qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+ &p_tunn_cfg->tx_enable_l2geneve,
+ &p_tun->l2_geneve,
+ &p_tunn_cfg->set_geneve_udp_port_flg,
+ &p_tunn_cfg->geneve_udp_port,
+ &p_tun->geneve_port);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+ &p_tunn_cfg->tx_enable_ipgeneve,
+ &p_tun->ip_geneve);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+ &p_tunn_cfg->tx_enable_l2gre,
+ &p_tun->l2_gre);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+ &p_tunn_cfg->tx_enable_ipgre,
+ &p_tun->ip_gre);
+
+ p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
+ p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls;
}
static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- unsigned long tunn_mode)
+ struct qed_tunnel_info *p_tun)
{
- u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
- u8 l2geneve_enable = 0, ipgeneve_enable = 0;
-
- if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
- l2gre_enable = 1;
+ qed_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
+ p_tun->ip_gre.b_mode_enabled);
+ qed_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);
- if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
- ipgre_enable = 1;
-
- if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
- vxlan_enable = 1;
-
- qed_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
- qed_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
+ qed_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
+ p_tun->ip_geneve.b_mode_enabled);
+}
- if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
- l2geneve_enable = 1;
+static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_tunn)
+{
+ if (p_tunn->vxlan_port.b_update_port)
+ qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->vxlan_port.port);
- if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
- ipgeneve_enable = 1;
+ if (p_tunn->geneve_port.b_update_port)
+ qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->geneve_port.port);
- qed_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
- ipgeneve_enable);
+ qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
}
static void
qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
- struct qed_tunn_start_params *p_src,
+ struct qed_tunnel_info *p_src,
struct pf_start_tunnel_config *p_tunn_cfg)
{
- unsigned long tunn_mode;
- enum tunnel_clss type;
+ struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
if (!p_src)
return;
- tunn_mode = p_src->tunn_mode;
- type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
- p_tunn_cfg->tunnel_clss_vxlan = type;
- type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
- p_tunn_cfg->tunnel_clss_l2gre = type;
- type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
- p_tunn_cfg->tunnel_clss_ipgre = type;
-
- if (p_src->update_vxlan_udp_port) {
- p_tunn_cfg->set_vxlan_udp_port_flg = 1;
- p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
- }
-
- if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2gre = 1;
-
- if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgre = 1;
-
- if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_vxlan = 1;
-
- if (p_src->update_geneve_udp_port) {
- p_tunn_cfg->set_geneve_udp_port_flg = 1;
- p_tunn_cfg->geneve_udp_port =
- cpu_to_le16(p_src->geneve_udp_port);
- }
-
- if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2geneve = 1;
-
- if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgeneve = 1;
-
- type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
- p_tunn_cfg->tunnel_clss_l2geneve = type;
- type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
- p_tunn_cfg->tunnel_clss_ipgeneve = type;
+ qed_set_pf_update_tunn_mode(p_tun, p_src, true);
+ qed_set_tunn_cls_info(p_tun, p_src);
+ qed_set_tunn_ports(p_tun, p_src);
+
+ qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+ &p_tunn_cfg->tx_enable_vxlan,
+ &p_tun->vxlan,
+ &p_tunn_cfg->set_vxlan_udp_port_flg,
+ &p_tunn_cfg->vxlan_udp_port,
+ &p_tun->vxlan_port);
+
+ qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+ &p_tunn_cfg->tx_enable_l2geneve,
+ &p_tun->l2_geneve,
+ &p_tunn_cfg->set_geneve_udp_port_flg,
+ &p_tunn_cfg->geneve_udp_port,
+ &p_tun->geneve_port);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+ &p_tunn_cfg->tx_enable_ipgeneve,
+ &p_tun->ip_geneve);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+ &p_tunn_cfg->tx_enable_l2gre,
+ &p_tun->l2_gre);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+ &p_tunn_cfg->tx_enable_ipgre,
+ &p_tun->ip_gre);
}
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
- struct qed_tunn_start_params *p_tunn,
+ struct qed_tunnel_info *p_tunn,
enum qed_mf_mode mode, bool allow_npar_tx_switch)
{
struct pf_start_ramrod_data *p_ramrod = NULL;
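
The rework above folds the per-tunnel test_bit() chains into two helpers: one sets the classification and the TX-enable flag, the other additionally latches a UDP port when an update was requested. A compact standalone sketch of that factoring; struct layout and names are illustrative, not the driver's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tun_type {
	bool mode_enabled;
	uint8_t cls;
};

struct tun_port {
	bool update;
	uint16_t port;
};

/* Classification plus TX-enable only (the GRE / IP-GENEVE shape). */
static void set_ramrod_basic(uint8_t *out_cls, uint8_t *out_tx_en,
			     const struct tun_type *t)
{
	*out_cls = t->cls;
	if (t->mode_enabled)
		*out_tx_en = 1;
}

/* Same, plus an optional UDP port update (the VXLAN / L2-GENEVE shape). */
static void set_ramrod_with_port(uint8_t *out_cls, uint8_t *out_tx_en,
				 const struct tun_type *t,
				 uint8_t *out_set_port, uint16_t *out_port,
				 const struct tun_port *p)
{
	set_ramrod_basic(out_cls, out_tx_en, t);
	if (p->update) {
		*out_set_port = 1;
		*out_port = p->port;	/* the driver byte-swaps here */
	}
}

int main(void)
{
	struct tun_type vxlan = { .mode_enabled = true, .cls = 0 };
	struct tun_port vxlan_port = { .update = true, .port = 4789 };
	uint8_t cls = 0, tx_en = 0, set_port = 0;
	uint16_t port = 0;

	set_ramrod_with_port(&cls, &tx_en, &vxlan, &set_port, &port,
			     &vxlan_port);
	printf("cls=%u tx_en=%u set_port=%u port=%u\n",
	       (unsigned)cls, (unsigned)tx_en, (unsigned)set_port,
	       (unsigned)port);
	return 0;
}
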
@@ -416,11 +411,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
rc = qed_spq_post(p_hwfn, p_ent, NULL);
- if (p_tunn) {
- qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn->tunn_mode);
- p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
- }
+ if (p_tunn)
+ qed_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->cdev->tunnel);
return rc;
}
@@ -451,7 +443,7 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
/* Set pf update ramrod command params */
int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
- struct qed_tunn_update_params *p_tunn,
+ struct qed_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data)
{
@@ -459,6 +451,12 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
struct qed_sp_init_data init_data;
int rc = -EINVAL;
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn);
+
+ if (!p_tunn)
+ return -EINVAL;
+
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = qed_spq_get_cid(p_hwfn);
@@ -479,15 +477,7 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- if (p_tunn->update_vxlan_udp_port)
- qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn->vxlan_udp_port);
- if (p_tunn->update_geneve_udp_port)
- qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn->geneve_udp_port);
-
- qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
- p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
+ qed_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->cdev->tunnel);
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 645328a9f0cf..f6423a139ca0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -119,6 +119,7 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
u8 *p_fw_ret, bool skip_quick_poll)
{
struct qed_spq_comp_done *comp_done;
+ struct qed_ptt *p_ptt;
int rc;
/* A relatively short polling period w/o sleeping, to allow the FW to
@@ -135,8 +136,14 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
if (!rc)
return 0;
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn, "ptt, failed to acquire\n");
+ return -EAGAIN;
+ }
+
DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
- rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
+ rc = qed_mcp_drain(p_hwfn, p_ptt);
if (rc) {
DP_NOTICE(p_hwfn, "MCP drain failed\n");
goto err;
@@ -145,15 +152,18 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
/* Retry after drain */
rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
if (!rc)
- return 0;
+ goto out;
comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
- if (comp_done->done == 1) {
+ if (comp_done->done == 1)
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
- return 0;
- }
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+ return 0;
+
err:
+ qed_ptt_release(p_hwfn, p_ptt);
DP_NOTICE(p_hwfn,
"Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
le32_to_cpu(p_ent->elem.hdr.cid),
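
qed_spq_block() now acquires its own PTT and must release it on every exit path, which is why the early returns become gotos to common out/err labels. A generic sketch of that acquire/try/cleanup shape with a placeholder resource:

#include <stdio.h>
#include <stdlib.h>

struct resource {
	int id;
};

static struct resource *resource_acquire(void)
{
	return malloc(sizeof(struct resource));		/* may fail */
}

static void resource_release(struct resource *r)
{
	free(r);
}

static int do_drain(struct resource *r)
{
	(void)r;
	return 0;
}

static int retry_after_drain(struct resource *r)
{
	(void)r;
	return 0;
}

static int blocked_wait(void)
{
	struct resource *r;
	int rc;

	r = resource_acquire();
	if (!r)
		return -1;		/* -EAGAIN in the driver */

	rc = do_drain(r);
	if (rc)
		goto err;

	rc = retry_after_drain(r);
	if (rc)
		goto err;

	resource_release(r);		/* common success path ("out:") */
	return 0;

err:
	resource_release(r);		/* failure path still releases it */
	return rc;
}

int main(void)
{
	return blocked_wait() ? 1 : 0;
}
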
@@ -205,11 +215,10 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq)
{
- u16 pq;
- struct qed_cxt_info cxt_info;
- struct core_conn_context *p_cxt;
- union qed_qm_pq_params pq_params;
- int rc;
+ struct core_conn_context *p_cxt;
+ struct qed_cxt_info cxt_info;
+ u16 physical_q;
+ int rc;
cxt_info.iid = p_spq->cid;
@@ -231,10 +240,8 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
/* QM physical queue */
- memset(&pq_params, 0, sizeof(pq_params));
- pq_params.core.tc = LB_TC;
- pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
- p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
+ p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);
p_cxt->xstorm_st_context.spq_base_lo =
DMA_LO_LE(p_spq->chain.p_phys_addr);
@@ -296,9 +303,12 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
switch (p_eqe->protocol_id) {
+#if IS_ENABLED(CONFIG_QED_RDMA)
case PROTOCOLID_ROCE:
- qed_async_roce_event(p_hwfn, p_eqe);
+ qed_roce_async_event(p_hwfn, p_eqe->opcode,
+ &p_eqe->data.rdma_data);
return 0;
+#endif
case PROTOCOLID_COMMON:
return qed_sriov_eqe_event(p_hwfn,
p_eqe->opcode,
@@ -306,14 +316,6 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
case PROTOCOLID_ISCSI:
if (!IS_ENABLED(CONFIG_QED_ISCSI))
return -EINVAL;
- if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) {
- u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid);
-
- qed_ooo_release_connection_isles(p_hwfn,
- p_hwfn->p_ooo_info,
- cid);
- return 0;
- }
if (p_hwfn->p_iscsi_info->event_cb) {
struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 253c2bbe1e4e..d5df29f787c5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -178,26 +178,59 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
return vf;
}
+enum qed_iov_validate_q_mode {
+ QED_IOV_VALIDATE_Q_NA,
+ QED_IOV_VALIDATE_Q_ENABLE,
+ QED_IOV_VALIDATE_Q_DISABLE,
+};
+
+static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ u16 qid,
+ enum qed_iov_validate_q_mode mode,
+ bool b_is_tx)
+{
+ if (mode == QED_IOV_VALIDATE_Q_NA)
+ return true;
+
+ if ((b_is_tx && p_vf->vf_queues[qid].p_tx_cid) ||
+ (!b_is_tx && p_vf->vf_queues[qid].p_rx_cid))
+ return mode == QED_IOV_VALIDATE_Q_ENABLE;
+
+	/* If no valid cid was found, the queue is disabled */
+ return mode == QED_IOV_VALIDATE_Q_DISABLE;
+}
+
static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
- struct qed_vf_info *p_vf, u16 rx_qid)
+ struct qed_vf_info *p_vf,
+ u16 rx_qid,
+ enum qed_iov_validate_q_mode mode)
{
- if (rx_qid >= p_vf->num_rxqs)
+ if (rx_qid >= p_vf->num_rxqs) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
- return rx_qid < p_vf->num_rxqs;
+ return false;
+ }
+
+ return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
}
static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
- struct qed_vf_info *p_vf, u16 tx_qid)
+ struct qed_vf_info *p_vf,
+ u16 tx_qid,
+ enum qed_iov_validate_q_mode mode)
{
- if (tx_qid >= p_vf->num_txqs)
+ if (tx_qid >= p_vf->num_txqs) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
- return tx_qid < p_vf->num_txqs;
+ return false;
+ }
+
+ return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
}
static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
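
The new enum lets a single validator answer three questions: is the index legal, is the queue already active, or is it still free. A standalone sketch of the tri-state check; names and sizes are illustrative:

#include <stdbool.h>
#include <stdio.h>

enum q_mode {
	Q_NA,		/* only range-check the index */
	Q_ENABLE,	/* queue must already be active */
	Q_DISABLE,	/* queue must still be free */
};

#define NUM_QUEUES 4

static void *rx_cid[NUM_QUEUES];	/* non-NULL means the queue is active */

static bool validate_rxq(unsigned int qid, enum q_mode mode)
{
	if (qid >= NUM_QUEUES)
		return false;
	if (mode == Q_NA)
		return true;
	if (rx_cid[qid])
		return mode == Q_ENABLE;
	return mode == Q_DISABLE;	/* no cid found, so it is disabled */
}

int main(void)
{
	int dummy;

	rx_cid[1] = &dummy;
	printf("start q1 allowed? %d\n", validate_rxq(1, Q_DISABLE));	/* 0 */
	printf("stop q1 allowed?  %d\n", validate_rxq(1, Q_ENABLE));	/* 1 */
	printf("stop q3 allowed?  %d\n", validate_rxq(3, Q_ENABLE));	/* 0 */
	return 0;
}
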
@@ -217,6 +250,34 @@ static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
return false;
}
+static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf)
+{
+ u8 i;
+
+ for (i = 0; i < p_vf->num_rxqs; i++)
+ if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ QED_IOV_VALIDATE_Q_ENABLE,
+ false))
+ return true;
+
+ return false;
+}
+
+static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf)
+{
+ u8 i;
+
+ for (i = 0; i < p_vf->num_txqs; i++)
+ if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ QED_IOV_VALIDATE_Q_ENABLE,
+ true))
+ return true;
+
+ return false;
+}
+
static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
int vfid, struct qed_ptt *p_ptt)
{
@@ -557,14 +618,30 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
return 0;
}
- /* Calculate the first VF index - this is a bit tricky; Basically,
- * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
- * after the first engine's VFs.
+ /* First VF index based on offset is tricky:
+ * - If ARI is supported [likely], offset - (16 - pf_id) would
+	 *   provide the number for eng0. 2nd engine VFs would begin
+	 *   after the first engine's VFs.
+	 * - If !ARI, VFs would start on the next device.
+	 *   So offset - (256 - pf_id) would provide the number.
+	 * Utilize the fact that (256 - pf_id) is achieved only by the latter
+	 * to differentiate between the two.
*/
- cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
- p_hwfn->abs_pf_id - 16;
- if (QED_PATH_ID(p_hwfn))
- cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+
+ if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
+ u32 first = p_hwfn->cdev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 16;
+
+ cdev->p_iov_info->first_vf_in_pf = first;
+
+ if (QED_PATH_ID(p_hwfn))
+ cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+ } else {
+ u32 first = p_hwfn->cdev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 256;
+
+ cdev->p_iov_info->first_vf_in_pf = first;
+ }
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"First VF in hwfn 0x%08x\n",
@@ -677,6 +754,11 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
int rc;
+ /* It's possible VF was previously considered malicious -
+ * clear the indication even if we're only going to disable VF.
+ */
+ vf->b_malicious = false;
+
if (vf->to_disable)
return 0;
@@ -689,9 +771,6 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
- /* It's possible VF was previously considered malicious */
- vf->b_malicious = false;
-
rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
if (rc)
return rc;
@@ -1118,13 +1197,17 @@ static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
(sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
&params);
- qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
- mbx->req_virt->first_tlv.reply_address,
- sizeof(u64) / 4, &params);
-
+ /* Once PF copies the rc to the VF, the latter can continue
+	 * and send an additional message. So we have to make sure the
+	 * channel is set back to ready before that.
+ */
REG_WR(p_hwfn,
GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+
+ qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
+ mbx->req_virt->first_tlv.reply_address,
+ sizeof(u64) / 4, &params);
}
static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
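
The reordering above matters because the VF treats the arrival of the reply as permission to send its next request, so the PF must re-arm the channel before copying the reply out. A minimal sketch of that ordering rule, with plain flags standing in for the USTORM ready register and the DMA'd reply:

#include <stdio.h>

struct channel {
	volatile int ready;	/* stands in for the USTORM "ready" register */
	volatile int reply;	/* stands in for the DMA'd reply buffer */
};

static void pf_send_response(struct channel *ch, int rc)
{
	/* Re-arm the channel first: once the reply is visible the VF may
	 * immediately send another message, and that message must find the
	 * channel already in the ready state.
	 */
	ch->ready = 1;
	ch->reply = rc;		/* only now make the reply visible */
}

int main(void)
{
	struct channel ch = { 0, 0 };

	pf_send_response(&ch, 0);
	printf("ready=%d reply=%d\n", ch.ready, ch.reply);
	return 0;
}
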
@@ -1733,6 +1816,8 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
vf->state = VF_ENABLED;
start = &mbx->req_virt->start_vport;
+ qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
/* Initialize Status block in CAU */
for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
if (!start->sb_addr[sb_id]) {
@@ -1746,7 +1831,6 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
start->sb_addr[sb_id],
vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
}
- qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
vf->mtu = start->mtu;
vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
@@ -1803,6 +1887,16 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
vf->vport_instance--;
vf->spoof_chk = false;
+ if ((qed_iov_validate_active_rxq(p_hwfn, vf)) ||
+ (qed_iov_validate_active_txq(p_hwfn, vf))) {
+ vf->b_malicious = true;
+ DP_NOTICE(p_hwfn,
+			  "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_MALICIOUS;
+ goto out;
+ }
+
rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
if (rc) {
DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
@@ -1814,6 +1908,7 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
vf->configured_features = 0;
memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
+out:
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
sizeof(struct pfvf_def_resp_tlv), status);
}
@@ -1870,7 +1965,8 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
req = &mbx->req_virt->start_rxq;
- if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
+ if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
+ QED_IOV_VALIDATE_Q_DISABLE) ||
!qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
@@ -1923,6 +2019,220 @@ out:
qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf);
}
+static void
+qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
+ struct qed_tunnel_info *p_tun,
+ u16 tunn_feature_mask)
+{
+ p_resp->tunn_feature_mask = tunn_feature_mask;
+ p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
+ p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
+ p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
+ p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
+ p_resp->ipgre_mode = p_tun->l2_gre.b_mode_enabled;
+ p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
+ p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
+ p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
+ p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
+ p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
+ p_resp->geneve_udp_port = p_tun->geneve_port.port;
+ p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
+}
+
+static void
+__qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+ struct qed_tunn_update_type *p_tun,
+ enum qed_tunn_mode mask, u8 tun_cls)
+{
+ if (p_req->tun_mode_update_mask & BIT(mask)) {
+ p_tun->b_update_mode = true;
+
+ if (p_req->tunn_mode & BIT(mask))
+ p_tun->b_mode_enabled = true;
+ }
+
+ p_tun->tun_cls = tun_cls;
+}
+
+static void
+qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+ struct qed_tunn_update_type *p_tun,
+ struct qed_tunn_update_udp_port *p_port,
+ enum qed_tunn_mode mask,
+ u8 tun_cls, u8 update_port, u16 port)
+{
+ if (update_port) {
+ p_port->b_update_port = true;
+ p_port->port = port;
+ }
+
+ __qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
+}
+
+static bool
+qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
+{
+ bool b_update_requested = false;
+
+ if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
+ p_req->update_geneve_port || p_req->update_vxlan_port)
+ b_update_requested = true;
+
+ return b_update_requested;
+}
+
+static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc)
+{
+ if (tun->b_update_mode && !tun->b_mode_enabled) {
+ tun->b_update_mode = false;
+ *rc = -EINVAL;
+ }
+}
+
+static int
+qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn,
+ u16 *tun_features, bool *update,
+ struct qed_tunnel_info *tun_src)
+{
+ struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth;
+ struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel;
+ u16 bultn_vxlan_port, bultn_geneve_port;
+ void *cookie = p_hwfn->cdev->ops_cookie;
+ int i, rc = 0;
+
+ *tun_features = p_hwfn->cdev->tunn_feature_mask;
+ bultn_vxlan_port = tun->vxlan_port.port;
+ bultn_geneve_port = tun->geneve_port.port;
+ qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc);
+ qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc);
+ qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc);
+ qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc);
+ qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc);
+
+ if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) &&
+ (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
+ tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
+ tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
+ tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
+ tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) {
+ tun_src->b_update_rx_cls = false;
+ tun_src->b_update_tx_cls = false;
+ rc = -EINVAL;
+ }
+
+ if (tun_src->vxlan_port.b_update_port) {
+ if (tun_src->vxlan_port.port == tun->vxlan_port.port) {
+ tun_src->vxlan_port.b_update_port = false;
+ } else {
+ *update = true;
+ bultn_vxlan_port = tun_src->vxlan_port.port;
+ }
+ }
+
+ if (tun_src->geneve_port.b_update_port) {
+ if (tun_src->geneve_port.port == tun->geneve_port.port) {
+ tun_src->geneve_port.b_update_port = false;
+ } else {
+ *update = true;
+ bultn_geneve_port = tun_src->geneve_port.port;
+ }
+ }
+
+ qed_for_each_vf(p_hwfn, i) {
+ qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port,
+ bultn_geneve_port);
+ }
+
+ qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port);
+
+ return rc;
+}
+
+static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf)
+{
+ struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
+ struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_update_tunn_param_tlv *p_resp;
+ struct vfpf_update_tunn_param_tlv *p_req;
+ u8 status = PFVF_STATUS_SUCCESS;
+ bool b_update_required = false;
+ struct qed_tunnel_info tunn;
+ u16 tunn_feature_mask = 0;
+ int i, rc = 0;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ memset(&tunn, 0, sizeof(tunn));
+ p_req = &mbx->req_virt->tunn_param_update;
+
+ if (!qed_iov_pf_validate_tunn_param(p_req)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "No tunnel update requested by VF\n");
+ status = PFVF_STATUS_FAILURE;
+ goto send_resp;
+ }
+
+ tunn.b_update_rx_cls = p_req->update_tun_cls;
+ tunn.b_update_tx_cls = p_req->update_tun_cls;
+
+ qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
+ QED_MODE_VXLAN_TUNN, p_req->vxlan_clss,
+ p_req->update_vxlan_port,
+ p_req->vxlan_port);
+ qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
+ QED_MODE_L2GENEVE_TUNN,
+ p_req->l2geneve_clss,
+ p_req->update_geneve_port,
+ p_req->geneve_port);
+ __qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
+ QED_MODE_IPGENEVE_TUNN,
+ p_req->ipgeneve_clss);
+ __qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
+ QED_MODE_L2GRE_TUNN, p_req->l2gre_clss);
+ __qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
+ QED_MODE_IPGRE_TUNN, p_req->ipgre_clss);
+
+ /* If PF modifies VF's req then it should
+ * still return an error in case of partial configuration
+ * or modified configuration as opposed to requested one.
+ */
+ rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask,
+ &b_update_required, &tunn);
+
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+	/* Does the QED client actually want to update anything? */
+ if (b_update_required) {
+ u16 geneve_port;
+
+ rc = qed_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ QED_SPQ_MODE_EBLOCK, NULL);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+ geneve_port = p_tun->geneve_port.port;
+ qed_for_each_vf(p_hwfn, i) {
+ qed_iov_bulletin_set_udp_ports(p_hwfn, i,
+ p_tun->vxlan_port.port,
+ geneve_port);
+ }
+ }
+
+send_resp:
+ p_resp = qed_add_tlv(p_hwfn, &mbx->offset,
+ CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
+
+ qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
+ qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
+}
+
static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_vf_info *p_vf, u8 status)
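
qed_pf_validate_modify_tunn_config() lets the PF trim or reject parts of the VF request while still applying the rest, and it reports failure whenever it had to deviate from what was asked. A small sketch of that sanitize-but-flag pattern with an illustrative field:

#include <stdbool.h>
#include <stdio.h>

struct tunn_req {
	bool update_mode;
	bool mode_enabled;
};

/* Drop update requests that would disable a mode, but report the rejection
 * so the caller can return a failure status even though the rest applied.
 */
static int validate_tunn_mode(struct tunn_req *req)
{
	if (req->update_mode && !req->mode_enabled) {
		req->update_mode = false;
		return -1;		/* -EINVAL in the driver */
	}
	return 0;
}

int main(void)
{
	struct tunn_req vxlan = { .update_mode = true, .mode_enabled = false };
	int status = 0;

	if (validate_tunn_mode(&vxlan))
		status = -1;		/* PFVF_STATUS_FAILURE */

	printf("applied update_mode=%d, status=%d\n",
	       vxlan.update_mode, status);
	return 0;
}
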
@@ -1970,21 +2280,16 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
struct qed_queue_start_common_params params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
- union qed_qm_pq_params pq_params;
struct vfpf_start_txq_tlv *req;
struct qed_vf_q_info *p_queue;
int rc;
u16 pq;
- /* Prepare the parameters which would choose the right PQ */
- memset(&pq_params, 0, sizeof(pq_params));
- pq_params.eth.is_vf = 1;
- pq_params.eth.vf_id = vf->relative_vf_id;
-
memset(&params, 0, sizeof(params));
req = &mbx->req_virt->start_txq;
- if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
+ if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
+ QED_IOV_VALIDATE_Q_DISABLE) ||
!qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
@@ -2004,7 +2309,7 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
if (!p_queue->p_tx_cid)
goto out;
- pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, &pq_params);
+ pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
rc = qed_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
req->pbl_addr, req->pbl_size, pq);
if (rc) {
@@ -2021,57 +2326,53 @@ out:
static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf,
- u16 rxq_id, u8 num_rxqs, bool cqe_completion)
+ u16 rxq_id, bool cqe_completion)
{
struct qed_vf_q_info *p_queue;
int rc = 0;
- int qid;
- if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
+ if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id,
+ QED_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] Tried Closing Rx 0x%04x which is inactive\n",
+ vf->relative_vf_id, rxq_id);
return -EINVAL;
+ }
- for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
- p_queue = &vf->vf_queues[qid];
+ p_queue = &vf->vf_queues[rxq_id];
- if (!p_queue->p_rx_cid)
- continue;
-
- rc = qed_eth_rx_queue_stop(p_hwfn,
- p_queue->p_rx_cid,
- false, cqe_completion);
- if (rc)
- return rc;
+ rc = qed_eth_rx_queue_stop(p_hwfn,
+ p_queue->p_rx_cid,
+ false, cqe_completion);
+ if (rc)
+ return rc;
- vf->vf_queues[qid].p_rx_cid = NULL;
- vf->num_active_rxqs--;
- }
+ p_queue->p_rx_cid = NULL;
+ vf->num_active_rxqs--;
- return rc;
+ return 0;
}
static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
- struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
+ struct qed_vf_info *vf, u16 txq_id)
{
- int rc = 0;
struct qed_vf_q_info *p_queue;
- int qid;
+ int rc = 0;
- if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
+ if (!qed_iov_validate_txq(p_hwfn, vf, txq_id,
+ QED_IOV_VALIDATE_Q_ENABLE))
return -EINVAL;
- for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
- p_queue = &vf->vf_queues[qid];
- if (!p_queue->p_tx_cid)
- continue;
+ p_queue = &vf->vf_queues[txq_id];
- rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid);
- if (rc)
- return rc;
+ rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid);
+ if (rc)
+ return rc;
- p_queue->p_tx_cid = NULL;
- }
+ p_queue->p_tx_cid = NULL;
- return rc;
+ return 0;
}
static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
@@ -2080,20 +2381,28 @@ static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
{
u16 length = sizeof(struct pfvf_def_resp_tlv);
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
- u8 status = PFVF_STATUS_SUCCESS;
+ u8 status = PFVF_STATUS_FAILURE;
struct vfpf_stop_rxqs_tlv *req;
int rc;
- /* We give the option of starting from qid != 0, in this case we
- * need to make sure that qid + num_qs doesn't exceed the actual
- * amount of queues that exist.
+ /* There has never been an official driver that used this interface
+ * for stopping multiple queues, and it is now considered deprecated.
+ * Validate this isn't used here.
*/
req = &mbx->req_virt->stop_rxqs;
- rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
- req->num_rxqs, req->cqe_completion);
- if (rc)
- status = PFVF_STATUS_FAILURE;
+ if (req->num_rxqs != 1) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Odd; VF[%d] tried stopping multiple Rx queues\n",
+ vf->relative_vf_id);
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+ rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+ req->cqe_completion);
+ if (!rc)
+ status = PFVF_STATUS_SUCCESS;
+out:
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
length, status);
}
@@ -2104,19 +2413,27 @@ static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
{
u16 length = sizeof(struct pfvf_def_resp_tlv);
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
- u8 status = PFVF_STATUS_SUCCESS;
+ u8 status = PFVF_STATUS_FAILURE;
struct vfpf_stop_txqs_tlv *req;
int rc;
- /* We give the option of starting from qid != 0, in this case we
- * need to make sure that qid + num_qs doesn't exceed the actual
- * amount of queues that exist.
+ /* There has never been an official driver that used this interface
+ * for stopping multiple queues, and it is now considered deprecated.
+ * Validate this isn't used here.
*/
req = &mbx->req_virt->stop_txqs;
- rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
- if (rc)
- status = PFVF_STATUS_FAILURE;
+ if (req->num_txqs != 1) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Odd; VF[%d] tried stopping multiple Tx queues\n",
+ vf->relative_vf_id);
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+ rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid);
+ if (!rc)
+ status = PFVF_STATUS_SUCCESS;
+out:
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
length, status);
}
@@ -2141,22 +2458,17 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
/* Validate inputs */
- if (req->num_rxqs + req->rx_qid > QED_MAX_VF_CHAINS_PER_PF ||
- !qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid)) {
- DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
- vf->relative_vf_id, req->rx_qid, req->num_rxqs);
- goto out;
- }
-
- for (i = 0; i < req->num_rxqs; i++) {
- qid = req->rx_qid + i;
- if (!vf->vf_queues[qid].p_rx_cid) {
- DP_INFO(p_hwfn,
- "VF[%d] rx_qid = %d isn`t active!\n",
- vf->relative_vf_id, qid);
+ for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++)
+ if (!qed_iov_validate_rxq(p_hwfn, vf, i,
+ QED_IOV_VALIDATE_Q_ENABLE)) {
+ DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+ vf->relative_vf_id, req->rx_qid, req->num_rxqs);
goto out;
}
+ /* Prepare the handlers */
+ for (i = 0; i < req->num_rxqs; i++) {
+ qid = req->rx_qid + i;
handlers[i] = vf->vf_queues[qid].p_rx_cid;
}
@@ -2372,7 +2684,8 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
for (i = 0; i < table_size; i++) {
q_idx = p_rss_tlv->rss_ind_table[i];
- if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx)) {
+ if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
+ QED_IOV_VALIDATE_Q_ENABLE)) {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF[%d]: Omitting RSS due to wrong queue %04x\n",
@@ -2381,15 +2694,6 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
goto out;
}
- if (!vf->vf_queues[q_idx].p_rx_cid) {
- DP_VERBOSE(p_hwfn,
- QED_MSG_IOV,
- "VF[%d]: Omitting RSS due to inactive queue %08x\n",
- vf->relative_vf_id, q_idx);
- b_reject = true;
- goto out;
- }
-
p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
}
@@ -3042,9 +3346,10 @@ qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return rc;
}
-int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
+bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
{
- u16 i, found = 0;
+ bool found = false;
+ u16 i;
DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
for (i = 0; i < (VF_MAX_STATIC / 32); i++)
@@ -3054,7 +3359,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
if (!p_hwfn->cdev->p_iov_info) {
DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
- return 0;
+ return false;
}
/* Mark VFs */
@@ -3083,7 +3388,7 @@ int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
* VF flr until ACKs, we're safe.
*/
p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
- found = 1;
+ found = true;
}
}
@@ -3184,6 +3489,9 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
case CHANNEL_TLV_RELEASE:
qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
break;
+ case CHANNEL_TLV_UPDATE_TUNN_PARAM:
+ qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
+ break;
}
} else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
@@ -3289,11 +3597,17 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
if (!p_vf)
return;
- DP_INFO(p_hwfn,
- "VF [%d] - Malicious behavior [%02x]\n",
- p_vf->abs_vf_id, p_data->err_id);
+ if (!p_vf->b_malicious) {
+ DP_NOTICE(p_hwfn,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->err_id);
- p_vf->b_malicious = true;
+ p_vf->b_malicious = true;
+ } else {
+ DP_INFO(p_hwfn,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->err_id);
+ }
}
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
@@ -3414,6 +3728,29 @@ static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
+void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
+ int vfid, u16 vxlan_port, u16 geneve_port)
+{
+ struct qed_vf_info *vf_info;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Can not set udp ports, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ if (vf_info->b_malicious) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Can not set udp ports to malicious VF [%d]\n",
+ vfid);
+ return;
+ }
+
+ vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
+ vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
+}
+
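/* Flow sketch (illustrative only; relies on the VF-side code shown further
 * below in this patch): the PF records the offloaded UDP ports in the VF
 * bulletin here, the VF later reads them back in
 * qed_vf_bulletin_get_udp_ports() and hands them to the L2 driver through the
 * ops->ports_update() callback, which qede implements as
 * qede_udp_ports_update() to drop any cached vxlan/geneve port the PF no
 * longer advertises.
 */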
static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
{
struct qed_vf_info *p_vf_info;
@@ -3842,6 +4179,7 @@ static int qed_get_vf_config(struct qed_dev *cdev,
void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
+ struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
struct qed_mcp_link_capabilities caps;
struct qed_mcp_link_params params;
struct qed_mcp_link_state link;
@@ -3858,9 +4196,15 @@ void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
if (!vf_info)
continue;
- memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
- memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
- memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
+ /* Only hwfn0 is actually interested in the link speed.
+ * But since only it would receive an MFW indication of link,
+ * need to take configuration from it - otherwise things like
+ * rate limiting for hwfn1 VF would not work.
+ */
+ memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
+ sizeof(params));
+ memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
+ memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
sizeof(caps));
/* Modify link according to the VF's configured link state */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index a89605821522..81a497ce6585 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -270,6 +270,9 @@ enum qed_iov_wq_flag {
*/
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
+void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
+ int vfid, u16 vxlan_port, u16 geneve_port);
+
/**
* @brief Read sriov related information and allocate resources
* reads from configuration space, shmem, etc.
@@ -348,9 +351,9 @@ int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
* @param p_hwfn
* @param disabled_vfs - bitmask of all VFs on path that were FLRed
*
- * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
+ * @return true iff one of the PF's vfs got FLRed. false otherwise.
*/
-int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
+bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
/**
* @brief Search extended TLVs in request/reply buffer.
@@ -378,6 +381,12 @@ static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
return MAX_NUM_VFS;
}
+static inline void
+qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid,
+ u16 vxlan_port, u16 geneve_port)
+{
+}
+
static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
return 0;
@@ -407,10 +416,10 @@ static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
-static inline int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
- u32 *disabled_vfs)
+static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
+ u32 *disabled_vfs)
{
- return 0;
+ return false;
}
static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 15d2855ec563..11d71e5eea14 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -134,14 +134,20 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
}
if (!*done) {
- DP_VERBOSE(p_hwfn, QED_MSG_IOV,
- "VF <-- PF Timeout [Type %d]\n",
- p_req->first_tlv.tl.type);
+ DP_NOTICE(p_hwfn,
+ "VF <-- PF Timeout [Type %d]\n",
+ p_req->first_tlv.tl.type);
rc = -EBUSY;
} else {
- DP_VERBOSE(p_hwfn, QED_MSG_IOV,
- "PF response: %d [Type %d]\n",
- *done, p_req->first_tlv.tl.type);
+ if ((*done != PFVF_STATUS_SUCCESS) &&
+ (*done != PFVF_STATUS_NO_RESOURCE))
+ DP_NOTICE(p_hwfn,
+ "PF response: %d [Type %d]\n",
+ *done, p_req->first_tlv.tl.type);
+ else
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "PF response: %d [Type %d]\n",
+ *done, p_req->first_tlv.tl.type);
}
return rc;
@@ -228,7 +234,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
/* send acquire request */
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
/* copy acquire response from buffer to p_hwfn */
memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));
@@ -412,6 +418,155 @@ free_p_iov:
#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
(TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
+static void
+__qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+ struct qed_tunn_update_type *p_src,
+ enum qed_tunn_clss mask, u8 *p_cls)
+{
+ if (p_src->b_update_mode) {
+ p_req->tun_mode_update_mask |= BIT(mask);
+
+ if (p_src->b_mode_enabled)
+ p_req->tunn_mode |= BIT(mask);
+ }
+
+ *p_cls = p_src->tun_cls;
+}
+
+static void
+qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+ struct qed_tunn_update_type *p_src,
+ enum qed_tunn_clss mask,
+ u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
+ u8 *p_update_port, u16 *p_udp_port)
+{
+ if (p_port->b_update_port) {
+ *p_update_port = 1;
+ *p_udp_port = p_port->port;
+ }
+
+ __qed_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
+}
+
+void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
+{
+ if (p_tun->vxlan.b_mode_enabled)
+ p_tun->vxlan.b_update_mode = true;
+ if (p_tun->l2_geneve.b_mode_enabled)
+ p_tun->l2_geneve.b_update_mode = true;
+ if (p_tun->ip_geneve.b_mode_enabled)
+ p_tun->ip_geneve.b_update_mode = true;
+ if (p_tun->l2_gre.b_mode_enabled)
+ p_tun->l2_gre.b_update_mode = true;
+ if (p_tun->ip_gre.b_mode_enabled)
+ p_tun->ip_gre.b_update_mode = true;
+
+ p_tun->b_update_rx_cls = true;
+ p_tun->b_update_tx_cls = true;
+}
+
+static void
+__qed_vf_update_tunn_param(struct qed_tunn_update_type *p_tun,
+ u16 feature_mask, u8 tunn_mode,
+ u8 tunn_cls, enum qed_tunn_mode val)
+{
+ if (feature_mask & BIT(val)) {
+ p_tun->b_mode_enabled = tunn_mode;
+ p_tun->tun_cls = tunn_cls;
+ } else {
+ p_tun->b_mode_enabled = false;
+ }
+}
+
+static void qed_vf_update_tunn_param(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_tun,
+ struct pfvf_update_tunn_param_tlv *p_resp)
+{
+ /* Update mode and classes provided by PF */
+ u16 feat_mask = p_resp->tunn_feature_mask;
+
+ __qed_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
+ p_resp->vxlan_mode, p_resp->vxlan_clss,
+ QED_MODE_VXLAN_TUNN);
+ __qed_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
+ p_resp->l2geneve_mode,
+ p_resp->l2geneve_clss,
+ QED_MODE_L2GENEVE_TUNN);
+ __qed_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
+ p_resp->ipgeneve_mode,
+ p_resp->ipgeneve_clss,
+ QED_MODE_IPGENEVE_TUNN);
+ __qed_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
+ p_resp->l2gre_mode, p_resp->l2gre_clss,
+ QED_MODE_L2GRE_TUNN);
+ __qed_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
+ p_resp->ipgre_mode, p_resp->ipgre_clss,
+ QED_MODE_IPGRE_TUNN);
+ p_tun->geneve_port.port = p_resp->geneve_udp_port;
+ p_tun->vxlan_port.port = p_resp->vxlan_udp_port;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x",
+ p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
+ p_tun->ip_geneve.b_mode_enabled,
+ p_tun->l2_gre.b_mode_enabled, p_tun->ip_gre.b_mode_enabled);
+}
+
+int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_src)
+{
+ struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_update_tunn_param_tlv *p_resp;
+ struct vfpf_update_tunn_param_tlv *p_req;
+ int rc;
+
+ p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
+ sizeof(*p_req));
+
+ if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
+ p_req->update_tun_cls = 1;
+
+ qed_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, QED_MODE_VXLAN_TUNN,
+ &p_req->vxlan_clss, &p_src->vxlan_port,
+ &p_req->update_vxlan_port,
+ &p_req->vxlan_port);
+ qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
+ QED_MODE_L2GENEVE_TUNN,
+ &p_req->l2geneve_clss, &p_src->geneve_port,
+ &p_req->update_geneve_port,
+ &p_req->geneve_port);
+ __qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
+ QED_MODE_IPGENEVE_TUNN,
+ &p_req->ipgeneve_clss);
+ __qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
+ QED_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
+ __qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
+ QED_MODE_IPGRE_TUNN, &p_req->ipgre_clss);
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
+ rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+
+ if (rc)
+ goto exit;
+
+ if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Failed to update tunnel parameters\n");
+ rc = -EINVAL;
+ }
+
+ qed_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+ return rc;
+}
+
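/* Illustrative usage sketch (hypothetical caller; example_vf_refresh_tunnels
 * is not a real symbol and the usual VF slowpath context is assumed): a VF
 * that wants to push its current tunnel state to the PF would mark the
 * enabled modes for update and then issue the TLV exchange implemented above.
 */
static int example_vf_refresh_tunnels(struct qed_hwfn *p_hwfn)
{
	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;

	/* Flag every currently enabled tunnel mode (and the classification
	 * settings) as needing an update in the request.
	 */
	qed_vf_set_vf_start_tunn_update_param(p_tun);

	/* Build and send CHANNEL_TLV_UPDATE_TUNN_PARAM; on reply, the PF's
	 * view of modes/classes/ports is copied back into cdev->tunnel.
	 */
	return qed_vf_pf_tunnel_param_update(p_hwfn, p_tun);
}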
int
qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid,
@@ -1245,6 +1400,18 @@ static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
return true;
}
+static void
+qed_vf_bulletin_get_udp_ports(struct qed_hwfn *p_hwfn,
+ u16 *p_vxlan_port, u16 *p_geneve_port)
+{
+ struct qed_bulletin_content *p_bulletin;
+
+ p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
+
+ *p_vxlan_port = p_bulletin->vxlan_udp_port;
+ *p_geneve_port = p_bulletin->geneve_udp_port;
+}
+
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
u16 *fw_major, u16 *fw_minor,
u16 *fw_rev, u16 *fw_eng)
@@ -1264,12 +1431,16 @@ static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
void *cookie = hwfn->cdev->ops_cookie;
+ u16 vxlan_port, geneve_port;
+ qed_vf_bulletin_get_udp_ports(hwfn, &vxlan_port, &geneve_port);
is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
&is_mac_forced);
if (is_mac_exist && cookie)
ops->force_mac(cookie, mac, !!is_mac_forced);
+ ops->ports_update(cookie, vxlan_port, geneve_port);
+
/* Always update link configuration according to bulletin */
qed_link_update(hwfn);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index 7da0b165d8bc..34ac70b0e5fe 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -275,6 +275,8 @@ struct vfpf_stop_rxqs_tlv {
struct vfpf_first_tlv first_tlv;
u16 rx_qid;
+
+ /* this field is deprecated and should *always* be set to '1' */
u8 num_rxqs;
u8 cqe_completion;
u8 padding[4];
@@ -285,6 +287,8 @@ struct vfpf_stop_txqs_tlv {
struct vfpf_first_tlv first_tlv;
u16 tx_qid;
+
+ /* this field is deprecated and should *always* be set to '1' */
u8 num_txqs;
u8 padding[5];
};
@@ -425,6 +429,43 @@ struct vfpf_ucast_filter_tlv {
u16 padding[3];
};
+/* tunnel update param tlv */
+struct vfpf_update_tunn_param_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u8 tun_mode_update_mask;
+ u8 tunn_mode;
+ u8 update_tun_cls;
+ u8 vxlan_clss;
+ u8 l2gre_clss;
+ u8 ipgre_clss;
+ u8 l2geneve_clss;
+ u8 ipgeneve_clss;
+ u8 update_geneve_port;
+ u8 update_vxlan_port;
+ u16 geneve_port;
+ u16 vxlan_port;
+ u8 padding[2];
+};
+
+struct pfvf_update_tunn_param_tlv {
+ struct pfvf_tlv hdr;
+
+ u16 tunn_feature_mask;
+ u8 vxlan_mode;
+ u8 l2geneve_mode;
+ u8 ipgeneve_mode;
+ u8 l2gre_mode;
+ u8 ipgre_mode;
+ u8 vxlan_clss;
+ u8 l2gre_clss;
+ u8 ipgre_clss;
+ u8 l2geneve_clss;
+ u8 ipgeneve_clss;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+};
+
struct tlv_buffer_size {
u8 tlv_buffer[TLV_BUFFER_SIZE];
};
@@ -440,6 +481,7 @@ union vfpf_tlvs {
struct vfpf_vport_start_tlv start_vport;
struct vfpf_vport_update_tlv vport_update;
struct vfpf_ucast_filter_tlv ucast_filter;
+ struct vfpf_update_tunn_param_tlv tunn_param_update;
struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size;
};
@@ -449,6 +491,7 @@ union pfvf_tlvs {
struct pfvf_acquire_resp_tlv acquire_resp;
struct tlv_buffer_size tlv_buf_size;
struct pfvf_start_queue_resp_tlv queue_start;
+ struct pfvf_update_tunn_param_tlv tunn_param_resp;
};
enum qed_bulletin_bit {
@@ -509,7 +552,9 @@ struct qed_bulletin_content {
u8 partner_rx_flow_ctrl_en;
u8 partner_adv_pause;
u8 sfp_tx_fault;
- u8 padding4[6];
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 padding4[2];
u32 speed;
u32 partner_adv_speed;
@@ -551,6 +596,7 @@ enum {
CHANNEL_TLV_VPORT_UPDATE_RSS,
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+ CHANNEL_TLV_UPDATE_TUNN_PARAM,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
@@ -868,6 +914,9 @@ void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
struct qed_bulletin_content *p_bulletin);
void qed_iov_vf_task(struct work_struct *work);
+void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun);
+int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_tunn);
#else
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params *params)
@@ -1029,6 +1078,17 @@ __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
static inline void qed_iov_vf_task(struct work_struct *work)
{
}
+
+static inline void
+qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
+{
+}
+
+static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_tunn)
+{
+ return -EINVAL;
+}
#endif
#endif
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index f2aaef2cfb86..9b4f08b6f9b9 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -41,6 +41,9 @@
#include <linux/mutex.h>
#include <linux/bpf.h>
#include <linux/io.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
#include <linux/qed/common_hsi.h>
#include <linux/qed/eth_common.h>
#include <linux/qed/qed_if.h>
@@ -50,7 +53,7 @@
#define QEDE_MAJOR_VERSION 8
#define QEDE_MINOR_VERSION 10
#define QEDE_REVISION_VERSION 10
-#define QEDE_ENGINEERING_VERSION 20
+#define QEDE_ENGINEERING_VERSION 21
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
__stringify(QEDE_MINOR_VERSION) "." \
__stringify(QEDE_REVISION_VERSION) "." \
@@ -58,7 +61,7 @@
#define DRV_MODULE_SYM qede
-struct qede_stats {
+struct qede_stats_common {
u64 no_buff_discards;
u64 packet_too_big_discard;
u64 ttl0_discard;
@@ -90,11 +93,6 @@ struct qede_stats {
u64 rx_256_to_511_byte_packets;
u64 rx_512_to_1023_byte_packets;
u64 rx_1024_to_1518_byte_packets;
- u64 rx_1519_to_1522_byte_packets;
- u64 rx_1519_to_2047_byte_packets;
- u64 rx_2048_to_4095_byte_packets;
- u64 rx_4096_to_9216_byte_packets;
- u64 rx_9217_to_16383_byte_packets;
u64 rx_crc_errors;
u64 rx_mac_crtl_frames;
u64 rx_pause_frames;
@@ -111,17 +109,39 @@ struct qede_stats {
u64 tx_256_to_511_byte_packets;
u64 tx_512_to_1023_byte_packets;
u64 tx_1024_to_1518_byte_packets;
+ u64 tx_pause_frames;
+ u64 tx_pfc_frames;
+ u64 brb_truncates;
+ u64 brb_discards;
+ u64 tx_mac_ctrl_frames;
+};
+
+struct qede_stats_bb {
+ u64 rx_1519_to_1522_byte_packets;
+ u64 rx_1519_to_2047_byte_packets;
+ u64 rx_2048_to_4095_byte_packets;
+ u64 rx_4096_to_9216_byte_packets;
+ u64 rx_9217_to_16383_byte_packets;
u64 tx_1519_to_2047_byte_packets;
u64 tx_2048_to_4095_byte_packets;
u64 tx_4096_to_9216_byte_packets;
u64 tx_9217_to_16383_byte_packets;
- u64 tx_pause_frames;
- u64 tx_pfc_frames;
u64 tx_lpi_entry_count;
u64 tx_total_collisions;
- u64 brb_truncates;
- u64 brb_discards;
- u64 tx_mac_ctrl_frames;
+};
+
+struct qede_stats_ah {
+ u64 rx_1519_to_max_byte_packets;
+ u64 tx_1519_to_max_byte_packets;
+};
+
+struct qede_stats {
+ struct qede_stats_common common;
+
+ union {
+ struct qede_stats_bb bb;
+ struct qede_stats_ah ah;
+ };
};
struct qede_vlan {
@@ -147,10 +167,11 @@ struct qede_dev {
u32 dp_module;
u8 dp_level;
- u32 flags;
-#define QEDE_FLAG_IS_VF BIT(0)
+ unsigned long flags;
+#define QEDE_FLAG_IS_VF BIT(0)
#define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF))
#define QEDE_TX_TIMESTAMPING_EN BIT(1)
+#define QEDE_FLAGS_PTP_TX_IN_PRORGESS BIT(2)
const struct qed_eth_ops *ops;
struct qede_ptp *ptp;
@@ -158,6 +179,10 @@ struct qede_dev {
struct qed_dev_eth_info dev_info;
#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues)
+#define QEDE_IS_BB(edev) \
+ ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_BB)
+#define QEDE_IS_AH(edev) \
+ ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_AH)
struct qede_fastpath *fp_array;
u8 req_num_tx;
@@ -216,7 +241,10 @@ struct qede_dev {
u16 vxlan_dst_port;
u16 geneve_dst_port;
- bool wol_enabled;
+#ifdef CONFIG_RFS_ACCEL
+ struct qede_arfs *arfs;
+#endif
+ bool wol_enabled;
struct qede_rdma_dev rdma_info;
@@ -292,21 +320,24 @@ struct qede_rx_queue {
u8 data_direction;
u8 rxq_id;
+ /* Used once per each NAPI run */
+ u16 num_rx_buffers;
+
+ u16 rx_headroom;
+
u32 rx_buf_size;
u32 rx_buf_seg_size;
- u64 rcv_pkts;
-
struct sw_rx_data *sw_rx_ring;
struct qed_chain rx_bd_ring;
struct qed_chain rx_comp_ring ____cacheline_aligned;
- /* Used once per each NAPI run */
- u16 num_rx_buffers;
-
/* GRO */
struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
+ /* Used once per each NAPI run */
+ u64 rcv_pkts;
+
u64 rx_hw_errors;
u64 rx_alloc_errors;
u64 rx_ip_frags;
@@ -328,6 +359,11 @@ struct sw_tx_bd {
#define QEDE_TSO_SPLIT_BD BIT(0)
};
+struct sw_tx_xdp {
+ struct page *page;
+ dma_addr_t mapping;
+};
+
struct qede_tx_queue {
u8 is_xdp;
bool is_legacy;
@@ -351,11 +387,11 @@ struct qede_tx_queue {
#define QEDE_TXQ_IDX_TO_XDP(edev, idx) ((idx) + QEDE_MAX_TSS_CNT(edev))
/* Regular Tx requires skb + metadata for release purpose,
- * while XDP requires only the pages themselves.
+ * while XDP requires the pages and the mapped address.
*/
union {
struct sw_tx_bd *skbs;
- struct page **pages;
+ struct sw_tx_xdp *xdp;
} sw_tx_ring;
struct qed_chain tx_pbl;
@@ -407,8 +443,20 @@ struct qede_fastpath {
#define QEDE_TUNN_CSUM_UNNECESSARY BIT(2)
#define QEDE_SP_RX_MODE 1
-#define QEDE_SP_VXLAN_PORT_CONFIG 2
-#define QEDE_SP_GENEVE_PORT_CONFIG 3
+
+#ifdef CONFIG_RFS_ACCEL
+int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id);
+void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr);
+void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev);
+void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc);
+void qede_free_arfs(struct qede_dev *edev);
+int qede_alloc_arfs(struct qede_dev *edev);
+
+#define QEDE_SP_ARFS_CONFIG 4
+#define QEDE_SP_TASK_POLL_DELAY (5 * HZ)
+#define QEDE_RFS_MAX_FLTR 256
+#endif
struct qede_reload_args {
void (*func)(struct qede_dev *edev, struct qede_reload_args *args);
@@ -433,6 +481,7 @@ irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie);
/* Filtering function definitions */
void qede_force_mac(void *dev, u8 *mac, bool forced);
+void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port);
int qede_set_mac_addr(struct net_device *ndev, void *p);
int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
index 03e8c0212433..a9e7379313db 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
@@ -281,6 +281,11 @@ static int qede_dcbnl_ieee_setapp(struct net_device *netdev,
struct dcb_app *app)
{
struct qede_dev *edev = netdev_priv(netdev);
+ int err;
+
+ err = dcb_ieee_setapp(netdev, app);
+ if (err)
+ return err;
return edev->ops->dcb->ieee_setapp(edev->cdev, app);
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 897953133245..4dcfe9614731 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -75,16 +75,33 @@ static const struct {
QEDE_TQSTAT(stopped_cnt),
};
-#define QEDE_STAT_OFFSET(stat_name) (offsetof(struct qede_stats, stat_name))
-#define QEDE_STAT_STRING(stat_name) (#stat_name)
-#define _QEDE_STAT(stat_name, pf_only) \
- {QEDE_STAT_OFFSET(stat_name), QEDE_STAT_STRING(stat_name), pf_only}
-#define QEDE_PF_STAT(stat_name) _QEDE_STAT(stat_name, true)
-#define QEDE_STAT(stat_name) _QEDE_STAT(stat_name, false)
+#define QEDE_STAT_OFFSET(stat_name, type, base) \
+ (offsetof(type, stat_name) + (base))
+#define QEDE_STAT_STRING(stat_name) (#stat_name)
+#define _QEDE_STAT(stat_name, type, base, attr) \
+ {QEDE_STAT_OFFSET(stat_name, type, base), \
+ QEDE_STAT_STRING(stat_name), \
+ attr}
+#define QEDE_STAT(stat_name) \
+ _QEDE_STAT(stat_name, struct qede_stats_common, 0, 0x0)
+#define QEDE_PF_STAT(stat_name) \
+ _QEDE_STAT(stat_name, struct qede_stats_common, 0, \
+ BIT(QEDE_STAT_PF_ONLY))
+#define QEDE_PF_BB_STAT(stat_name) \
+ _QEDE_STAT(stat_name, struct qede_stats_bb, \
+ offsetof(struct qede_stats, bb), \
+ BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_BB_ONLY))
+#define QEDE_PF_AH_STAT(stat_name) \
+ _QEDE_STAT(stat_name, struct qede_stats_ah, \
+ offsetof(struct qede_stats, ah), \
+ BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_AH_ONLY))
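/* How the offsets resolve (illustration only), e.g.:
 *
 *	QEDE_PF_BB_STAT(tx_lpi_entry_count)
 *	  => { offsetof(struct qede_stats_bb, tx_lpi_entry_count) +
 *	       offsetof(struct qede_stats, bb),
 *	       "tx_lpi_entry_count",
 *	       BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_BB_ONLY) }
 *
 * qede_get_ethtool_stats() reads each value as
 *	*(u64 *)((void *)&edev->stats + qede_stats_arr[i].offset)
 * so the 'base' argument makes BB/AH offsets relative to struct qede_stats
 * rather than to the union member, and the attr bits let
 * qede_is_irrelevant_stat() skip entries that don't apply to the device.
 */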
static const struct {
u64 offset;
char string[ETH_GSTRING_LEN];
- bool pf_only;
+ unsigned long attr;
+#define QEDE_STAT_PF_ONLY 0
+#define QEDE_STAT_BB_ONLY 1
+#define QEDE_STAT_AH_ONLY 2
} qede_stats_arr[] = {
QEDE_STAT(rx_ucast_bytes),
QEDE_STAT(rx_mcast_bytes),
@@ -106,22 +123,23 @@ static const struct {
QEDE_PF_STAT(rx_256_to_511_byte_packets),
QEDE_PF_STAT(rx_512_to_1023_byte_packets),
QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
- QEDE_PF_STAT(rx_1519_to_1522_byte_packets),
- QEDE_PF_STAT(rx_1519_to_2047_byte_packets),
- QEDE_PF_STAT(rx_2048_to_4095_byte_packets),
- QEDE_PF_STAT(rx_4096_to_9216_byte_packets),
- QEDE_PF_STAT(rx_9217_to_16383_byte_packets),
+ QEDE_PF_BB_STAT(rx_1519_to_1522_byte_packets),
+ QEDE_PF_BB_STAT(rx_1519_to_2047_byte_packets),
+ QEDE_PF_BB_STAT(rx_2048_to_4095_byte_packets),
+ QEDE_PF_BB_STAT(rx_4096_to_9216_byte_packets),
+ QEDE_PF_BB_STAT(rx_9217_to_16383_byte_packets),
+ QEDE_PF_AH_STAT(rx_1519_to_max_byte_packets),
QEDE_PF_STAT(tx_64_byte_packets),
QEDE_PF_STAT(tx_65_to_127_byte_packets),
QEDE_PF_STAT(tx_128_to_255_byte_packets),
QEDE_PF_STAT(tx_256_to_511_byte_packets),
QEDE_PF_STAT(tx_512_to_1023_byte_packets),
QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
- QEDE_PF_STAT(tx_1519_to_2047_byte_packets),
- QEDE_PF_STAT(tx_2048_to_4095_byte_packets),
- QEDE_PF_STAT(tx_4096_to_9216_byte_packets),
- QEDE_PF_STAT(tx_9217_to_16383_byte_packets),
-
+ QEDE_PF_BB_STAT(tx_1519_to_2047_byte_packets),
+ QEDE_PF_BB_STAT(tx_2048_to_4095_byte_packets),
+ QEDE_PF_BB_STAT(tx_4096_to_9216_byte_packets),
+ QEDE_PF_BB_STAT(tx_9217_to_16383_byte_packets),
+ QEDE_PF_AH_STAT(tx_1519_to_max_byte_packets),
QEDE_PF_STAT(rx_mac_crtl_frames),
QEDE_PF_STAT(tx_mac_ctrl_frames),
QEDE_PF_STAT(rx_pause_frames),
@@ -136,8 +154,8 @@ static const struct {
QEDE_PF_STAT(rx_jabbers),
QEDE_PF_STAT(rx_undersize_packets),
QEDE_PF_STAT(rx_fragments),
- QEDE_PF_STAT(tx_lpi_entry_count),
- QEDE_PF_STAT(tx_total_collisions),
+ QEDE_PF_BB_STAT(tx_lpi_entry_count),
+ QEDE_PF_BB_STAT(tx_total_collisions),
QEDE_PF_STAT(brb_truncates),
QEDE_PF_STAT(brb_discards),
QEDE_STAT(no_buff_discards),
@@ -155,6 +173,12 @@ static const struct {
};
#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
+#define QEDE_STAT_IS_PF_ONLY(i) \
+ test_bit(QEDE_STAT_PF_ONLY, &qede_stats_arr[i].attr)
+#define QEDE_STAT_IS_BB_ONLY(i) \
+ test_bit(QEDE_STAT_BB_ONLY, &qede_stats_arr[i].attr)
+#define QEDE_STAT_IS_AH_ONLY(i) \
+ test_bit(QEDE_STAT_AH_ONLY, &qede_stats_arr[i].attr)
enum {
QEDE_PRI_FLAG_CMT,
@@ -213,6 +237,13 @@ static void qede_get_strings_stats_rxq(struct qede_dev *edev,
}
}
+static bool qede_is_irrelevant_stat(struct qede_dev *edev, int stat_index)
+{
+ return (IS_VF(edev) && QEDE_STAT_IS_PF_ONLY(stat_index)) ||
+ (QEDE_IS_BB(edev) && QEDE_STAT_IS_AH_ONLY(stat_index)) ||
+ (QEDE_IS_AH(edev) && QEDE_STAT_IS_BB_ONLY(stat_index));
+}
+
static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{
struct qede_fastpath *fp;
@@ -234,7 +265,7 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
/* Account for non-queue statistics */
for (i = 0; i < QEDE_NUM_STATS; i++) {
- if (IS_VF(edev) && qede_stats_arr[i].pf_only)
+ if (qede_is_irrelevant_stat(edev, i))
continue;
strcpy(buf, qede_stats_arr[i].string);
buf += ETH_GSTRING_LEN;
@@ -309,7 +340,7 @@ static void qede_get_ethtool_stats(struct net_device *dev,
}
for (i = 0; i < QEDE_NUM_STATS; i++) {
- if (IS_VF(edev) && qede_stats_arr[i].pf_only)
+ if (qede_is_irrelevant_stat(edev, i))
continue;
*buf = *((u64 *)(((void *)&edev->stats) +
qede_stats_arr[i].offset));
@@ -323,17 +354,13 @@ static void qede_get_ethtool_stats(struct net_device *dev,
static int qede_get_sset_count(struct net_device *dev, int stringset)
{
struct qede_dev *edev = netdev_priv(dev);
- int num_stats = QEDE_NUM_STATS;
+ int num_stats = QEDE_NUM_STATS, i;
switch (stringset) {
case ETH_SS_STATS:
- if (IS_VF(edev)) {
- int i;
-
- for (i = 0; i < QEDE_NUM_STATS; i++)
- if (qede_stats_arr[i].pf_only)
- num_stats--;
- }
+ for (i = 0; i < QEDE_NUM_STATS; i++)
+ if (qede_is_irrelevant_stat(edev, i))
+ num_stats--;
/* Account for the Regular Tx statistics */
num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index 107c3fda4792..eb5652073ca8 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -38,6 +38,459 @@
#include <linux/qed/qed_if.h>
#include "qede.h"
+#ifdef CONFIG_RFS_ACCEL
+struct qede_arfs_tuple {
+ union {
+ __be32 src_ipv4;
+ struct in6_addr src_ipv6;
+ };
+ union {
+ __be32 dst_ipv4;
+ struct in6_addr dst_ipv6;
+ };
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 eth_proto;
+ u8 ip_proto;
+};
+
+struct qede_arfs_fltr_node {
+#define QEDE_FLTR_VALID 0
+ unsigned long state;
+
+ /* pointer to aRFS packet buffer */
+ void *data;
+
+ /* dma map address of aRFS packet buffer */
+ dma_addr_t mapping;
+
+ /* length of aRFS packet buffer */
+ int buf_len;
+
+ /* tuples to hold from aRFS packet buffer */
+ struct qede_arfs_tuple tuple;
+
+ u32 flow_id;
+ u16 sw_id;
+ u16 rxq_id;
+ u16 next_rxq_id;
+ bool filter_op;
+ bool used;
+ struct hlist_node node;
+};
+
+struct qede_arfs {
+#define QEDE_ARFS_POLL_COUNT 100
+#define QEDE_RFS_FLW_BITSHIFT (4)
+#define QEDE_RFS_FLW_MASK ((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
+ struct hlist_head arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT];
+
+ /* lock for filter list access */
+ spinlock_t arfs_list_lock;
+ unsigned long *arfs_fltr_bmap;
+ int filter_count;
+ bool enable;
+};
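/* Sizing note (derived from the constants above and in qede.h): the flow
 * table has 1 << QEDE_RFS_FLW_BITSHIFT = 16 hash buckets, indexed in
 * qede_rx_flow_steer() by skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK, while
 * arfs_fltr_bmap tracks software filter ids for up to QEDE_RFS_MAX_FLTR (256)
 * concurrently installed filters.
 */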
+
+static void qede_configure_arfs_fltr(struct qede_dev *edev,
+ struct qede_arfs_fltr_node *n,
+ u16 rxq_id, bool add_fltr)
+{
+ const struct qed_eth_ops *op = edev->ops;
+
+ if (n->used)
+ return;
+
+ DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
+ "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
+ add_fltr ? "Adding" : "Deleting",
+ n->flow_id, n->sw_id, ntohs(n->tuple.src_port),
+ ntohs(n->tuple.dst_port), rxq_id);
+
+ n->used = true;
+ n->filter_op = add_fltr;
+ op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0,
+ rxq_id, add_fltr);
+}
+
+static void
+qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
+{
+ kfree(fltr->data);
+ clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
+ kfree(fltr);
+}
+
+void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
+{
+ struct qede_arfs_fltr_node *fltr = filter;
+ struct qede_dev *edev = dev;
+
+ if (fw_rc) {
+ DP_NOTICE(edev,
+ "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
+ fw_rc, fltr->flow_id, fltr->sw_id,
+ ntohs(fltr->tuple.src_port),
+ ntohs(fltr->tuple.dst_port), fltr->rxq_id);
+
+ spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+ fltr->used = false;
+ clear_bit(QEDE_FLTR_VALID, &fltr->state);
+
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+ return;
+ }
+
+ spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+ fltr->used = false;
+
+ if (fltr->filter_op) {
+ set_bit(QEDE_FLTR_VALID, &fltr->state);
+ if (fltr->rxq_id != fltr->next_rxq_id)
+ qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
+ false);
+ } else {
+ clear_bit(QEDE_FLTR_VALID, &fltr->state);
+ if (fltr->rxq_id != fltr->next_rxq_id) {
+ fltr->rxq_id = fltr->next_rxq_id;
+ qede_configure_arfs_fltr(edev, fltr,
+ fltr->rxq_id, true);
+ }
+ }
+
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+}
+
+/* Should be called while qede_lock is held */
+void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
+{
+ int i;
+
+ for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) {
+ struct hlist_node *temp;
+ struct hlist_head *head;
+ struct qede_arfs_fltr_node *fltr;
+
+ head = &edev->arfs->arfs_hl_head[i];
+
+ hlist_for_each_entry_safe(fltr, temp, head, node) {
+ bool del = false;
+
+ if (edev->state != QEDE_STATE_OPEN)
+ del = true;
+
+ spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+ if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
+ !fltr->used) || free_fltr) {
+ hlist_del(&fltr->node);
+ dma_unmap_single(&edev->pdev->dev,
+ fltr->mapping,
+ fltr->buf_len, DMA_TO_DEVICE);
+ qede_free_arfs_filter(edev, fltr);
+ edev->arfs->filter_count--;
+ } else {
+ if ((rps_may_expire_flow(edev->ndev,
+ fltr->rxq_id,
+ fltr->flow_id,
+ fltr->sw_id) || del) &&
+ !free_fltr)
+ qede_configure_arfs_fltr(edev, fltr,
+ fltr->rxq_id,
+ false);
+ }
+
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+ }
+ }
+
+ spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+ if (!edev->arfs->filter_count) {
+ if (edev->arfs->enable) {
+ edev->arfs->enable = false;
+ edev->ops->configure_arfs_searcher(edev->cdev, false);
+ }
+ } else {
+ set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task,
+ QEDE_SP_TASK_POLL_DELAY);
+ }
+
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+}
+
+/* This function waits until all aRFS filters get deleted and freed.
+ * On timeout it frees all filters forcefully.
+ */
+void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev)
+{
+ int count = QEDE_ARFS_POLL_COUNT;
+
+ while (count) {
+ qede_process_arfs_filters(edev, false);
+
+ if (!edev->arfs->filter_count)
+ break;
+
+ msleep(100);
+ count--;
+ }
+
+ if (!count) {
+ DP_NOTICE(edev, "Timeout in polling for arfs filter free\n");
+
+ /* Something is terribly wrong, free forcefully */
+ qede_process_arfs_filters(edev, true);
+ }
+}
+
+int qede_alloc_arfs(struct qede_dev *edev)
+{
+ int i;
+
+ edev->arfs = vzalloc(sizeof(*edev->arfs));
+ if (!edev->arfs)
+ return -ENOMEM;
+
+ spin_lock_init(&edev->arfs->arfs_list_lock);
+
+ for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
+ INIT_HLIST_HEAD(&edev->arfs->arfs_hl_head[i]);
+
+ edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
+ if (!edev->ndev->rx_cpu_rmap) {
+ vfree(edev->arfs);
+ edev->arfs = NULL;
+ return -ENOMEM;
+ }
+
+ edev->arfs->arfs_fltr_bmap = vzalloc(BITS_TO_LONGS(QEDE_RFS_MAX_FLTR) *
+ sizeof(long));
+ if (!edev->arfs->arfs_fltr_bmap) {
+ free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
+ edev->ndev->rx_cpu_rmap = NULL;
+ vfree(edev->arfs);
+ edev->arfs = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void qede_free_arfs(struct qede_dev *edev)
+{
+ if (!edev->arfs)
+ return;
+
+ if (edev->ndev->rx_cpu_rmap)
+ free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
+
+ edev->ndev->rx_cpu_rmap = NULL;
+ vfree(edev->arfs->arfs_fltr_bmap);
+ edev->arfs->arfs_fltr_bmap = NULL;
+ vfree(edev->arfs);
+ edev->arfs = NULL;
+}
+
+static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
+ const struct sk_buff *skb)
+{
+ if (skb->protocol == htons(ETH_P_IP)) {
+ if (tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
+ tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr)
+ return true;
+ else
+ return false;
+ } else {
+ struct in6_addr *src = &tpos->tuple.src_ipv6;
+ u8 size = sizeof(struct in6_addr);
+
+ if (!memcmp(src, &ipv6_hdr(skb)->saddr, size) &&
+ !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, size))
+ return true;
+ else
+ return false;
+ }
+}
+
+static struct qede_arfs_fltr_node *
+qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
+ __be16 src_port, __be16 dst_port, u8 ip_proto)
+{
+ struct qede_arfs_fltr_node *tpos;
+
+ hlist_for_each_entry(tpos, h, node)
+ if (tpos->tuple.ip_proto == ip_proto &&
+ tpos->tuple.eth_proto == skb->protocol &&
+ qede_compare_ip_addr(tpos, skb) &&
+ tpos->tuple.src_port == src_port &&
+ tpos->tuple.dst_port == dst_port)
+ return tpos;
+
+ return NULL;
+}
+
+static struct qede_arfs_fltr_node *
+qede_alloc_filter(struct qede_dev *edev, int min_hlen)
+{
+ struct qede_arfs_fltr_node *n;
+ int bit_id;
+
+ bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap,
+ QEDE_RFS_MAX_FLTR);
+
+ if (bit_id >= QEDE_RFS_MAX_FLTR)
+ return NULL;
+
+ n = kzalloc(sizeof(*n), GFP_ATOMIC);
+ if (!n)
+ return NULL;
+
+ n->data = kzalloc(min_hlen, GFP_ATOMIC);
+ if (!n->data) {
+ kfree(n);
+ return NULL;
+ }
+
+ n->sw_id = (u16)bit_id;
+ set_bit(bit_id, edev->arfs->arfs_fltr_bmap);
+ return n;
+}
+
+int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_arfs_fltr_node *n;
+ int min_hlen, rc, tp_offset;
+ struct ethhdr *eth;
+ __be16 *ports;
+ u16 tbl_idx;
+ u8 ip_proto;
+
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
+ if (skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6))
+ return -EPROTONOSUPPORT;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ip_proto = ip_hdr(skb)->protocol;
+ tp_offset = sizeof(struct iphdr);
+ } else {
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ tp_offset = sizeof(struct ipv6hdr);
+ }
+
+ if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
+ return -EPROTONOSUPPORT;
+
+ ports = (__be16 *)(skb->data + tp_offset);
+ tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;
+
+ spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+ n = qede_arfs_htbl_key_search(&edev->arfs->arfs_hl_head[tbl_idx],
+ skb, ports[0], ports[1], ip_proto);
+
+ if (n) {
+ /* Filter match */
+ n->next_rxq_id = rxq_index;
+
+ if (test_bit(QEDE_FLTR_VALID, &n->state)) {
+ if (n->rxq_id != rxq_index)
+ qede_configure_arfs_fltr(edev, n, n->rxq_id,
+ false);
+ } else {
+ if (!n->used) {
+ n->rxq_id = rxq_index;
+ qede_configure_arfs_fltr(edev, n, n->rxq_id,
+ true);
+ }
+ }
+
+ rc = n->sw_id;
+ goto ret_unlock;
+ }
+
+ min_hlen = ETH_HLEN + skb_headlen(skb);
+
+ n = qede_alloc_filter(edev, min_hlen);
+ if (!n) {
+ rc = -ENOMEM;
+ goto ret_unlock;
+ }
+
+ n->buf_len = min_hlen;
+ n->rxq_id = rxq_index;
+ n->next_rxq_id = rxq_index;
+ n->tuple.src_port = ports[0];
+ n->tuple.dst_port = ports[1];
+ n->flow_id = flow_id;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
+ n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
+ } else {
+ memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
+ sizeof(struct in6_addr));
+ memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
+ sizeof(struct in6_addr));
+ }
+
+ eth = (struct ethhdr *)n->data;
+ eth->h_proto = skb->protocol;
+ n->tuple.eth_proto = skb->protocol;
+ n->tuple.ip_proto = ip_proto;
+ memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));
+
+ n->mapping = dma_map_single(&edev->pdev->dev, n->data,
+ n->buf_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&edev->pdev->dev, n->mapping)) {
+ DP_NOTICE(edev, "Failed to map DMA memory for arfs\n");
+ qede_free_arfs_filter(edev, n);
+ rc = -ENOMEM;
+ goto ret_unlock;
+ }
+
+ INIT_HLIST_NODE(&n->node);
+ hlist_add_head(&n->node, &edev->arfs->arfs_hl_head[tbl_idx]);
+ edev->arfs->filter_count++;
+
+ if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
+ edev->ops->configure_arfs_searcher(edev->cdev, true);
+ edev->arfs->enable = true;
+ }
+
+ qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
+
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+
+ set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+ return n->sw_id;
+
+ret_unlock:
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+ return rc;
+}
+#endif
+
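/* Hookup sketch (assumption; the net_device_ops change itself is outside this
 * hunk): qede_rx_flow_steer() matches the .ndo_rx_flow_steer prototype, so it
 * would be registered roughly as
 *
 *	#ifdef CONFIG_RFS_ACCEL
 *		.ndo_rx_flow_steer	= qede_rx_flow_steer,
 *	#endif
 *
 * in qede's net_device_ops. Per the aRFS contract it returns the software
 * filter id on success or a negative errno when the flow cannot be steered.
 */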
+void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port)
+{
+ struct qede_dev *edev = dev;
+
+ if (edev->vxlan_dst_port != vxlan_port)
+ edev->vxlan_dst_port = 0;
+
+ if (edev->geneve_dst_port != geneve_port)
+ edev->geneve_dst_port = 0;
+}
+
void qede_force_mac(void *dev, u8 *mac, bool forced)
{
struct qede_dev *edev = dev;
@@ -441,69 +894,112 @@ int qede_set_features(struct net_device *dev, netdev_features_t features)
void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
{
struct qede_dev *edev = netdev_priv(dev);
+ struct qed_tunn_params tunn_params;
u16 t_port = ntohs(ti->port);
+ int rc;
+
+ memset(&tunn_params, 0, sizeof(tunn_params));
switch (ti->type) {
case UDP_TUNNEL_TYPE_VXLAN:
+ if (!edev->dev_info.common.vxlan_enable)
+ return;
+
if (edev->vxlan_dst_port)
return;
- edev->vxlan_dst_port = t_port;
+ tunn_params.update_vxlan_port = 1;
+ tunn_params.vxlan_port = t_port;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
- t_port);
+ __qede_lock(edev);
+ rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
+ __qede_unlock(edev);
+
+ if (!rc) {
+ edev->vxlan_dst_port = t_port;
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
+ t_port);
+ } else {
+ DP_NOTICE(edev, "Failed to add vxlan UDP port=%d\n",
+ t_port);
+ }
- set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
break;
case UDP_TUNNEL_TYPE_GENEVE:
+ if (!edev->dev_info.common.geneve_enable)
+ return;
+
if (edev->geneve_dst_port)
return;
- edev->geneve_dst_port = t_port;
+ tunn_params.update_geneve_port = 1;
+ tunn_params.geneve_port = t_port;
+
+ __qede_lock(edev);
+ rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
+ __qede_unlock(edev);
+
+ if (!rc) {
+ edev->geneve_dst_port = t_port;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Added geneve port=%d\n", t_port);
+ } else {
+ DP_NOTICE(edev, "Failed to add geneve UDP port=%d\n",
+ t_port);
+ }
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n",
- t_port);
- set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
break;
default:
return;
}
-
- schedule_delayed_work(&edev->sp_task, 0);
}
-void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti)
+void qede_udp_tunnel_del(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct qede_dev *edev = netdev_priv(dev);
+ struct qed_tunn_params tunn_params;
u16 t_port = ntohs(ti->port);
+ memset(&tunn_params, 0, sizeof(tunn_params));
+
switch (ti->type) {
case UDP_TUNNEL_TYPE_VXLAN:
if (t_port != edev->vxlan_dst_port)
return;
+ tunn_params.update_vxlan_port = 1;
+ tunn_params.vxlan_port = 0;
+
+ __qede_lock(edev);
+ edev->ops->tunn_config(edev->cdev, &tunn_params);
+ __qede_unlock(edev);
+
edev->vxlan_dst_port = 0;
DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
t_port);
- set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
break;
case UDP_TUNNEL_TYPE_GENEVE:
if (t_port != edev->geneve_dst_port)
return;
+ tunn_params.update_geneve_port = 1;
+ tunn_params.geneve_port = 0;
+
+ __qede_lock(edev);
+ edev->ops->tunn_config(edev->cdev, &tunn_params);
+ __qede_unlock(edev);
+
edev->geneve_dst_port = 0;
DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
t_port);
- set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
break;
default:
return;
}
-
- schedule_delayed_work(&edev->sp_task, 0);
}
static void qede_xdp_reload_func(struct qede_dev *edev,
@@ -520,11 +1016,6 @@ static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
{
struct qede_reload_args args;
- if (prog && prog->xdp_adjust_head) {
- DP_ERR(edev, "Does not support bpf_xdp_adjust_head()\n");
- return -EOPNOTSUPP;
- }
-
/* If we're called, there was already a bpf reference increment */
args.func = &qede_xdp_reload_func;
args.u.new_prog = prog;
@@ -537,6 +1028,11 @@ int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
struct qede_dev *edev = netdev_priv(dev);
+ if (IS_VF(edev)) {
+ DP_NOTICE(edev, "VFs don't support XDP\n");
+ return -EOPNOTSUPP;
+ }
+
switch (xdp->command) {
case XDP_SETUP_PROG:
return qede_xdp_set(edev, xdp->prog);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 1e65038c8fc0..7b6f41d06245 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -87,7 +87,8 @@ int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
WARN_ON(!rx_bd);
rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
- rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+ rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) +
+ rxq->rx_headroom);
rxq->sw_rx_prod++;
rxq->filled_buffers++;
@@ -360,7 +361,8 @@ static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
metadata->mapping + padding,
length, PCI_DMA_TODEVICE);
- txq->sw_tx_ring.pages[idx] = metadata->data;
+ txq->sw_tx_ring.xdp[idx].page = metadata->data;
+ txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
txq->sw_tx_prod++;
/* Mark the fastpath for future XDP doorbell */
@@ -384,19 +386,19 @@ int qede_txq_has_work(struct qede_tx_queue *txq)
static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
- struct eth_tx_1st_bd *bd;
- u16 hw_bd_cons;
+ u16 hw_bd_cons, idx;
hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
barrier();
while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
- bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+ qed_chain_consume(&txq->tx_pbl);
+ idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
- dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd),
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- __free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons &
- NUM_TX_BDS_MAX]);
+ dma_unmap_page(&edev->pdev->dev,
+ txq->sw_tx_ring.xdp[idx].mapping,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(txq->sw_tx_ring.xdp[idx].page);
txq->sw_tx_cons++;
txq->xmit_pkts++;
@@ -508,7 +510,8 @@ static inline void qede_reuse_page(struct qede_rx_queue *rxq,
new_mapping = curr_prod->mapping + curr_prod->page_offset;
rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
- rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));
+ rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) +
+ rxq->rx_headroom);
rxq->sw_rx_prod++;
curr_cons->data = NULL;
@@ -624,7 +627,6 @@ static inline void qede_skb_receive(struct qede_dev *edev,
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
napi_gro_receive(&fp->napi, skb);
- rxq->rcv_pkts++;
}
static void qede_set_gro_params(struct qede_dev *edev,
@@ -884,9 +886,9 @@ static inline void qede_tpa_cont(struct qede_dev *edev,
"Strange - TPA cont with more than a single len_list entry\n");
}
-static void qede_tpa_end(struct qede_dev *edev,
- struct qede_fastpath *fp,
- struct eth_fast_path_rx_tpa_end_cqe *cqe)
+static int qede_tpa_end(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct eth_fast_path_rx_tpa_end_cqe *cqe)
{
struct qede_rx_queue *rxq = fp->rxq;
struct qede_agg_info *tpa_info;
@@ -934,11 +936,12 @@ static void qede_tpa_end(struct qede_dev *edev,
tpa_info->state = QEDE_AGG_STATE_NONE;
- return;
+ return 1;
err:
tpa_info->state = QEDE_AGG_STATE_NONE;
dev_kfree_skb_any(tpa_info->skb);
tpa_info->skb = NULL;
+ return 0;
}
static u8 qede_check_notunn_csum(u16 flag)
@@ -990,14 +993,15 @@ static bool qede_rx_xdp(struct qede_dev *edev,
struct qede_rx_queue *rxq,
struct bpf_prog *prog,
struct sw_rx_data *bd,
- struct eth_fast_path_rx_reg_cqe *cqe)
+ struct eth_fast_path_rx_reg_cqe *cqe,
+ u16 *data_offset, u16 *len)
{
- u16 len = le16_to_cpu(cqe->len_on_first_bd);
struct xdp_buff xdp;
enum xdp_action act;
- xdp.data = page_address(bd->data) + cqe->placement_offset;
- xdp.data_end = xdp.data + len;
+ xdp.data_hard_start = page_address(bd->data);
+ xdp.data = xdp.data_hard_start + *data_offset;
+ xdp.data_end = xdp.data + *len;
/* Queues always have a full reset currently, so for the time
* being until there's atomic program replace just mark read
@@ -1007,6 +1011,10 @@ static bool qede_rx_xdp(struct qede_dev *edev,
act = bpf_prog_run_xdp(prog, &xdp);
rcu_read_unlock();
+ /* Recalculate, as XDP might have changed the headers */
+ *data_offset = xdp.data - xdp.data_hard_start;
+ *len = xdp.data_end - xdp.data;
+
if (act == XDP_PASS)
return true;
@@ -1025,7 +1033,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
/* Now if there's a transmission problem, we'd still have to
* throw current buffer, as replacement was already allocated.
*/
- if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) {
+ if (qede_xdp_xmit(edev, fp, bd, *data_offset, *len)) {
dma_unmap_page(rxq->dev, bd->mapping,
PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(bd->data);
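/* Layout note (sketch of what the headroom change implies): the Rx BD now
 * points rxq->rx_headroom bytes past the start of the buffer, so the frame
 * begins at placement_offset + rx_headroom within the page while
 * xdp.data_hard_start still points at page_address(bd->data). That reserved
 * gap is what allows bpf_xdp_adjust_head() to move xdp.data backwards, which
 * is why the -EOPNOTSUPP rejection of xdp_adjust_head programs is dropped
 * from qede_xdp_set() in this patch; *data_offset and *len are re-read after
 * the program runs to pick up any such adjustment.
 */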
@@ -1052,7 +1060,7 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
struct sw_rx_data *bd, u16 len,
u16 pad)
{
- unsigned int offset = bd->page_offset;
+ unsigned int offset = bd->page_offset + pad;
struct skb_frag_struct *frag;
struct page *page = bd->data;
unsigned int pull_len;
@@ -1069,7 +1077,7 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
*/
if (len + pad <= edev->rx_copybreak) {
memcpy(skb_put(skb, len),
- page_address(page) + pad + offset, len);
+ page_address(page) + offset, len);
qede_reuse_page(rxq, bd);
goto out;
}
@@ -1077,7 +1085,7 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
frag = &skb_shinfo(skb)->frags[0];
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- page, pad + offset, len, rxq->rx_buf_seg_size);
+ page, offset, len, rxq->rx_buf_seg_size);
va = skb_frag_address(frag);
pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
@@ -1178,8 +1186,7 @@ static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
return 0;
case ETH_RX_CQE_TYPE_TPA_END:
- qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
- return 1;
+ return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
default:
return 0;
}
@@ -1224,12 +1231,13 @@ static int qede_rx_process_cqe(struct qede_dev *edev,
fp_cqe = &cqe->fast_path_regular;
len = le16_to_cpu(fp_cqe->len_on_first_bd);
- pad = fp_cqe->placement_offset;
+ pad = fp_cqe->placement_offset + rxq->rx_headroom;
/* Run eBPF program if one is attached */
if (xdp_prog)
- if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe))
- return 1;
+ if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe,
+ &pad, &len))
+ return 0;
/* If this is an error packet then drop it */
flags = cqe->fast_path_regular.pars_flags.flags;
@@ -1290,8 +1298,8 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
struct qede_rx_queue *rxq = fp->rxq;
struct qede_dev *edev = fp->edev;
+ int work_done = 0, rcv_pkts = 0;
u16 hw_comp_cons, sw_comp_cons;
- int work_done = 0;
hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
@@ -1305,12 +1313,14 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
/* Loop to complete all indicated BDs */
while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
- qede_rx_process_cqe(edev, fp, rxq);
+ rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
qed_chain_recycle_consumed(&rxq->rx_comp_ring);
sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
work_done++;
}
+ rxq->rcv_pkts += rcv_pkts;
+
/* Allocate replacement buffers */
while (rxq->num_rx_buffers - rxq->filled_buffers)
if (qede_alloc_rx_buffer(rxq, false))
@@ -1687,13 +1697,24 @@ netdev_features_t qede_features_check(struct sk_buff *skb,
}
/* Disable offloads for geneve tunnels, as HW can't parse
- * the geneve header which has option length greater than 32B.
+ * the geneve header which has option length greater than 32b
+ * and disable offloads for the ports which are not offloaded.
*/
- if ((l4_proto == IPPROTO_UDP) &&
- ((skb_inner_mac_header(skb) -
- skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN))
- return features & ~(NETIF_F_CSUM_MASK |
- NETIF_F_GSO_MASK);
+ if (l4_proto == IPPROTO_UDP) {
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 hdrlen, vxln_port, gnv_port;
+
+ hdrlen = QEDE_MAX_TUN_HDR_LEN;
+ vxln_port = edev->vxlan_dst_port;
+ gnv_port = edev->geneve_dst_port;
+
+ if ((skb_inner_mac_header(skb) -
+ skb_transport_header(skb)) > hdrlen ||
+ (ntohs(udp_hdr(skb)->dest) != vxln_port &&
+ ntohs(udp_hdr(skb)->dest) != gnv_port))
+ return features & ~(NETIF_F_CSUM_MASK |
+ NETIF_F_GSO_MASK);
+ }
}
return features;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 3a78c3f25157..b9ba23d71c61 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -84,6 +84,8 @@ static const struct qed_eth_ops *qed_ops;
#define CHIP_NUM_57980S_50 0x1654
#define CHIP_NUM_57980S_25 0x1656
#define CHIP_NUM_57980S_IOV 0x1664
+#define CHIP_NUM_AH 0x8070
+#define CHIP_NUM_AH_IOV 0x8090
#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
@@ -93,6 +95,9 @@ static const struct qed_eth_ops *qed_ops;
#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV
+#define PCI_DEVICE_ID_AH CHIP_NUM_AH
+#define PCI_DEVICE_ID_AH_IOV CHIP_NUM_AH_IOV
+
#endif
enum qede_pci_private {
@@ -110,6 +115,10 @@ static const struct pci_device_id qede_pci_tbl[] = {
#ifdef CONFIG_QED_SRIOV
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
#endif
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
+#endif
{ 0 }
};
@@ -216,9 +225,13 @@ static struct pci_driver qede_pci_driver = {
static struct qed_eth_cb_ops qede_ll_ops = {
{
+#ifdef CONFIG_RFS_ACCEL
+ .arfs_filter_op = qede_arfs_filter_op,
+#endif
.link_update = qede_link_update,
},
.force_mac = qede_force_mac,
+ .ports_update = qede_udp_ports_update,
};
static int qede_netdev_event(struct notifier_block *this, unsigned long event,
@@ -314,122 +327,135 @@ static int qede_close(struct net_device *ndev);
void qede_fill_by_demand_stats(struct qede_dev *edev)
{
+ struct qede_stats_common *p_common = &edev->stats.common;
struct qed_eth_stats stats;
edev->ops->get_vport_stats(edev->cdev, &stats);
- edev->stats.no_buff_discards = stats.no_buff_discards;
- edev->stats.packet_too_big_discard = stats.packet_too_big_discard;
- edev->stats.ttl0_discard = stats.ttl0_discard;
- edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
- edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
- edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
- edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
- edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
- edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
- edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
- edev->stats.mac_filter_discards = stats.mac_filter_discards;
-
- edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
- edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
- edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
- edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
- edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
- edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
- edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
- edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
- edev->stats.coalesced_events = stats.tpa_coalesced_events;
- edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
- edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
- edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
-
- edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
- edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
- edev->stats.rx_128_to_255_byte_packets =
- stats.rx_128_to_255_byte_packets;
- edev->stats.rx_256_to_511_byte_packets =
- stats.rx_256_to_511_byte_packets;
- edev->stats.rx_512_to_1023_byte_packets =
- stats.rx_512_to_1023_byte_packets;
- edev->stats.rx_1024_to_1518_byte_packets =
- stats.rx_1024_to_1518_byte_packets;
- edev->stats.rx_1519_to_1522_byte_packets =
- stats.rx_1519_to_1522_byte_packets;
- edev->stats.rx_1519_to_2047_byte_packets =
- stats.rx_1519_to_2047_byte_packets;
- edev->stats.rx_2048_to_4095_byte_packets =
- stats.rx_2048_to_4095_byte_packets;
- edev->stats.rx_4096_to_9216_byte_packets =
- stats.rx_4096_to_9216_byte_packets;
- edev->stats.rx_9217_to_16383_byte_packets =
- stats.rx_9217_to_16383_byte_packets;
- edev->stats.rx_crc_errors = stats.rx_crc_errors;
- edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
- edev->stats.rx_pause_frames = stats.rx_pause_frames;
- edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
- edev->stats.rx_align_errors = stats.rx_align_errors;
- edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
- edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
- edev->stats.rx_jabbers = stats.rx_jabbers;
- edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
- edev->stats.rx_fragments = stats.rx_fragments;
- edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
- edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
- edev->stats.tx_128_to_255_byte_packets =
- stats.tx_128_to_255_byte_packets;
- edev->stats.tx_256_to_511_byte_packets =
- stats.tx_256_to_511_byte_packets;
- edev->stats.tx_512_to_1023_byte_packets =
- stats.tx_512_to_1023_byte_packets;
- edev->stats.tx_1024_to_1518_byte_packets =
- stats.tx_1024_to_1518_byte_packets;
- edev->stats.tx_1519_to_2047_byte_packets =
- stats.tx_1519_to_2047_byte_packets;
- edev->stats.tx_2048_to_4095_byte_packets =
- stats.tx_2048_to_4095_byte_packets;
- edev->stats.tx_4096_to_9216_byte_packets =
- stats.tx_4096_to_9216_byte_packets;
- edev->stats.tx_9217_to_16383_byte_packets =
- stats.tx_9217_to_16383_byte_packets;
- edev->stats.tx_pause_frames = stats.tx_pause_frames;
- edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
- edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
- edev->stats.tx_total_collisions = stats.tx_total_collisions;
- edev->stats.brb_truncates = stats.brb_truncates;
- edev->stats.brb_discards = stats.brb_discards;
- edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
+
+ p_common->no_buff_discards = stats.common.no_buff_discards;
+ p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
+ p_common->ttl0_discard = stats.common.ttl0_discard;
+ p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
+ p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
+ p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
+ p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
+ p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
+ p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
+ p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
+ p_common->mac_filter_discards = stats.common.mac_filter_discards;
+
+ p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
+ p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
+ p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
+ p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
+ p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
+ p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
+ p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
+ p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
+ p_common->coalesced_events = stats.common.tpa_coalesced_events;
+ p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
+ p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
+ p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;
+
+ p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
+ p_common->rx_65_to_127_byte_packets =
+ stats.common.rx_65_to_127_byte_packets;
+ p_common->rx_128_to_255_byte_packets =
+ stats.common.rx_128_to_255_byte_packets;
+ p_common->rx_256_to_511_byte_packets =
+ stats.common.rx_256_to_511_byte_packets;
+ p_common->rx_512_to_1023_byte_packets =
+ stats.common.rx_512_to_1023_byte_packets;
+ p_common->rx_1024_to_1518_byte_packets =
+ stats.common.rx_1024_to_1518_byte_packets;
+ p_common->rx_crc_errors = stats.common.rx_crc_errors;
+ p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
+ p_common->rx_pause_frames = stats.common.rx_pause_frames;
+ p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
+ p_common->rx_align_errors = stats.common.rx_align_errors;
+ p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
+ p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
+ p_common->rx_jabbers = stats.common.rx_jabbers;
+ p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
+ p_common->rx_fragments = stats.common.rx_fragments;
+ p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
+ p_common->tx_65_to_127_byte_packets =
+ stats.common.tx_65_to_127_byte_packets;
+ p_common->tx_128_to_255_byte_packets =
+ stats.common.tx_128_to_255_byte_packets;
+ p_common->tx_256_to_511_byte_packets =
+ stats.common.tx_256_to_511_byte_packets;
+ p_common->tx_512_to_1023_byte_packets =
+ stats.common.tx_512_to_1023_byte_packets;
+ p_common->tx_1024_to_1518_byte_packets =
+ stats.common.tx_1024_to_1518_byte_packets;
+ p_common->tx_pause_frames = stats.common.tx_pause_frames;
+ p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
+ p_common->brb_truncates = stats.common.brb_truncates;
+ p_common->brb_discards = stats.common.brb_discards;
+ p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
+
+ if (QEDE_IS_BB(edev)) {
+ struct qede_stats_bb *p_bb = &edev->stats.bb;
+
+ p_bb->rx_1519_to_1522_byte_packets =
+ stats.bb.rx_1519_to_1522_byte_packets;
+ p_bb->rx_1519_to_2047_byte_packets =
+ stats.bb.rx_1519_to_2047_byte_packets;
+ p_bb->rx_2048_to_4095_byte_packets =
+ stats.bb.rx_2048_to_4095_byte_packets;
+ p_bb->rx_4096_to_9216_byte_packets =
+ stats.bb.rx_4096_to_9216_byte_packets;
+ p_bb->rx_9217_to_16383_byte_packets =
+ stats.bb.rx_9217_to_16383_byte_packets;
+ p_bb->tx_1519_to_2047_byte_packets =
+ stats.bb.tx_1519_to_2047_byte_packets;
+ p_bb->tx_2048_to_4095_byte_packets =
+ stats.bb.tx_2048_to_4095_byte_packets;
+ p_bb->tx_4096_to_9216_byte_packets =
+ stats.bb.tx_4096_to_9216_byte_packets;
+ p_bb->tx_9217_to_16383_byte_packets =
+ stats.bb.tx_9217_to_16383_byte_packets;
+ p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
+ p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
+ } else {
+ struct qede_stats_ah *p_ah = &edev->stats.ah;
+
+ p_ah->rx_1519_to_max_byte_packets =
+ stats.ah.rx_1519_to_max_byte_packets;
+ p_ah->tx_1519_to_max_byte_packets =
+ stats.ah.tx_1519_to_max_byte_packets;
+ }
}
static void qede_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct qede_dev *edev = netdev_priv(dev);
+ struct qede_stats_common *p_common;
qede_fill_by_demand_stats(edev);
+ p_common = &edev->stats.common;
- stats->rx_packets = edev->stats.rx_ucast_pkts +
- edev->stats.rx_mcast_pkts +
- edev->stats.rx_bcast_pkts;
- stats->tx_packets = edev->stats.tx_ucast_pkts +
- edev->stats.tx_mcast_pkts +
- edev->stats.tx_bcast_pkts;
-
- stats->rx_bytes = edev->stats.rx_ucast_bytes +
- edev->stats.rx_mcast_bytes +
- edev->stats.rx_bcast_bytes;
+ stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
+ p_common->rx_bcast_pkts;
+ stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
+ p_common->tx_bcast_pkts;
- stats->tx_bytes = edev->stats.tx_ucast_bytes +
- edev->stats.tx_mcast_bytes +
- edev->stats.tx_bcast_bytes;
+ stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
+ p_common->rx_bcast_bytes;
+ stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
+ p_common->tx_bcast_bytes;
- stats->tx_errors = edev->stats.tx_err_drop_pkts;
- stats->multicast = edev->stats.rx_mcast_pkts +
- edev->stats.rx_bcast_pkts;
+ stats->tx_errors = p_common->tx_err_drop_pkts;
+ stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;
- stats->rx_fifo_errors = edev->stats.no_buff_discards;
+ stats->rx_fifo_errors = p_common->no_buff_discards;
- stats->collisions = edev->stats.tx_total_collisions;
- stats->rx_crc_errors = edev->stats.rx_crc_errors;
- stats->rx_frame_errors = edev->stats.rx_align_errors;
+ if (QEDE_IS_BB(edev))
+ stats->collisions = edev->stats.bb.tx_total_collisions;
+ stats->rx_crc_errors = p_common->rx_crc_errors;
+ stats->rx_frame_errors = p_common->rx_align_errors;
}
#ifdef CONFIG_QED_SRIOV
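
The demand-stats copy above is now split into a chip-agnostic block plus a BB- or AH-specific block selected by QEDE_IS_BB(). A minimal sketch of the qede.h layout this implies; member lists are abbreviated, and whether the bb/ah halves overlay each other in a union is an assumption here, not taken from this hunk:

/* sketch only -- the authoritative definitions live in qede.h */
struct qede_stats_common {
	u64 rx_ucast_pkts;
	u64 tx_ucast_pkts;
	/* ... every counter both ASICs expose ... */
};

struct qede_stats_bb {
	u64 rx_1519_to_1522_byte_packets;
	u64 tx_total_collisions;
	/* ... BB-only MAC counters ... */
};

struct qede_stats_ah {
	u64 rx_1519_to_max_byte_packets;
	u64 tx_1519_to_max_byte_packets;
};

struct qede_stats {
	struct qede_stats_common common;
	union {				/* assumed: only one ASIC's block is live */
		struct qede_stats_bb bb;
		struct qede_stats_ah ah;
	};
};
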
@@ -532,6 +558,9 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_udp_tunnel_del = qede_udp_tunnel_del,
.ndo_features_check = qede_features_check,
.ndo_xdp = qede_xdp,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = qede_rx_flow_steer,
+#endif
};
/* -------------------------------------------------------------------------
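
The new .ndo_rx_flow_steer entry hooks qede into accelerated RFS; the handler itself (qede_rx_flow_steer, in qede_filter.c) is not part of this hunk. For reference, the callback prototype it has to match, from struct net_device_ops in include/linux/netdevice.h:

/* Called by the stack to steer the given flow to the RX queue serving the
 * CPU that is consuming it; returns a filter id on success or -errno.
 */
int (*ndo_rx_flow_steer)(struct net_device *dev,
			 const struct sk_buff *skb,
			 u16 rxq_index, u32 flow_id);
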
@@ -581,7 +610,8 @@ static void qede_init_ndev(struct qede_dev *edev)
{
struct net_device *ndev = edev->ndev;
struct pci_dev *pdev = edev->pdev;
- u32 hw_features;
+ bool udp_tunnel_enable = false;
+ netdev_features_t hw_features;
pci_set_drvdata(pdev, ndev);
@@ -603,16 +633,33 @@ static void qede_init_ndev(struct qede_dev *edev)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
- /* Encap features*/
- hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_TSO_ECN | NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_GRE_CSUM;
- ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
- NETIF_F_TSO6 | NETIF_F_GSO_GRE |
- NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_GRE_CSUM;
+ if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
+ hw_features |= NETIF_F_NTUPLE;
+
+ if (edev->dev_info.common.vxlan_enable ||
+ edev->dev_info.common.geneve_enable)
+ udp_tunnel_enable = true;
+
+ if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
+ hw_features |= NETIF_F_TSO_ECN;
+ ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+ NETIF_F_RXCSUM;
+ }
+
+ if (udp_tunnel_enable) {
+ hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM);
+ ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM);
+ }
+
+ if (edev->dev_info.common.gre_enable) {
+ hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
+ ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM);
+ }
ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
NETIF_F_HIGHDMA;
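
Tunnel offloads and NTUPLE filtering are now advertised only when the qed core reports the matching capability. The flags consulted above come from edev->dev_info.common, filled in by the qed layer; a sketch of the assumed fields (names as used in the hunk, exact types assumed):

/* subset of struct qed_dev_info relevant to the feature setup above */
struct qed_dev_info {
	u8	num_hwfns;	/* NTUPLE/aRFS only when a single hw-function owns the port */
	bool	vxlan_enable;
	bool	gre_enable;
	bool	geneve_enable;
	/* ... */
};
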
@@ -750,7 +797,6 @@ static void qede_sp_task(struct work_struct *work)
{
struct qede_dev *edev = container_of(work, struct qede_dev,
sp_task.work);
- struct qed_dev *cdev = edev->cdev;
__qede_lock(edev);
@@ -758,24 +804,12 @@ static void qede_sp_task(struct work_struct *work)
if (edev->state == QEDE_STATE_OPEN)
qede_config_rx_mode(edev->ndev);
- if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
- struct qed_tunn_params tunn_params;
-
- memset(&tunn_params, 0, sizeof(tunn_params));
- tunn_params.update_vxlan_port = 1;
- tunn_params.vxlan_port = edev->vxlan_dst_port;
- qed_ops->tunn_config(cdev, &tunn_params);
- }
-
- if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
- struct qed_tunn_params tunn_params;
-
- memset(&tunn_params, 0, sizeof(tunn_params));
- tunn_params.update_geneve_port = 1;
- tunn_params.geneve_port = edev->geneve_dst_port;
- qed_ops->tunn_config(cdev, &tunn_params);
+#ifdef CONFIG_RFS_ACCEL
+ if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
+ if (edev->state == QEDE_STATE_OPEN)
+ qede_process_arfs_filters(edev, false);
}
-
+#endif
__qede_unlock(edev);
}
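
The slowpath task no longer pushes VXLAN/GENEVE port updates; besides RX-mode configuration it now only drains aRFS filter work. A hedged sketch of the assumed producer side -- the real scheduling lives in qede_filter.c and may differ in detail:

/* hypothetical helper: queue aRFS processing for qede_sp_task() above */
static void qede_arfs_schedule(struct qede_dev *edev)
{
	set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);
}
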
@@ -786,6 +820,9 @@ static void qede_update_pf_params(struct qed_dev *cdev)
/* 64 rx + 64 tx + 64 XDP */
memset(&pf_params, 0, sizeof(struct qed_pf_params));
pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;
+#ifdef CONFIG_RFS_ACCEL
+ pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
+#endif
qed_ops->common->update_pf_params(cdev, &pf_params);
}
@@ -870,13 +907,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
/* PTP not supported on VFs */
- if (!is_vf) {
- rc = qede_ptp_register_phc(edev);
- if (rc) {
- DP_NOTICE(edev, "Cannot register PHC\n");
- goto err5;
- }
- }
+ if (!is_vf)
+ qede_ptp_enable(edev, true);
edev->ops->register_ops(cdev, &qede_ll_ops, edev);
@@ -891,8 +923,6 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
return 0;
-err5:
- unregister_netdev(edev->ndev);
err4:
qede_roce_dev_remove(edev);
err3:
@@ -940,11 +970,10 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
DP_INFO(edev, "Starting qede_remove\n");
- cancel_delayed_work_sync(&edev->sp_task);
-
unregister_netdev(ndev);
+ cancel_delayed_work_sync(&edev->sp_task);
- qede_ptp_remove(edev);
+ qede_ptp_disable(edev);
qede_roce_dev_remove(edev);
@@ -1165,9 +1194,11 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
rxq->num_rx_buffers = edev->q_num_rx_buffers;
rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
+ rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : 0;
- if (rxq->rx_buf_size > PAGE_SIZE)
- rxq->rx_buf_size = PAGE_SIZE;
+ /* Make sure that the headroom and payload fit in a single page */
+ if (rxq->rx_buf_size + rxq->rx_headroom > PAGE_SIZE)
+ rxq->rx_buf_size = PAGE_SIZE - rxq->rx_headroom;
/* Segment size to split a page into multiple equal parts,
* unless XDP is used in which case we'd use the entire page.
@@ -1229,7 +1260,7 @@ static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
/* Free the parallel SW ring */
if (txq->is_xdp)
- kfree(txq->sw_tx_ring.pages);
+ kfree(txq->sw_tx_ring.xdp);
else
kfree(txq->sw_tx_ring.skbs);
@@ -1247,9 +1278,9 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
/* Allocate the parallel driver ring for Tx buffers */
if (txq->is_xdp) {
- size = sizeof(*txq->sw_tx_ring.pages) * TX_RING_SIZE;
- txq->sw_tx_ring.pages = kzalloc(size, GFP_KERNEL);
- if (!txq->sw_tx_ring.pages)
+ size = sizeof(*txq->sw_tx_ring.xdp) * TX_RING_SIZE;
+ txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
+ if (!txq->sw_tx_ring.xdp)
goto err;
} else {
size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE;
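
Renaming sw_tx_ring.pages to sw_tx_ring.xdp reflects that the XDP TX ring tracks more than a bare page pointer. A sketch of the assumed qede.h shape behind the two branches above; the exact member names are an assumption:

/* per-entry bookkeeping for the two flavours of the parallel SW ring */
struct sw_tx_xdp {
	struct page	*page;
	dma_addr_t	mapping;
};

union {
	struct sw_tx_bd		*skbs;	/* regular L2 TX queue */
	struct sw_tx_xdp	*xdp;	/* XDP forwarding TX queue */
} sw_tx_ring;
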
@@ -1466,6 +1497,18 @@ static int qede_req_msix_irqs(struct qede_dev *edev)
}
for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
+#ifdef CONFIG_RFS_ACCEL
+ struct qede_fastpath *fp = &edev->fp_array[i];
+
+ if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
+ rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
+ edev->int_info.msix[i].vector);
+ if (rc) {
+ DP_ERR(edev, "Failed to add CPU rmap\n");
+ qede_free_arfs(edev);
+ }
+ }
+#endif
rc = request_irq(edev->int_info.msix[i].vector,
qede_msix_fp_int, 0, edev->fp_array[i].name,
&edev->fp_array[i]);
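
irq_cpu_rmap_add() ties each RX vector to the reverse IRQ-to-CPU map that aRFS consults when steering flows. The map itself is presumably allocated and freed in qede_alloc_arfs()/qede_free_arfs() (qede_filter.c, not shown here); a hedged sketch of that side using the generic linux/cpu_rmap.h API, with QEDE_RSS_COUNT() standing in for "number of RX queues":

#include <linux/cpu_rmap.h>

/* hypothetical allocation counterpart to the irq_cpu_rmap_add() calls above */
static int qede_alloc_rx_rmap(struct qede_dev *edev)
{
	edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
	return edev->ndev->rx_cpu_rmap ? 0 : -ENOMEM;
}

static void qede_free_rx_rmap(struct qede_dev *edev)
{
	free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
	edev->ndev->rx_cpu_rmap = NULL;
}
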
@@ -1827,8 +1870,6 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
qede_roce_dev_event_close(edev);
edev->state = QEDE_STATE_CLOSED;
- qede_ptp_stop(edev);
-
/* Close OS Tx */
netif_tx_disable(edev->ndev);
netif_carrier_off(edev->ndev);
@@ -1847,7 +1888,12 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
qede_vlan_mark_nonconfigured(edev);
edev->ops->fastpath_stop(edev->cdev);
-
+#ifdef CONFIG_RFS_ACCEL
+ if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
+ qede_poll_for_freeing_arfs_filters(edev);
+ qede_free_arfs(edev);
+ }
+#endif
/* Release the interrupts */
qede_sync_free_irqs(edev);
edev->ops->common->set_fp_int(edev->cdev, 0);
@@ -1899,6 +1945,13 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
if (rc)
goto err2;
+#ifdef CONFIG_RFS_ACCEL
+ if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
+ rc = qede_alloc_arfs(edev);
+ if (rc)
+ DP_NOTICE(edev, "aRFS memory allocation failed\n");
+ }
+#endif
qede_napi_add_enable(edev);
DP_INFO(edev, "Napi added and enabled\n");
@@ -1925,13 +1978,10 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
qede_roce_dev_event_open(edev);
- qede_ptp_start(edev, (mode == QEDE_LOAD_NORMAL));
-
edev->state = QEDE_STATE_OPEN;
DP_INFO(edev, "Ending successfully qede load\n");
-
goto out;
err4:
qede_sync_free_irqs(edev);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 2e62dec09bd7..24f06e2ef43e 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -181,6 +181,7 @@ static void qede_ptp_task(struct work_struct *work)
skb_tstamp_tx(ptp->tx_skb, &shhwtstamps);
dev_kfree_skb_any(ptp->tx_skb);
ptp->tx_skb = NULL;
+ clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
DP_VERBOSE(edev, QED_MSG_DEBUG,
"Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
@@ -206,23 +207,10 @@ static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
return phc_cycles;
}
-static void qede_ptp_init_cc(struct qede_dev *edev)
-{
- struct qede_ptp *ptp;
-
- ptp = edev->ptp;
- if (!ptp)
- return;
-
- memset(&ptp->cc, 0, sizeof(ptp->cc));
- ptp->cc.read = qede_ptp_read_cc;
- ptp->cc.mask = CYCLECOUNTER_MASK(64);
- ptp->cc.shift = 0;
- ptp->cc.mult = 1;
-}
-
static int qede_ptp_cfg_filters(struct qede_dev *edev)
{
+ enum qed_ptp_hwtstamp_tx_type tx_type = QED_PTP_HWTSTAMP_TX_ON;
+ enum qed_ptp_filter_type rx_filter = QED_PTP_FILTER_NONE;
struct qede_ptp *ptp = edev->ptp;
if (!ptp)
@@ -236,7 +224,12 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev)
switch (ptp->tx_type) {
case HWTSTAMP_TX_ON:
edev->flags |= QEDE_TX_TIMESTAMPING_EN;
- ptp->ops->hwtstamp_tx_on(edev->cdev);
+ tx_type = QED_PTP_HWTSTAMP_TX_ON;
+ break;
+
+ case HWTSTAMP_TX_OFF:
+ edev->flags &= ~QEDE_TX_TIMESTAMPING_EN;
+ tx_type = QED_PTP_HWTSTAMP_TX_OFF;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
@@ -247,42 +240,57 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev)
spin_lock_bh(&ptp->lock);
switch (ptp->rx_filter) {
case HWTSTAMP_FILTER_NONE:
+ rx_filter = QED_PTP_FILTER_NONE;
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
ptp->rx_filter = HWTSTAMP_FILTER_NONE;
+ rx_filter = QED_PTP_FILTER_ALL;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ rx_filter = QED_PTP_FILTER_V1_L4_EVENT;
+ break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
/* Initialize PTP detection for UDP/IPv4 events */
- ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4);
+ rx_filter = QED_PTP_FILTER_V1_L4_GEN;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ rx_filter = QED_PTP_FILTER_V2_L4_EVENT;
+ break;
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
- ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4_IPV6);
+ rx_filter = QED_PTP_FILTER_V2_L4_GEN;
break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ rx_filter = QED_PTP_FILTER_V2_L2_EVENT;
+ break;
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
/* Initialize PTP detection L2 events */
- ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_L2);
+ rx_filter = QED_PTP_FILTER_V2_L2_GEN;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ rx_filter = QED_PTP_FILTER_V2_EVENT;
+ break;
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
/* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
- ptp->ops->cfg_rx_filters(edev->cdev,
- QED_PTP_FILTER_L2_IPV4_IPV6);
+ rx_filter = QED_PTP_FILTER_V2_GEN;
break;
}
+ ptp->ops->cfg_filters(edev->cdev, rx_filter, tx_type);
+
spin_unlock_bh(&ptp->lock);
return 0;
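
The per-case hwtstamp_tx_on()/cfg_rx_filters() calls collapse into a single cfg_filters() op that takes both the RX filter and the TX timestamp type. A sketch of the prototype implied by the call above; it would sit alongside the other qed PTP ops, with the exact spelling assumed rather than quoted:

int (*cfg_filters)(struct qed_dev *cdev,
		   enum qed_ptp_filter_type rx_filter,
		   enum qed_ptp_hwtstamp_tx_type tx_type);
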
@@ -324,61 +332,6 @@ int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
sizeof(config)) ? -EFAULT : 0;
}
-/* Called during load, to initialize PTP-related stuff */
-static void qede_ptp_init(struct qede_dev *edev, bool init_tc)
-{
- struct qede_ptp *ptp;
- int rc;
-
- ptp = edev->ptp;
- if (!ptp)
- return;
-
- spin_lock_init(&ptp->lock);
-
- /* Configure PTP in HW */
- rc = ptp->ops->enable(edev->cdev);
- if (rc) {
- DP_ERR(edev, "Stopping PTP initialization\n");
- return;
- }
-
- /* Init work queue for Tx timestamping */
- INIT_WORK(&ptp->work, qede_ptp_task);
-
- /* Init cyclecounter and timecounter. This is done only in the first
- * load. If done in every load, PTP application will fail when doing
- * unload / load (e.g. MTU change) while it is running.
- */
- if (init_tc) {
- qede_ptp_init_cc(edev);
- timecounter_init(&ptp->tc, &ptp->cc,
- ktime_to_ns(ktime_get_real()));
- }
-
- DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP initialization is successful\n");
-}
-
-void qede_ptp_start(struct qede_dev *edev, bool init_tc)
-{
- qede_ptp_init(edev, init_tc);
- qede_ptp_cfg_filters(edev);
-}
-
-void qede_ptp_remove(struct qede_dev *edev)
-{
- struct qede_ptp *ptp;
-
- ptp = edev->ptp;
- if (ptp && ptp->clock) {
- ptp_clock_unregister(ptp->clock);
- ptp->clock = NULL;
- }
-
- kfree(ptp);
- edev->ptp = NULL;
-}
-
int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
{
struct qede_ptp *ptp = edev->ptp;
@@ -417,8 +370,7 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
return 0;
}
-/* Called during unload, to stop PTP-related stuff */
-void qede_ptp_stop(struct qede_dev *edev)
+void qede_ptp_disable(struct qede_dev *edev)
{
struct qede_ptp *ptp;
@@ -426,6 +378,11 @@ void qede_ptp_stop(struct qede_dev *edev)
if (!ptp)
return;
+ if (ptp->clock) {
+ ptp_clock_unregister(ptp->clock);
+ ptp->clock = NULL;
+ }
+
/* Cancel PTP work queue. Should be done after the Tx queues are
* drained to prevent additional scheduling.
*/
@@ -439,11 +396,54 @@ void qede_ptp_stop(struct qede_dev *edev)
spin_lock_bh(&ptp->lock);
ptp->ops->disable(edev->cdev);
spin_unlock_bh(&ptp->lock);
+
+ kfree(ptp);
+ edev->ptp = NULL;
+}
+
+static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
+{
+ struct qede_ptp *ptp;
+ int rc;
+
+ ptp = edev->ptp;
+ if (!ptp)
+ return -EINVAL;
+
+ spin_lock_init(&ptp->lock);
+
+ /* Configure PTP in HW */
+ rc = ptp->ops->enable(edev->cdev);
+ if (rc) {
+ DP_INFO(edev, "PTP HW enable failed\n");
+ return rc;
+ }
+
+ /* Init work queue for Tx timestamping */
+ INIT_WORK(&ptp->work, qede_ptp_task);
+
+ /* Init cyclecounter and timecounter. This is done only in the first
+ * load. If done in every load, PTP application will fail when doing
+ * unload / load (e.g. MTU change) while it is running.
+ */
+ if (init_tc) {
+ memset(&ptp->cc, 0, sizeof(ptp->cc));
+ ptp->cc.read = qede_ptp_read_cc;
+ ptp->cc.mask = CYCLECOUNTER_MASK(64);
+ ptp->cc.shift = 0;
+ ptp->cc.mult = 1;
+
+ timecounter_init(&ptp->tc, &ptp->cc,
+ ktime_to_ns(ktime_get_real()));
+ }
+
+ return rc;
}
-int qede_ptp_register_phc(struct qede_dev *edev)
+int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
{
struct qede_ptp *ptp;
+ int rc;
ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
if (!ptp) {
@@ -454,14 +454,19 @@ int qede_ptp_register_phc(struct qede_dev *edev)
ptp->edev = edev;
ptp->ops = edev->ops->ptp;
if (!ptp->ops) {
- kfree(ptp);
- edev->ptp = NULL;
- DP_ERR(edev, "PTP clock registeration failed\n");
- return -EIO;
+ DP_INFO(edev, "PTP enable failed\n");
+ rc = -EIO;
+ goto err1;
}
edev->ptp = ptp;
+ rc = qede_ptp_init(edev, init_tc);
+ if (rc)
+ goto err1;
+
+ qede_ptp_cfg_filters(edev);
+
/* Fill the ptp_clock_info struct and register PTP clock */
ptp->clock_info.owner = THIS_MODULE;
snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name);
@@ -478,13 +483,21 @@ int qede_ptp_register_phc(struct qede_dev *edev)
ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
if (IS_ERR(ptp->clock)) {
- ptp->clock = NULL;
- kfree(ptp);
- edev->ptp = NULL;
+ rc = -EINVAL;
DP_ERR(edev, "PTP clock registeration failed\n");
+ goto err2;
}
return 0;
+
+err2:
+ qede_ptp_disable(edev);
+ ptp->clock = NULL;
+err1:
+ kfree(ptp);
+ edev->ptp = NULL;
+
+ return rc;
}
void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
@@ -495,6 +508,9 @@ void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
if (!ptp)
return;
+ if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags))
+ return;
+
if (unlikely(!(edev->flags & QEDE_TX_TIMESTAMPING_EN))) {
DP_NOTICE(edev,
"Tx timestamping was not enabled, this packet will not be timestamped\n");
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
index f328f9bba53a..691a14c4b2c5 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
@@ -40,10 +40,8 @@
void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
-void qede_ptp_start(struct qede_dev *edev, bool init_tc);
-void qede_ptp_stop(struct qede_dev *edev);
-void qede_ptp_remove(struct qede_dev *edev);
-int qede_ptp_register_phc(struct qede_dev *edev);
+void qede_ptp_disable(struct qede_dev *edev);
+int qede_ptp_enable(struct qede_dev *edev, bool init_tc);
int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts);
static inline void qede_ptp_record_rx_ts(struct qede_dev *edev,
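
With the header trimmed to an enable/disable pair, the PTP lifecycle collapses to one call on each side, as wired up in the qede_main.c hunks above:

	if (!is_vf)
		qede_ptp_enable(edev, true);	/* __qede_probe(): PHC + filters */
	...
	qede_ptp_disable(edev);			/* __qede_remove(): unregister + free */
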
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index d7107055ec60..2f656f395f39 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -128,6 +128,8 @@ static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
return 0;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return 0;
pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index e9e647072596..1188d420fe53 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4686,7 +4686,8 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
/*
* Set up the operating parameters.
*/
- qdev->workqueue = alloc_ordered_workqueue(ndev->name, WQ_MEM_RECLAIM);
+ qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+ ndev->name);
INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
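
The qlge change stops using the interface name directly as the format string: alloc_ordered_workqueue()'s first argument is printf-style, so a name containing '%' would be parsed as conversion specifiers. Before and after, as in the hunk above:

/* unsafe: ndev->name is interpreted as a format string */
qdev->workqueue = alloc_ordered_workqueue(ndev->name, WQ_MEM_RECLAIM);

/* safe: fixed "%s" format, name passed as an argument */
qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, ndev->name);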