Diffstat (limited to 'drivers/infiniband/hw/bnxt_re')
-rw-r--r--  drivers/infiniband/hw/bnxt_re/Makefile           3
-rw-r--r--  drivers/infiniband/hw/bnxt_re/bnxt_re.h        102
-rw-r--r--  drivers/infiniband/hw/bnxt_re/debugfs.c        359
-rw-r--r--  drivers/infiniband/hw/bnxt_re/debugfs.h         36
-rw-r--r--  drivers/infiniband/hw/bnxt_re/hw_counters.c    107
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c       529
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.h        36
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c          1075
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c       246
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.h        61
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.c      76
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.h      10
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_res.c       37
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_res.h       59
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.c       211
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.h        21
-rw-r--r--  drivers/infiniband/hw/bnxt_re/roce_hsi.h       123
17 files changed, 2436 insertions, 655 deletions
diff --git a/drivers/infiniband/hw/bnxt_re/Makefile b/drivers/infiniband/hw/bnxt_re/Makefile
index ee9bb1be61ea..f63417d2ccc6 100644
--- a/drivers/infiniband/hw/bnxt_re/Makefile
+++ b/drivers/infiniband/hw/bnxt_re/Makefile
@@ -4,4 +4,5 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/broadcom/bnxt
obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re.o
bnxt_re-y := main.o ib_verbs.o \
qplib_res.o qplib_rcfw.o \
- qplib_sp.o qplib_fp.o hw_counters.o
+ qplib_sp.o qplib_fp.o hw_counters.o \
+ debugfs.o
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 9dca451ed522..6df5a2738c95 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -53,12 +53,6 @@
#define BNXT_RE_MAX_MR_SIZE_HIGH BIT_ULL(39)
#define BNXT_RE_MAX_MR_SIZE BNXT_RE_MAX_MR_SIZE_HIGH
-#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
-#define BNXT_RE_MAX_MRW_COUNT (64 * 1024)
-#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
-#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
-#define BNXT_RE_MAX_MRW_COUNT_64K (64 * 1024)
-#define BNXT_RE_MAX_MRW_COUNT_256K (256 * 1024)
/* Number of MRs to reserve for PF, leaving remainder for VFs */
#define BNXT_RE_RESVD_MR_FOR_PF (32 * 1024)
@@ -91,6 +85,15 @@ struct bnxt_re_ring_attr {
u8 mode;
};
+/*
+ * Data structure and defines to handle
+ * recovery
+ */
+#define BNXT_RE_PRE_RECOVERY_REMOVE 0x1
+#define BNXT_RE_COMPLETE_REMOVE 0x2
+#define BNXT_RE_POST_RECOVERY_INIT 0x4
+#define BNXT_RE_COMPLETE_INIT 0x8
+
struct bnxt_re_sqp_entries {
struct bnxt_qplib_sge sge;
u64 wrid;
@@ -107,8 +110,11 @@ struct bnxt_re_gsi_context {
struct bnxt_re_sqp_entries *sqp_tbl;
};
-#define BNXT_RE_MIN_MSIX 2
-#define BNXT_RE_MAX_MSIX 9
+struct bnxt_re_en_dev_info {
+ struct bnxt_en_dev *en_dev;
+ struct bnxt_re_dev *rdev;
+};
+
#define BNXT_RE_AEQ_IDX 0
#define BNXT_RE_NQ_IDX 1
#define BNXT_RE_GEN_P5_MAX_VF 64
@@ -131,12 +137,36 @@ struct bnxt_re_pacing {
#define BNXT_RE_PACING_ALARM_TH_MULTIPLE 2 /* Multiple of pacing algo threshold */
/* Default do_pacing value when there is no congestion */
#define BNXT_RE_DBR_DO_PACING_NO_CONGESTION 0x7F /* 1 in 512 probability */
-#define BNXT_RE_DB_FIFO_ROOM_MASK 0x1FFF8000
-#define BNXT_RE_MAX_FIFO_DEPTH 0x2c00
-#define BNXT_RE_DB_FIFO_ROOM_SHIFT 15
+
+#define BNXT_RE_MAX_FIFO_DEPTH_P5 0x2c00
+#define BNXT_RE_MAX_FIFO_DEPTH_P7 0x8000
+
+#define BNXT_RE_MAX_FIFO_DEPTH(ctx) \
+ (bnxt_qplib_is_chip_gen_p7((ctx)) ? \
+ BNXT_RE_MAX_FIFO_DEPTH_P7 :\
+ BNXT_RE_MAX_FIFO_DEPTH_P5)
+
#define BNXT_RE_GRC_FIFO_REG_BASE 0x2000
+#define BNXT_RE_MIN_MSIX 2
+#define BNXT_RE_MAX_MSIX BNXT_MAX_ROCE_MSIX
+struct bnxt_re_nq_record {
+ struct bnxt_msix_entry msix_entries[BNXT_RE_MAX_MSIX];
+ struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX];
+ int num_msix;
+ /* serialize NQ access */
+ struct mutex load_lock;
+};
+
#define MAX_CQ_HASH_BITS (16)
+#define MAX_SRQ_HASH_BITS (16)
+
+static inline bool bnxt_re_chip_gen_p7(u16 chip_num)
+{
+ return (chip_num == CHIP_NUM_58818 ||
+ chip_num == CHIP_NUM_57608);
+}
+
struct bnxt_re_dev {
struct ib_device ibdev;
struct list_head list;
@@ -150,31 +180,28 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_ERR_DEVICE_DETACHED 17
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
- struct notifier_block nb;
+ struct auxiliary_device *adev;
unsigned int version, major, minor;
struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
- int num_msix;
int id;
struct delayed_work worker;
u8 cur_prio_map;
- /* FP Notification Queue (CQ & SRQ) */
- struct tasklet_struct nq_task;
-
/* RCFW Channel */
struct bnxt_qplib_rcfw rcfw;
- /* NQ */
- struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX];
+ /* NQ record */
+ struct bnxt_re_nq_record *nqr;
/* Device Resources */
- struct bnxt_qplib_dev_attr dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_ctx qplib_ctx;
struct bnxt_qplib_res qplib_res;
struct bnxt_qplib_dpi dpi_privileged;
+ struct bnxt_qplib_cq_coal_param cq_coalescing;
struct mutex qp_lock; /* protect qp list */
struct list_head qp_list;
@@ -192,6 +219,14 @@ struct bnxt_re_dev {
struct work_struct dbq_fifo_check_work;
struct delayed_work dbq_pacing_work;
DECLARE_HASHTABLE(cq_hash, MAX_CQ_HASH_BITS);
+ DECLARE_HASHTABLE(srq_hash, MAX_SRQ_HASH_BITS);
+ struct dentry *dbg_root;
+ struct dentry *qp_debugfs;
+ unsigned long event_bitmap;
+ struct bnxt_qplib_cc_param cc_param;
+ struct workqueue_struct *dcb_wq;
+ struct dentry *cc_config;
+ struct bnxt_re_dbg_cc_config_params *cc_config_params;
};
#define to_bnxt_re_dev(ptr, member) \
@@ -204,6 +239,10 @@ struct bnxt_re_dev {
#define BNXT_RE_CHECK_RC(x) ((x) && ((x) != -ETIMEDOUT))
void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev);
+int bnxt_re_assign_pma_port_counters(struct bnxt_re_dev *rdev, struct ib_mad *out_mad);
+int bnxt_re_assign_pma_port_ext_counters(struct bnxt_re_dev *rdev,
+ struct ib_mad *out_mad);
+
static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
{
if (rdev)
@@ -212,4 +251,29 @@ static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
}
extern const struct uapi_definition bnxt_re_uapi_defs[];
+
+static inline void bnxt_re_set_pacing_dev_state(struct bnxt_re_dev *rdev)
+{
+ rdev->qplib_res.pacing_data->dev_err_state =
+ test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
+}
+
+static inline int bnxt_re_read_context_allowed(struct bnxt_re_dev *rdev)
+{
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ||
+ rdev->rcfw.res->cctx->hwrm_intf_ver < HWRM_VERSION_READ_CTX)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+#define BNXT_RE_CONTEXT_TYPE_QPC_SIZE_P5 1088
+#define BNXT_RE_CONTEXT_TYPE_CQ_SIZE_P5 128
+#define BNXT_RE_CONTEXT_TYPE_MRW_SIZE_P5 128
+#define BNXT_RE_CONTEXT_TYPE_SRQ_SIZE_P5 192
+
+#define BNXT_RE_CONTEXT_TYPE_QPC_SIZE_P7 1088
+#define BNXT_RE_CONTEXT_TYPE_CQ_SIZE_P7 192
+#define BNXT_RE_CONTEXT_TYPE_MRW_SIZE_P7 192
+#define BNXT_RE_CONTEXT_TYPE_SRQ_SIZE_P7 192
+
#endif
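
The per-generation BNXT_RE_CONTEXT_TYPE_*_SIZE_* defines above pair with bnxt_re_read_context_allowed(): a caller first confirms the chip is P5/P7 and the firmware interface is at least HWRM_VERSION_READ_CTX, then sizes its destination buffer by generation. A minimal sketch of that selection for CQ context, using a hypothetical helper name (the real consumer is the debugfs context dump this series builds on):

/* Illustrative only: bnxt_re_cq_ctx_size() is not part of this patch */
static u32 bnxt_re_cq_ctx_size(struct bnxt_re_dev *rdev)
{
	if (bnxt_re_read_context_allowed(rdev))
		return 0;	/* read-context command unsupported on this chip/fw */
	return bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ?
	       BNXT_RE_CONTEXT_TYPE_CQ_SIZE_P7 :	/* 192 bytes */
	       BNXT_RE_CONTEXT_TYPE_CQ_SIZE_P5;		/* 128 bytes */
}
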
diff --git a/drivers/infiniband/hw/bnxt_re/debugfs.c b/drivers/infiniband/hw/bnxt_re/debugfs.c
new file mode 100644
index 000000000000..e632f1661b92
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/debugfs.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Copyright (c) 2024, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Description: Debugfs component of the bnxt_re driver
+ */
+
+#include <linux/debugfs.h>
+#include <linux/pci.h>
+#include <rdma/ib_addr.h>
+
+#include "bnxt_ulp.h"
+#include "roce_hsi.h"
+#include "qplib_res.h"
+#include "qplib_sp.h"
+#include "qplib_fp.h"
+#include "qplib_rcfw.h"
+#include "bnxt_re.h"
+#include "ib_verbs.h"
+#include "debugfs.h"
+
+static struct dentry *bnxt_re_debugfs_root;
+
+static const char * const bnxt_re_cc_gen0_name[] = {
+ "enable_cc",
+ "run_avg_weight_g",
+ "num_phase_per_state",
+ "init_cr",
+ "init_tr",
+ "tos_ecn",
+ "tos_dscp",
+ "alt_vlan_pcp",
+ "alt_vlan_dscp",
+ "rtt",
+ "cc_mode",
+ "tcp_cp",
+ "tx_queue",
+ "inactivity_cp",
+};
+
+static inline const char *bnxt_re_qp_state_str(u8 state)
+{
+ switch (state) {
+ case CMDQ_MODIFY_QP_NEW_STATE_RESET:
+ return "RST";
+ case CMDQ_MODIFY_QP_NEW_STATE_INIT:
+ return "INIT";
+ case CMDQ_MODIFY_QP_NEW_STATE_RTR:
+ return "RTR";
+ case CMDQ_MODIFY_QP_NEW_STATE_RTS:
+ return "RTS";
+ case CMDQ_MODIFY_QP_NEW_STATE_SQE:
+ return "SQER";
+ case CMDQ_MODIFY_QP_NEW_STATE_SQD:
+ return "SQD";
+ case CMDQ_MODIFY_QP_NEW_STATE_ERR:
+ return "ERR";
+ default:
+ return "Invalid QP state";
+ }
+}
+
+static inline const char *bnxt_re_qp_type_str(u8 type)
+{
+ switch (type) {
+ case CMDQ_CREATE_QP1_TYPE_GSI: return "QP1";
+ case CMDQ_CREATE_QP_TYPE_GSI: return "QP1";
+ case CMDQ_CREATE_QP_TYPE_RC: return "RC";
+ case CMDQ_CREATE_QP_TYPE_UD: return "UD";
+ case CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE: return "RAW_ETHERTYPE";
+ default: return "Invalid transport type";
+ }
+}
+
+static ssize_t qp_info_read(struct file *filep,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct bnxt_re_qp *qp = filep->private_data;
+ char *buf;
+ int len;
+
+ if (*ppos)
+ return 0;
+
+ buf = kasprintf(GFP_KERNEL,
+ "QPN\t\t: %d\n"
+ "transport\t: %s\n"
+ "state\t\t: %s\n"
+ "mtu\t\t: %d\n"
+ "timeout\t\t: %d\n"
+ "remote QPN\t: %d\n",
+ qp->qplib_qp.id,
+ bnxt_re_qp_type_str(qp->qplib_qp.type),
+ bnxt_re_qp_state_str(qp->qplib_qp.state),
+ qp->qplib_qp.mtu,
+ qp->qplib_qp.timeout,
+ qp->qplib_qp.dest_qpn);
+ if (!buf)
+ return -ENOMEM;
+ if (count < strlen(buf)) {
+ kfree(buf);
+ return -ENOSPC;
+ }
+ len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+ kfree(buf);
+ return len;
+}
+
+static const struct file_operations debugfs_qp_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = qp_info_read,
+};
+
+void bnxt_re_debug_add_qpinfo(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp)
+{
+ char resn[32];
+
+ sprintf(resn, "0x%x", qp->qplib_qp.id);
+ qp->dentry = debugfs_create_file(resn, 0400, rdev->qp_debugfs, qp, &debugfs_qp_fops);
+}
+
+void bnxt_re_debug_rem_qpinfo(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp)
+{
+ debugfs_remove(qp->dentry);
+}
+
+static int map_cc_config_offset_gen0_ext0(u32 offset, struct bnxt_qplib_cc_param *ccparam, u32 *val)
+{
+ u64 map_offset;
+
+ map_offset = BIT(offset);
+
+ switch (map_offset) {
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC:
+ *val = ccparam->enable;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_G:
+ *val = ccparam->g;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_NUMPHASEPERSTATE:
+ *val = ccparam->nph_per_state;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INIT_CR:
+ *val = ccparam->init_cr;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INIT_TR:
+ *val = ccparam->init_tr;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN:
+ *val = ccparam->tos_ecn;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP:
+ *val = ccparam->tos_dscp;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_VLAN_PCP:
+ *val = ccparam->alt_vlan_pcp;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_TOS_DSCP:
+ *val = ccparam->alt_tos_dscp;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_RTT:
+ *val = ccparam->rtt;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_CC_MODE:
+ *val = ccparam->cc_mode;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TCP_CP:
+ *val = ccparam->tcp_cp;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INACTIVITY_CP:
+ *val = ccparam->inact_th;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static ssize_t bnxt_re_cc_config_get(struct file *filp, char __user *buffer,
+ size_t usr_buf_len, loff_t *ppos)
+{
+ struct bnxt_re_cc_param *dbg_cc_param = filp->private_data;
+ struct bnxt_re_dev *rdev = dbg_cc_param->rdev;
+ struct bnxt_qplib_cc_param ccparam = {};
+ u32 offset = dbg_cc_param->offset;
+ char buf[16];
+ u32 val;
+ int rc;
+
+ rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, &ccparam);
+ if (rc)
+ return rc;
+
+ rc = map_cc_config_offset_gen0_ext0(offset, &ccparam, &val);
+ if (rc)
+ return rc;
+
+ rc = snprintf(buf, sizeof(buf), "%d\n", val);
+ if (rc < 0)
+ return rc;
+
+ return simple_read_from_buffer(buffer, usr_buf_len, ppos, (u8 *)(buf), rc);
+}
+
+static int bnxt_re_fill_gen0_ext0(struct bnxt_qplib_cc_param *ccparam, u32 offset, u32 val)
+{
+ u32 modify_mask;
+
+ modify_mask = BIT(offset);
+
+ switch (modify_mask) {
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC:
+ ccparam->enable = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_G:
+ ccparam->g = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_NUMPHASEPERSTATE:
+ ccparam->nph_per_state = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INIT_CR:
+ ccparam->init_cr = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INIT_TR:
+ ccparam->init_tr = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN:
+ ccparam->tos_ecn = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP:
+ ccparam->tos_dscp = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_VLAN_PCP:
+ ccparam->alt_vlan_pcp = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_TOS_DSCP:
+ ccparam->alt_tos_dscp = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_RTT:
+ ccparam->rtt = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_CC_MODE:
+ ccparam->cc_mode = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TCP_CP:
+ ccparam->tcp_cp = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TX_QUEUE:
+ return -EOPNOTSUPP;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_INACTIVITY_CP:
+ ccparam->inact_th = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TIME_PER_PHASE:
+ ccparam->time_pph = val;
+ break;
+ case CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_PKTS_PER_PHASE:
+ ccparam->pkts_pph = val;
+ break;
+ }
+
+ ccparam->mask = modify_mask;
+ return 0;
+}
+
+static int bnxt_re_configure_cc(struct bnxt_re_dev *rdev, u32 gen_ext, u32 offset, u32 val)
+{
+ struct bnxt_qplib_cc_param ccparam = { };
+ int rc;
+
+ if (gen_ext != CC_CONFIG_GEN0_EXT0)
+ return -EOPNOTSUPP;
+
+ rc = bnxt_re_fill_gen0_ext0(&ccparam, offset, val);
+ if (rc)
+ return rc;
+
+ bnxt_qplib_modify_cc(&rdev->qplib_res, &ccparam);
+ return 0;
+}
+
+static ssize_t bnxt_re_cc_config_set(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct bnxt_re_cc_param *dbg_cc_param = filp->private_data;
+ struct bnxt_re_dev *rdev = dbg_cc_param->rdev;
+ u32 offset = dbg_cc_param->offset;
+ u8 cc_gen = dbg_cc_param->cc_gen;
+ char buf[16];
+ u32 val;
+ int rc;
+
+ if (count >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, buffer, count))
+ return -EFAULT;
+
+ buf[count] = '\0';
+ if (kstrtou32(buf, 0, &val))
+ return -EINVAL;
+
+ rc = bnxt_re_configure_cc(rdev, cc_gen, offset, val);
+ return rc ? rc : count;
+}
+
+static const struct file_operations bnxt_re_cc_config_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = bnxt_re_cc_config_get,
+ .write = bnxt_re_cc_config_set,
+};
+
+void bnxt_re_debugfs_add_pdev(struct bnxt_re_dev *rdev)
+{
+ struct pci_dev *pdev = rdev->en_dev->pdev;
+ struct bnxt_re_dbg_cc_config_params *cc_params;
+ int i;
+
+ rdev->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), bnxt_re_debugfs_root);
+
+ rdev->qp_debugfs = debugfs_create_dir("QPs", rdev->dbg_root);
+ rdev->cc_config = debugfs_create_dir("cc_config", rdev->dbg_root);
+
+ rdev->cc_config_params = kzalloc(sizeof(*cc_params), GFP_KERNEL);
+
+ for (i = 0; i < BNXT_RE_CC_PARAM_GEN0; i++) {
+ struct bnxt_re_cc_param *tmp_params = &rdev->cc_config_params->gen0_parms[i];
+
+ tmp_params->rdev = rdev;
+ tmp_params->offset = i;
+ tmp_params->cc_gen = CC_CONFIG_GEN0_EXT0;
+ tmp_params->dentry = debugfs_create_file(bnxt_re_cc_gen0_name[i], 0400,
+ rdev->cc_config, tmp_params,
+ &bnxt_re_cc_config_ops);
+ }
+}
+
+void bnxt_re_debugfs_rem_pdev(struct bnxt_re_dev *rdev)
+{
+ debugfs_remove_recursive(rdev->qp_debugfs);
+ debugfs_remove_recursive(rdev->cc_config);
+ kfree(rdev->cc_config_params);
+ debugfs_remove_recursive(rdev->dbg_root);
+ rdev->dbg_root = NULL;
+}
+
+void bnxt_re_register_debugfs(void)
+{
+ bnxt_re_debugfs_root = debugfs_create_dir("bnxt_re", NULL);
+}
+
+void bnxt_re_unregister_debugfs(void)
+{
+ debugfs_remove(bnxt_re_debugfs_root);
+}
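
Each file under cc_config/ stores its creation index in bnxt_re_cc_param.offset, and both the get and set paths turn that index into exactly one CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_* bit via BIT(offset), so one debugfs file always reads or writes a single congestion-control parameter. A hedged userspace sketch of reading one knob; the debugfs mount point and PCI address below are assumptions, and note the files are created with mode 0400:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16] = {};
	int fd;

	/* Assumed path: <debugfs>/bnxt_re/<pci-address>/cc_config/<param> */
	fd = open("/sys/kernel/debug/bnxt_re/0000:3b:00.0/cc_config/enable_cc",
		  O_RDONLY);
	if (fd < 0)
		return 1;
	if (read(fd, buf, sizeof(buf) - 1) > 0)
		printf("enable_cc: %s", buf);	/* kernel emits "<val>\n" */
	close(fd);
	return 0;
}

A write of a single integer to the same file lands in bnxt_re_cc_config_set(), which validates it with kstrtou32() and pushes it to firmware through bnxt_qplib_modify_cc().
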
diff --git a/drivers/infiniband/hw/bnxt_re/debugfs.h b/drivers/infiniband/hw/bnxt_re/debugfs.h
new file mode 100644
index 000000000000..8f101df4e838
--- /dev/null
+++ b/drivers/infiniband/hw/bnxt_re/debugfs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (c) 2024, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Description: Debugfs header
+ */
+
+#ifndef __BNXT_RE_DEBUGFS__
+#define __BNXT_RE_DEBUGFS__
+
+void bnxt_re_debug_add_qpinfo(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp);
+void bnxt_re_debug_rem_qpinfo(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp);
+
+void bnxt_re_debugfs_add_pdev(struct bnxt_re_dev *rdev);
+void bnxt_re_debugfs_rem_pdev(struct bnxt_re_dev *rdev);
+
+void bnxt_re_register_debugfs(void);
+void bnxt_re_unregister_debugfs(void);
+
+#define CC_CONFIG_GEN_EXT(x, y) (((x) << 16) | (y))
+#define CC_CONFIG_GEN0_EXT0 CC_CONFIG_GEN_EXT(0, 0)
+
+#define BNXT_RE_CC_PARAM_GEN0 14
+
+struct bnxt_re_cc_param {
+ struct bnxt_re_dev *rdev;
+ struct dentry *dentry;
+ u32 offset;
+ u8 cc_gen;
+};
+
+struct bnxt_re_dbg_cc_config_params {
+ struct bnxt_re_cc_param gen0_parms[BNXT_RE_CC_PARAM_GEN0];
+};
+#endif
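
CC_CONFIG_GEN_EXT() packs a parameter-table generation into the upper 16 bits and an extension revision into the lower 16, so cc_gen values stay comparable as new tables are added. A sketch of the inverse decode, with hypothetical helper names that only mirror the packing macro (kernel u16/u32 types from linux/types.h assumed):

/* Illustrative decode helpers; not part of this patch */
static inline u16 cc_config_gen(u32 gen_ext)
{
	return gen_ext >> 16;
}

static inline u16 cc_config_ext(u32 gen_ext)
{
	return gen_ext & 0xffff;
}
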
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 128651c01595..44bb082e0a60 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -37,18 +37,11 @@
*
*/
-#include <linux/interrupt.h>
#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
#include <linux/pci.h>
-#include <linux/prefetch.h>
-#include <linux/delay.h>
+#include <rdma/ib_mad.h>
+#include <rdma/ib_pma.h>
-#include <rdma/ib_addr.h>
-
-#include "bnxt_ulp.h"
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
@@ -294,6 +287,96 @@ static void bnxt_re_copy_db_pacing_stats(struct bnxt_re_dev *rdev,
readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
}
+int bnxt_re_assign_pma_port_ext_counters(struct bnxt_re_dev *rdev, struct ib_mad *out_mad)
+{
+ struct ib_pma_portcounters_ext *pma_cnt_ext;
+ struct bnxt_qplib_ext_stat *estat = &rdev->stats.rstat.ext_stat;
+ struct ctx_hw_stats *hw_stats = NULL;
+ int rc;
+
+ hw_stats = rdev->qplib_ctx.stats.dma;
+
+ pma_cnt_ext = (struct ib_pma_portcounters_ext *)(out_mad->data + 40);
+ if (_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags)) {
+ u32 fid = PCI_FUNC(rdev->en_dev->pdev->devfn);
+
+ rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, estat);
+ if (rc)
+ return rc;
+ }
+
+ pma_cnt_ext = (struct ib_pma_portcounters_ext *)(out_mad->data + 40);
+ if ((bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) && rdev->is_virtfn) ||
+ !bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
+ pma_cnt_ext->port_xmit_data =
+ cpu_to_be64(le64_to_cpu(hw_stats->tx_ucast_bytes) / 4);
+ pma_cnt_ext->port_rcv_data =
+ cpu_to_be64(le64_to_cpu(hw_stats->rx_ucast_bytes) / 4);
+ pma_cnt_ext->port_xmit_packets =
+ cpu_to_be64(le64_to_cpu(hw_stats->tx_ucast_pkts));
+ pma_cnt_ext->port_rcv_packets =
+ cpu_to_be64(le64_to_cpu(hw_stats->rx_ucast_pkts));
+ pma_cnt_ext->port_unicast_rcv_packets =
+ cpu_to_be64(le64_to_cpu(hw_stats->rx_ucast_pkts));
+ pma_cnt_ext->port_unicast_xmit_packets =
+ cpu_to_be64(le64_to_cpu(hw_stats->tx_ucast_pkts));
+
+ } else {
+ pma_cnt_ext->port_rcv_packets = cpu_to_be64(estat->rx_roce_good_pkts);
+ pma_cnt_ext->port_rcv_data = cpu_to_be64(estat->rx_roce_good_bytes / 4);
+ pma_cnt_ext->port_xmit_packets = cpu_to_be64(estat->tx_roce_pkts);
+ pma_cnt_ext->port_xmit_data = cpu_to_be64(estat->tx_roce_bytes / 4);
+ pma_cnt_ext->port_unicast_rcv_packets = cpu_to_be64(estat->rx_roce_good_pkts);
+ pma_cnt_ext->port_unicast_xmit_packets = cpu_to_be64(estat->tx_roce_pkts);
+ }
+ return 0;
+}
+
+int bnxt_re_assign_pma_port_counters(struct bnxt_re_dev *rdev, struct ib_mad *out_mad)
+{
+ struct bnxt_qplib_ext_stat *estat = &rdev->stats.rstat.ext_stat;
+ struct ib_pma_portcounters *pma_cnt;
+ struct ctx_hw_stats *hw_stats = NULL;
+ int rc;
+
+ hw_stats = rdev->qplib_ctx.stats.dma;
+
+ pma_cnt = (struct ib_pma_portcounters *)(out_mad->data + 40);
+ if (_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags)) {
+ u32 fid = PCI_FUNC(rdev->en_dev->pdev->devfn);
+
+ rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, estat);
+ if (rc)
+ return rc;
+ }
+ if ((bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) && rdev->is_virtfn) ||
+ !bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
+ pma_cnt->port_rcv_packets =
+ cpu_to_be32((u32)(le64_to_cpu(hw_stats->rx_ucast_pkts)) & 0xFFFFFFFF);
+ pma_cnt->port_rcv_data =
+ cpu_to_be32((u32)((le64_to_cpu(hw_stats->rx_ucast_bytes) &
+ 0xFFFFFFFF) / 4));
+ pma_cnt->port_xmit_packets =
+ cpu_to_be32((u32)(le64_to_cpu(hw_stats->tx_ucast_pkts)) & 0xFFFFFFFF);
+ pma_cnt->port_xmit_data =
+ cpu_to_be32((u32)((le64_to_cpu(hw_stats->tx_ucast_bytes)
+ & 0xFFFFFFFF) / 4));
+ } else {
+ pma_cnt->port_rcv_packets = cpu_to_be32(estat->rx_roce_good_pkts);
+ pma_cnt->port_rcv_data = cpu_to_be32((estat->rx_roce_good_bytes / 4));
+ pma_cnt->port_xmit_packets = cpu_to_be32(estat->tx_roce_pkts);
+ pma_cnt->port_xmit_data = cpu_to_be32((estat->tx_roce_bytes / 4));
+ }
+ pma_cnt->port_rcv_constraint_errors = (u8)(le64_to_cpu(hw_stats->rx_discard_pkts) & 0xFF);
+ pma_cnt->port_rcv_errors = cpu_to_be16((u16)(le64_to_cpu(hw_stats->rx_error_pkts)
+ & 0xFFFF));
+ pma_cnt->port_xmit_constraint_errors = (u8)(le64_to_cpu(hw_stats->tx_error_pkts) & 0xFF);
+ pma_cnt->port_xmit_discards = cpu_to_be16((u16)(le64_to_cpu(hw_stats->tx_discard_pkts)
+ & 0xFFFF));
+
+ return 0;
+}
+
int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
u32 port, int index)
@@ -357,8 +440,8 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
goto done;
}
bnxt_re_copy_err_stats(rdev, stats, err_s);
- if (_is_ext_stats_supported(rdev->dev_attr.dev_cap_flags) &&
- !rdev->is_virtfn) {
+ if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
+ rdev->is_virtfn)) {
rc = bnxt_re_get_ext_stat(rdev, stats);
if (rc) {
clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
@@ -366,7 +449,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
goto done;
}
}
- if (rdev->pacing.dbr_pacing)
+ if (rdev->pacing.dbr_pacing && bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
bnxt_re_copy_db_pacing_stats(rdev, stats);
}
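
Both PMA helpers divide byte counters by four because the IB PortCounters and PortCountersExt attributes express PortXmitData/PortRcvData in 32-bit words, and every value is swapped twice: out of the little-endian DMA stats block into CPU order, then into the big-endian MAD payload. A minimal sketch of that conversion chain; the helper name is illustrative:

/* Illustrative: LE64 hardware byte counter -> BE64 PMA data field,
 * which the PMA counts in 32-bit words (hence the divide by four). */
static __be64 bnxt_re_bytes_to_pma_data(__le64 hw_bytes)
{
	return cpu_to_be64(le64_to_cpu(hw_bytes) / 4);
}
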
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index ce9c5bae83bf..063801384b2b 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -49,11 +49,10 @@
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>
+#include <rdma/ib_pma.h>
#include <rdma/uverbs_ioctl.h>
#include <linux/hashtable.h>
-#include "bnxt_ulp.h"
-
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
@@ -62,6 +61,7 @@
#include "bnxt_re.h"
#include "ib_verbs.h"
+#include "debugfs.h"
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>
@@ -94,9 +94,9 @@ static int __from_ib_access_flags(int iflags)
return qflags;
};
-static enum ib_access_flags __to_ib_access_flags(int qflags)
+static int __to_ib_access_flags(int qflags)
{
- enum ib_access_flags iflags = 0;
+ int iflags = 0;
if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
iflags |= IB_ACCESS_LOCAL_WRITE;
@@ -113,7 +113,57 @@ static enum ib_access_flags __to_ib_access_flags(int qflags)
if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
iflags |= IB_ACCESS_ON_DEMAND;
return iflags;
-};
+}
+
+static u8 __qp_access_flags_from_ib(struct bnxt_qplib_chip_ctx *cctx, int iflags)
+{
+ u8 qflags = 0;
+
+ if (!bnxt_qplib_is_chip_gen_p5_p7(cctx))
+ /* For Wh+ */
+ return (u8)__from_ib_access_flags(iflags);
+
+ /* For P5, P7 and later chips */
+ if (iflags & IB_ACCESS_LOCAL_WRITE)
+ qflags |= CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE;
+ if (iflags & IB_ACCESS_REMOTE_WRITE)
+ qflags |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
+ if (iflags & IB_ACCESS_REMOTE_READ)
+ qflags |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
+ if (iflags & IB_ACCESS_REMOTE_ATOMIC)
+ qflags |= CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC;
+
+ return qflags;
+}
+
+static int __qp_access_flags_to_ib(struct bnxt_qplib_chip_ctx *cctx, u8 qflags)
+{
+ int iflags = 0;
+
+ if (!bnxt_qplib_is_chip_gen_p5_p7(cctx))
+ /* For Wh+ */
+ return __to_ib_access_flags(qflags);
+
+ /* For P5, P7 and later chips */
+ if (qflags & CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE)
+ iflags |= IB_ACCESS_LOCAL_WRITE;
+ if (qflags & CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE)
+ iflags |= IB_ACCESS_REMOTE_WRITE;
+ if (qflags & CMDQ_MODIFY_QP_ACCESS_REMOTE_READ)
+ iflags |= IB_ACCESS_REMOTE_READ;
+ if (qflags & CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC)
+ iflags |= IB_ACCESS_REMOTE_ATOMIC;
+
+ return iflags;
+}
+
+static void bnxt_re_check_and_set_relaxed_ordering(struct bnxt_re_dev *rdev,
+ struct bnxt_qplib_mrw *qplib_mr)
+{
+ if (_is_relaxed_ordering_supported(rdev->dev_attr->dev_cap_flags2) &&
+ pcie_relaxed_ordering_enabled(rdev->en_dev->pdev))
+ qplib_mr->flags |= CMDQ_REGISTER_MR_FLAGS_ENABLE_RO;
+}
static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
struct bnxt_qplib_sge *sg_list, int num)
@@ -135,7 +185,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_udata *udata)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
memset(ib_attr, 0, sizeof(*ib_attr));
memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
@@ -148,7 +198,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
- ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
+ ib_attr->hw_ver = rdev->en_dev->pdev->revision;
ib_attr->max_qp = dev_attr->max_qp;
ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
ib_attr->device_cap_flags =
@@ -203,12 +253,28 @@ int bnxt_re_query_device(struct ib_device *ibdev,
return 0;
}
+int bnxt_re_modify_device(struct ib_device *ibdev,
+ int device_modify_mask,
+ struct ib_device_modify *device_modify)
+{
+ ibdev_dbg(ibdev, "Modify device with mask 0x%x", device_modify_mask);
+
+ if (device_modify_mask & ~IB_DEVICE_MODIFY_NODE_DESC)
+ return -EOPNOTSUPP;
+
+ if (!(device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC))
+ return 0;
+
+ memcpy(ibdev->node_desc, device_modify->node_desc, IB_DEVICE_NODE_DESC_MAX);
+ return 0;
+}
+
/* Port */
int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
struct ib_port_attr *port_attr)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
int rc;
memset(port_attr, 0, sizeof(*port_attr));
@@ -266,8 +332,8 @@ void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
- rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
- rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
+ rdev->dev_attr->fw_ver[0], rdev->dev_attr->fw_ver[1],
+ rdev->dev_attr->fw_ver[2], rdev->dev_attr->fw_ver[3]);
}
int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
@@ -517,15 +583,19 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
mr->rdev = rdev;
mr->qplib_mr.pd = &pd->qplib_pd;
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
- mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
- rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
- if (rc) {
- ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
- goto fail;
- }
+ mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
+ if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
+ goto fail;
+ }
- /* Register MR */
- mr->ib_mr.lkey = mr->qplib_mr.lkey;
+ /* Register MR */
+ mr->ib_mr.lkey = mr->qplib_mr.lkey;
+ } else {
+ mr->qplib_mr.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR;
+ }
mr->qplib_mr.va = (u64)(unsigned long)fence->va;
mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
@@ -896,13 +966,13 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
unsigned int flags;
int rc;
+ bnxt_re_debug_rem_qpinfo(rdev, qp);
+
bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
- if (rc) {
+ if (rc)
ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
- return rc;
- }
if (rdma_is_kernel_res(&qp->ib_qp.res)) {
flags = bnxt_re_lock_cqs(qp);
@@ -912,11 +982,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
- if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
- rc = bnxt_re_destroy_gsi_sqp(qp);
- if (rc)
- return rc;
- }
+ if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
+ bnxt_re_destroy_gsi_sqp(qp);
mutex_lock(&rdev->qp_lock);
list_del(&qp->list);
@@ -989,48 +1056,42 @@ static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
sq = &qplqp->sq;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
align = sizeof(struct sq_send_hdr);
ilsize = ALIGN(init_attr->cap.max_inline_data, align);
- sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
- if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
- return -EINVAL;
- /* For gen p4 and gen p5 backward compatibility mode
- * wqe size is fixed to 128 bytes
+ /* For gen p4 and gen p5 fixed wqe compatibility mode
+ * wqe size is fixed to 128 bytes - i.e., 6 SGEs
*/
- if (sq->wqe_size < bnxt_re_get_swqe_size(dev_attr->max_qp_sges) &&
- qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
- sq->wqe_size = bnxt_re_get_swqe_size(dev_attr->max_qp_sges);
+ if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) {
+ sq->wqe_size = bnxt_re_get_swqe_size(BNXT_STATIC_MAX_SGE);
+ sq->max_sge = BNXT_STATIC_MAX_SGE;
+ } else {
+ sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
+ if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
+ return -EINVAL;
+ }
if (init_attr->cap.max_inline_data) {
qplqp->max_inline_data = sq->wqe_size -
sizeof(struct sq_send_hdr);
init_attr->cap.max_inline_data = qplqp->max_inline_data;
- if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
- sq->max_sge = qplqp->max_inline_data /
- sizeof(struct sq_sge);
}
return 0;
}
static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
- struct bnxt_re_qp *qp, struct ib_udata *udata)
+ struct bnxt_re_qp *qp, struct bnxt_re_ucontext *cntx,
+ struct bnxt_re_qp_req *ureq)
{
struct bnxt_qplib_qp *qplib_qp;
- struct bnxt_re_ucontext *cntx;
- struct bnxt_re_qp_req ureq;
int bytes = 0, psn_sz;
struct ib_umem *umem;
int psn_nume;
qplib_qp = &qp->qplib_qp;
- cntx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
- ib_uctx);
- if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
- return -EFAULT;
bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
/* Consider mapping PSN search memory only for RC QPs. */
@@ -1038,15 +1099,20 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
psn_sz = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
sizeof(struct sq_psn_search_ext) :
sizeof(struct sq_psn_search);
- psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
- qplib_qp->sq.max_wqe :
- ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
- sizeof(struct bnxt_qplib_sge));
+ if (cntx && bnxt_re_is_var_size_supported(rdev, cntx)) {
+ psn_nume = ureq->sq_slots;
+ } else {
+ psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
+ qplib_qp->sq.max_wqe : ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
+ sizeof(struct bnxt_qplib_sge));
+ }
+ if (_is_host_msn_table(rdev->qplib_res.dattr->dev_cap_flags2))
+ psn_nume = roundup_pow_of_two(psn_nume);
bytes += (psn_nume * psn_sz);
}
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes,
+ umem = ib_umem_get(&rdev->ibdev, ureq->qpsva, bytes,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
return PTR_ERR(umem);
@@ -1055,12 +1121,12 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
qplib_qp->sq.sg_info.umem = umem;
qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
- qplib_qp->qp_handle = ureq.qp_handle;
+ qplib_qp->qp_handle = ureq->qp_handle;
if (!qp->qplib_qp.srq) {
bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
bytes = PAGE_ALIGN(bytes);
- umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
+ umem = ib_umem_get(&rdev->ibdev, ureq->qprva, bytes,
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem))
goto rqfail;
@@ -1156,6 +1222,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
/* Shadow QP SQ depth should be same as QP1 RQ depth */
qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
+ qp->qplib_qp.sq.max_sw_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.sq.max_sge = 2;
/* Q full delta can be 1 since it is internal QP */
qp->qplib_qp.sq.q_full_delta = 1;
@@ -1167,6 +1234,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
+ qp->qplib_qp.rq.max_sw_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
/* Q full delta can be 1 since it is internal QP */
qp->qplib_qp.rq.q_full_delta = 1;
@@ -1208,7 +1276,7 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
rq = &qplqp->rq;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
if (init_attr->srq) {
struct bnxt_re_srq *srq;
@@ -1228,6 +1296,7 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
*/
entries = bnxt_re_init_depth(init_attr->cap.max_recv_wr + 1, uctx);
rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+ rq->max_sw_wqe = rq->max_wqe;
rq->q_full_delta = 0;
rq->sg_info.pgsize = PAGE_SIZE;
rq->sg_info.pgshft = PAGE_SHIFT;
@@ -1244,7 +1313,7 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
qplqp->rq.max_sge = dev_attr->max_qp_sges;
@@ -1256,37 +1325,49 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
struct ib_qp_init_attr *init_attr,
- struct bnxt_re_ucontext *uctx)
+ struct bnxt_re_ucontext *uctx,
+ struct bnxt_re_qp_req *ureq)
{
struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_qp *qplqp;
struct bnxt_re_dev *rdev;
struct bnxt_qplib_q *sq;
+ int diff = 0;
int entries;
- int diff;
int rc;
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
sq = &qplqp->sq;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
sq->max_sge = init_attr->cap.max_send_sge;
- if (sq->max_sge > dev_attr->max_qp_sges) {
- sq->max_sge = dev_attr->max_qp_sges;
- init_attr->cap.max_send_sge = sq->max_sge;
- }
+ entries = init_attr->cap.max_send_wr;
+ if (uctx && qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) {
+ sq->max_wqe = ureq->sq_slots;
+ sq->max_sw_wqe = ureq->sq_slots;
+ sq->wqe_size = sizeof(struct sq_sge);
+ } else {
+ if (sq->max_sge > dev_attr->max_qp_sges) {
+ sq->max_sge = dev_attr->max_qp_sges;
+ init_attr->cap.max_send_sge = sq->max_sge;
+ }
- rc = bnxt_re_setup_swqe_size(qp, init_attr);
- if (rc)
- return rc;
+ rc = bnxt_re_setup_swqe_size(qp, init_attr);
+ if (rc)
+ return rc;
- entries = init_attr->cap.max_send_wr;
- /* Allocate 128 + 1 more than what's provided */
- diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
- 0 : BNXT_QPLIB_RESERVED_QP_WRS;
- entries = bnxt_re_init_depth(entries + diff + 1, uctx);
- sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
+ /* Allocate 128 + 1 more than what's provided */
+ diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
+ 0 : BNXT_QPLIB_RESERVED_QP_WRS;
+ entries = bnxt_re_init_depth(entries + diff + 1, uctx);
+ sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
+ if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
+ sq->max_sw_wqe = bnxt_qplib_get_depth(sq, qplqp->wqe_mode, true);
+ else
+ sq->max_sw_wqe = sq->max_wqe;
+
+ }
sq->q_full_delta = diff + 1;
/*
* Reserving one slot for Phantom WQE. Application can
@@ -1311,7 +1392,7 @@ static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
@@ -1349,10 +1430,10 @@ out:
static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+ struct bnxt_re_ucontext *uctx,
+ struct bnxt_re_qp_req *ureq)
{
struct bnxt_qplib_dev_attr *dev_attr;
- struct bnxt_re_ucontext *uctx;
struct bnxt_qplib_qp *qplqp;
struct bnxt_re_dev *rdev;
struct bnxt_re_cq *cq;
@@ -1360,9 +1441,8 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
- uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
/* Setup misc params */
ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
qplqp->pd = &pd->qplib_pd;
@@ -1375,8 +1455,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
goto out;
}
qplqp->type = (u8)qptype;
- qplqp->wqe_mode = rdev->chip_ctx->modes.wqe_mode;
-
+ qplqp->wqe_mode = bnxt_re_is_var_size_supported(rdev, uctx);
if (init_attr->qp_type == IB_QPT_RC) {
qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
@@ -1411,14 +1490,14 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
bnxt_re_adjust_gsi_rq_attr(qp);
/* Setup SQ */
- rc = bnxt_re_init_sq_attr(qp, init_attr, uctx);
+ rc = bnxt_re_init_sq_attr(qp, init_attr, uctx, ureq);
if (rc)
goto out;
if (init_attr->qp_type == IB_QPT_GSI)
bnxt_re_adjust_gsi_sq_attr(qp, init_attr, uctx);
- if (udata) /* This will update DPI and qp_handle */
- rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
+ if (uctx) /* This will update DPI and qp_handle */
+ rc = bnxt_re_init_user_qp(rdev, pd, qp, uctx, ureq);
out:
return rc;
}
@@ -1519,14 +1598,27 @@ static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata)
{
- struct ib_pd *ib_pd = ib_qp->pd;
- struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
- struct bnxt_re_dev *rdev = pd->rdev;
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
- struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_re_ucontext *uctx;
+ struct bnxt_re_qp_req ureq;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_pd *pd;
+ struct bnxt_re_qp *qp;
+ struct ib_pd *ib_pd;
u32 active_qps;
int rc;
+ ib_pd = ib_qp->pd;
+ pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ rdev = pd->rdev;
+ dev_attr = rdev->dev_attr;
+ qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+
+ uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
+ if (udata)
+ if (ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq))))
+ return -EFAULT;
+
rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
if (!rc) {
rc = -EINVAL;
@@ -1534,7 +1626,7 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
}
qp->rdev = rdev;
- rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
+ rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, uctx, &ureq);
if (rc)
goto fail;
@@ -1585,6 +1677,7 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
if (active_qps > rdev->stats.res.ud_qp_watermark)
rdev->stats.res.ud_qp_watermark = active_qps;
}
+ bnxt_re_debug_add_qpinfo(rdev, qp);
return 0;
qp_destroy:
@@ -1681,15 +1774,14 @@ int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
ib_srq);
struct bnxt_re_dev *rdev = srq->rdev;
struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
- struct bnxt_qplib_nq *nq = NULL;
- if (qplib_srq->cq)
- nq = qplib_srq->cq->nq;
+ if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) {
+ free_page((unsigned long)srq->uctx_srq_page);
+ hash_del(&srq->hash_entry);
+ }
bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
ib_umem_release(srq->umem);
atomic_dec(&rdev->stats.res.srq_count);
- if (nq)
- nq->budget--;
return 0;
}
@@ -1730,7 +1822,6 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
struct ib_udata *udata)
{
struct bnxt_qplib_dev_attr *dev_attr;
- struct bnxt_qplib_nq *nq = NULL;
struct bnxt_re_ucontext *uctx;
struct bnxt_re_dev *rdev;
struct bnxt_re_srq *srq;
@@ -1742,7 +1833,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
ib_pd = ib_srq->pd;
pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
rdev = pd->rdev;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
@@ -1773,8 +1864,9 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges);
srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
srq->srq_limit = srq_init_attr->attr.srq_limit;
- srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
- nq = &rdev->nq[0];
+ srq->qplib_srq.eventq_hw_ring_id = rdev->nqr->nq[0].ring_id;
+ srq->qplib_srq.sg_info.pgsize = PAGE_SIZE;
+ srq->qplib_srq.sg_info.pgshft = PAGE_SHIFT;
if (udata) {
rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
@@ -1789,9 +1881,18 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
}
if (udata) {
- struct bnxt_re_srq_resp resp;
+ struct bnxt_re_srq_resp resp = {};
resp.srqid = srq->qplib_srq.id;
+ if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT) {
+ hash_add(rdev->srq_hash, &srq->hash_entry, srq->qplib_srq.id);
+ srq->uctx_srq_page = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!srq->uctx_srq_page) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ resp.comp_mask |= BNXT_RE_SRQ_TOGGLE_PAGE_SUPPORT;
+ }
rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (rc) {
ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
@@ -1800,8 +1901,6 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
goto fail;
}
}
- if (nq)
- nq->budget++;
active_srqs = atomic_inc_return(&rdev->stats.res.srq_count);
if (active_srqs > rdev->stats.res.srq_watermark)
rdev->stats.res.srq_watermark = active_srqs;
@@ -1937,7 +2036,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
{
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_re_dev *rdev = qp->rdev;
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
enum ib_qp_state curr_qp_state, new_qp_state;
int rc, entries;
unsigned int flags;
@@ -1991,12 +2090,10 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
qp->qplib_qp.access =
- __from_ib_access_flags(qp_attr->qp_access_flags);
+ __qp_access_flags_from_ib(qp->qplib_qp.cctx,
+ qp_attr->qp_access_flags);
/* LOCAL_WRITE access must be set to allow RC receive */
- qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
- /* Temp: Set all params on QP as of now */
- qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
- qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
+ qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_LOCAL_WRITE;
}
if (qp_attr_mask & IB_QP_PKEY_INDEX) {
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
@@ -2030,7 +2127,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp->qplib_qp.ah.sgid_index = ctx->idx;
qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
qp->qplib_qp.ah.hop_limit = grh->hop_limit;
- qp->qplib_qp.ah.traffic_class = grh->traffic_class;
+ qp->qplib_qp.ah.traffic_class = grh->traffic_class >> 2;
qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
ether_addr_copy(qp->qplib_qp.ah.dmac,
qp_attr->ah_attr.roce.dmac);
@@ -2057,18 +2154,20 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
}
}
- if (qp_attr_mask & IB_QP_PATH_MTU) {
- qp->qplib_qp.modify_flags |=
- CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
- qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
- qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
- } else if (qp_attr->qp_state == IB_QPS_RTR) {
- qp->qplib_qp.modify_flags |=
- CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
- qp->qplib_qp.path_mtu =
- __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
- qp->qplib_qp.mtu =
- ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
+ if (qp_attr->qp_state == IB_QPS_RTR) {
+ enum ib_mtu qpmtu;
+
+ qpmtu = iboe_get_mtu(rdev->netdev->mtu);
+ if (qp_attr_mask & IB_QP_PATH_MTU) {
+ if (ib_mtu_enum_to_int(qp_attr->path_mtu) >
+ ib_mtu_enum_to_int(qpmtu))
+ return -EINVAL;
+ qpmtu = qp_attr->path_mtu;
+ }
+
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
+ qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu);
+ qp->qplib_qp.mtu = ib_mtu_enum_to_int(qpmtu);
}
if (qp_attr_mask & IB_QP_TIMEOUT) {
@@ -2155,6 +2254,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
entries = bnxt_re_init_depth(qp_attr->cap.max_recv_wr, uctx);
qp->qplib_qp.rq.max_wqe =
min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+ qp->qplib_qp.rq.max_sw_wqe = qp->qplib_qp.rq.max_wqe;
qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
qp_attr->cap.max_recv_wr;
qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
@@ -2200,7 +2300,8 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
- qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
+ qp_attr->qp_access_flags = __qp_access_flags_to_ib(qp->qplib_qp.cctx,
+ qplib_qp->access);
qp_attr->pkey_index = qplib_qp->pkey_index;
qp_attr->qkey = qplib_qp->qkey;
qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
@@ -2216,6 +2317,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp_attr->retry_cnt = qplib_qp->retry_cnt;
qp_attr->rnr_retry = qplib_qp->rnr_retry;
qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
+ qp_attr->port_num = __to_ib_port_num(qplib_qp->port_id);
qp_attr->rq_psn = qplib_qp->rq.psn;
qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
qp_attr->sq_psn = qplib_qp->sq.psn;
@@ -2479,7 +2581,7 @@ static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
break;
case IB_WR_SEND_WITH_IMM:
wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
- wqe->send.imm_data = wr->ex.imm_data;
+ wqe->send.imm_data = be32_to_cpu(wr->ex.imm_data);
break;
case IB_WR_SEND_WITH_INV:
wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
@@ -2509,7 +2611,7 @@ static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
break;
case IB_WR_RDMA_WRITE_WITH_IMM:
wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
- wqe->rdma.imm_data = wr->ex.imm_data;
+ wqe->rdma.imm_data = be32_to_cpu(wr->ex.imm_data);
break;
case IB_WR_RDMA_READ:
wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
@@ -2712,7 +2814,8 @@ bad:
wr = wr->next;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
- bnxt_ud_qp_hw_stall_workaround(qp);
+ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
+ bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc;
}
@@ -2824,7 +2927,8 @@ bad:
wr = wr->next;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
- bnxt_ud_qp_hw_stall_workaround(qp);
+ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
+ bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc;
@@ -2921,6 +3025,28 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
return rc;
}
+static struct bnxt_qplib_nq *bnxt_re_get_nq(struct bnxt_re_dev *rdev)
+{
+ int min, indx;
+
+ mutex_lock(&rdev->nqr->load_lock);
+ for (indx = 0, min = 0; indx < (rdev->nqr->num_msix - 1); indx++) {
+ if (rdev->nqr->nq[min].load > rdev->nqr->nq[indx].load)
+ min = indx;
+ }
+ rdev->nqr->nq[min].load++;
+ mutex_unlock(&rdev->nqr->load_lock);
+
+ return &rdev->nqr->nq[min];
+}
+
+static void bnxt_re_put_nq(struct bnxt_re_dev *rdev, struct bnxt_qplib_nq *nq)
+{
+ mutex_lock(&rdev->nqr->load_lock);
+ nq->load--;
+ mutex_unlock(&rdev->nqr->load_lock);
+}
+
/* Completion Queues */
int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
@@ -2939,25 +3065,25 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
hash_del(&cq->hash_entry);
}
bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
+
+ bnxt_re_put_nq(rdev, nq);
ib_umem_release(cq->umem);
atomic_dec(&rdev->stats.res.cq_count);
- nq->budget--;
kfree(cq->cql);
return 0;
}
int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
- struct ib_udata *udata)
+ struct uverbs_attr_bundle *attrs)
{
struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
+ struct ib_udata *udata = &attrs->driver_udata;
struct bnxt_re_ucontext *uctx =
rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
struct bnxt_qplib_chip_ctx *cctx;
- struct bnxt_qplib_nq *nq = NULL;
- unsigned int nq_alloc_cnt;
int cqe = attr->cqe;
int rc, entries;
u32 active_cqs;
@@ -3008,15 +3134,10 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->qplib_cq.dpi = &rdev->dpi_privileged;
}
- /*
- * Allocating the NQ in a round robin fashion. nq_alloc_cnt is a
- * used for getting the NQ index.
- */
- nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
- nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
cq->qplib_cq.max_wqe = entries;
- cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
- cq->qplib_cq.nq = nq;
+ cq->qplib_cq.coalescing = &rdev->cq_coalescing;
+ cq->qplib_cq.nq = bnxt_re_get_nq(rdev);
+ cq->qplib_cq.cnq_hw_ring_id = cq->qplib_cq.nq->ring_id;
rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
if (rc) {
@@ -3026,7 +3147,6 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->ib_cq.cqe = entries;
cq->cq_period = cq->qplib_cq.period;
- nq->budget++;
active_cqs = atomic_inc_return(&rdev->stats.res.cq_count);
if (active_cqs > rdev->stats.res.cq_watermark)
@@ -3097,7 +3217,7 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
rdev = cq->rdev;
- dev_attr = &rdev->dev_attr;
+ dev_attr = rdev->dev_attr;
if (!ibcq->uobject) {
ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported");
return -EOPNOTSUPP;
@@ -3581,7 +3701,7 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
wc->byte_len = orig_cqe->length;
wc->qp = &gsi_qp->ib_qp;
- wc->ex.imm_data = orig_cqe->immdata;
+ wc->ex.imm_data = cpu_to_be32(orig_cqe->immdata);
wc->src_qp = orig_cqe->src_qp;
memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
@@ -3726,7 +3846,10 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
(unsigned long)(cqe->qp_handle),
struct bnxt_re_qp, qplib_qp);
wc->qp = &qp->ib_qp;
- wc->ex.imm_data = cqe->immdata;
+ if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
+ wc->ex.imm_data = cpu_to_be32(cqe->immdata);
+ else
+ wc->ex.invalidate_rkey = cqe->invrkey;
wc->src_qp = cqe->src_qp;
memcpy(wc->smac, cqe->smac, ETH_ALEN);
wc->port_num = 1;
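
The imm_data changes in this hunk and in the post-send path above are endianness fixes: ib_send_wr.ex.imm_data and ib_wc.ex.imm_data are __be32 (wire order), while the qplib layer keeps immediate data as a host-order u32, so each boundary needs exactly one swap. A reduced sketch of the round trip; the helper names are illustrative:

/* Illustrative helpers; the swaps happen inline in the driver */
static u32 imm_to_qplib(__be32 imm)		/* bnxt_re_post_send direction */
{
	return be32_to_cpu(imm);
}

static __be32 imm_to_consumer(u32 immdata)	/* bnxt_re_poll_cq direction */
{
	return cpu_to_be32(immdata);
}
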
@@ -3844,9 +3967,12 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
mr->rdev = rdev;
mr->qplib_mr.pd = &pd->qplib_pd;
- mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
+ mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
+ if (mr_access_flags & IB_ACCESS_RELAXED_ORDERING)
+ bnxt_re_check_and_set_relaxed_ordering(rdev, &mr->qplib_mr);
+
/* Allocate and register 0 as the address */
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc)
@@ -3944,7 +4070,7 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
mr->rdev = rdev;
mr->qplib_mr.pd = &pd->qplib_pd;
- mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
+ mr->qplib_mr.access_flags = BNXT_QPLIB_FR_PMR;
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
@@ -4061,21 +4187,28 @@ static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64
mr->rdev = rdev;
mr->qplib_mr.pd = &pd->qplib_pd;
- mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
+ mr->qplib_mr.access_flags = __from_ib_access_flags(mr_access_flags);
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
- rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
- if (rc) {
- ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
- rc = -EIO;
- goto free_mr;
+ if (!_is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags)) {
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
+ rc = -EIO;
+ goto free_mr;
+ }
+ /* The fixed portion of the rkey is the same as the lkey */
+ mr->ib_mr.rkey = mr->qplib_mr.rkey;
+ } else {
+ mr->qplib_mr.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR;
}
- /* The fixed portion of the rkey is the same as the lkey */
- mr->ib_mr.rkey = mr->qplib_mr.rkey;
mr->ib_umem = umem;
mr->qplib_mr.va = virt_addr;
mr->qplib_mr.total_size = length;
+ if (mr_access_flags & IB_ACCESS_RELAXED_ORDERING)
+ bnxt_re_check_and_set_relaxed_ordering(rdev, &mr->qplib_mr);
+
umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
umem_pgs, page_size);
@@ -4121,7 +4254,8 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
u64 length, u64 virt_addr, int fd,
- int mr_access_flags, struct ib_udata *udata)
+ int mr_access_flags,
+ struct uverbs_attr_bundle *attrs)
{
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
@@ -4148,7 +4282,7 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
struct bnxt_re_ucontext *uctx =
container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
struct bnxt_re_user_mmap_entry *entry;
struct bnxt_re_uctx_resp resp = {};
struct bnxt_re_uctx_req ureq = {};
@@ -4186,9 +4320,6 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
resp.cqe_sz = sizeof(struct cq_base);
resp.max_cqd = dev_attr->max_cq_wqes;
- resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
- resp.mode = rdev->chip_ctx->modes.wqe_mode;
-
if (rdev->chip_ctx->modes.db_push)
resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED;
@@ -4201,13 +4332,22 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
if (rdev->pacing.dbr_pacing)
resp.comp_mask |= BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED;
+ if (_is_host_msn_table(rdev->qplib_res.dattr->dev_cap_flags2))
+ resp.comp_mask |= BNXT_RE_UCNTX_CMASK_MSN_TABLE_ENABLED;
+
if (udata->inlen >= sizeof(ureq)) {
rc = ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq)));
if (rc)
goto cfail;
if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT) {
resp.comp_mask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
- uctx->cmask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
+ uctx->cmask |= BNXT_RE_UCNTX_CAP_POW2_DISABLED;
+ }
+ if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_VAR_WQE_SUPPORT) {
+ resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
+ resp.mode = rdev->chip_ctx->modes.wqe_mode;
+ if (resp.mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
+ uctx->cmask |= BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED;
}
}
@@ -4261,6 +4401,19 @@ static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq
return cq;
}
+static struct bnxt_re_srq *bnxt_re_search_for_srq(struct bnxt_re_dev *rdev, u32 srq_id)
+{
+ struct bnxt_re_srq *srq = NULL, *tmp_srq;
+
+ hash_for_each_possible(rdev->srq_hash, tmp_srq, hash_entry, srq_id) {
+ if (tmp_srq->qplib_srq.id == srq_id) {
+ srq = tmp_srq;
+ break;
+ }
+ }
+ return srq;
+}
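This mirrors the existing bnxt_re_search_for_cq() helper and only pays off together with the insertion side, which (added elsewhere in this series, shown here as the assumed counterpart using the same kernel hashtable API) would be:

	/* at SRQ create: */
	hash_add(rdev->srq_hash, &srq->hash_entry, srq->qplib_srq.id);
	/* at SRQ destroy: */
	hash_del(&srq->hash_entry);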
+
/* Helper function to mmap the virtual memory from user app */
int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
{
@@ -4305,9 +4458,10 @@ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
case BNXT_RE_MMAP_TOGGLE_PAGE:
/* Driver doesn't expect write access for user space */
if (vma->vm_flags & VM_WRITE)
- return -EFAULT;
- ret = vm_insert_page(vma, vma->vm_start,
- virt_to_page((void *)bnxt_entry->mem_offset));
+ ret = -EFAULT;
+ else
+ ret = vm_insert_page(vma, vma->vm_start,
+ virt_to_page((void *)bnxt_entry->mem_offset));
break;
default:
ret = -EINVAL;
@@ -4328,6 +4482,41 @@ void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
kfree(bnxt_entry);
}
+int bnxt_re_process_mad(struct ib_device *ibdev, int mad_flags,
+ u32 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct ib_mad *in_mad, struct ib_mad *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ struct ib_class_port_info cpi = {};
+ int ret = IB_MAD_RESULT_SUCCESS;
+ int rc = 0;
+
+ if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
+ return ret;
+
+ switch (in_mad->mad_hdr.attr_id) {
+ case IB_PMA_CLASS_PORT_INFO:
+ cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+ memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
+ break;
+ case IB_PMA_PORT_COUNTERS_EXT:
+ rc = bnxt_re_assign_pma_port_ext_counters(rdev, out_mad);
+ break;
+ case IB_PMA_PORT_COUNTERS:
+ rc = bnxt_re_assign_pma_port_counters(rdev, out_mad);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ if (rc)
+ return IB_MAD_RESULT_FAILURE;
+ ret |= IB_MAD_RESULT_REPLY;
+ return ret;
+}
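A note on the magic 40 in the IB_PMA_CLASS_PORT_INFO case: PerfMgt attribute data starts at byte 64 of the MAD, and struct ib_mad's data[] begins right after the 24-byte common header, so the payload lands at data + (64 - 24) = data + 40. Other PMA-capable drivers use the same offset; a named constant (illustrative, not part of the patch) would make that explicit:

	#define BNXT_RE_PMA_ATTR_OFFSET (64 - 24)   /* = 40 bytes into out_mad->data */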
+
static int UVERBS_HANDLER(BNXT_RE_METHOD_NOTIFY_DRV)(struct uverbs_attr_bundle *attrs)
{
struct bnxt_re_ucontext *uctx;
@@ -4489,12 +4678,13 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bund
struct bnxt_re_ucontext *uctx;
struct ib_ucontext *ib_uctx;
struct bnxt_re_dev *rdev;
+ struct bnxt_re_srq *srq;
+ u32 length = PAGE_SIZE;
struct bnxt_re_cq *cq;
u64 mem_offset;
+ u32 offset = 0;
u64 addr = 0;
- u32 length;
- u32 offset;
- u32 cq_id;
+ u32 res_id;
int err;
ib_uctx = ib_uverbs_get_ucontext(attrs);
@@ -4507,23 +4697,24 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bund
uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
rdev = uctx->rdev;
+ err = uverbs_copy_from(&res_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID);
+ if (err)
+ return err;
switch (res_type) {
case BNXT_RE_CQ_TOGGLE_MEM:
- err = uverbs_copy_from(&cq_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID);
- if (err)
- return err;
-
- cq = bnxt_re_search_for_cq(rdev, cq_id);
+ cq = bnxt_re_search_for_cq(rdev, res_id);
if (!cq)
return -EINVAL;
- length = PAGE_SIZE;
addr = (u64)cq->uctx_cq_page;
- mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE;
- offset = 0;
break;
case BNXT_RE_SRQ_TOGGLE_MEM:
+ srq = bnxt_re_search_for_srq(rdev, res_id);
+ if (!srq)
+ return -EINVAL;
+
+ addr = (u64)srq->uctx_srq_page;
break;
default:
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index b267d6d5975f..22c9eb8e9cfc 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -77,6 +77,8 @@ struct bnxt_re_srq {
struct bnxt_qplib_srq qplib_srq;
struct ib_umem *umem;
spinlock_t lock; /* protect srq */
+ void *uctx_srq_page;
+ struct hlist_node hash_entry;
};
struct bnxt_re_qp {
@@ -93,6 +95,7 @@ struct bnxt_re_qp {
struct ib_ud_header qp1_hdr;
struct bnxt_re_cq *scq;
struct bnxt_re_cq *rcq;
+ struct dentry *dentry;
};
struct bnxt_re_cq {
@@ -171,15 +174,32 @@ static inline u16 bnxt_re_get_rwqe_size(int nsge)
return sizeof(struct rq_wqe_hdr) + (nsge * sizeof(struct sq_sge));
}
+enum {
+ BNXT_RE_UCNTX_CAP_POW2_DISABLED = 0x1ULL,
+ BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED = 0x2ULL,
+};
+
static inline u32 bnxt_re_init_depth(u32 ent, struct bnxt_re_ucontext *uctx)
{
- return uctx ? (uctx->cmask & BNXT_RE_UCNTX_CMASK_POW2_DISABLED) ?
+ return uctx ? (uctx->cmask & BNXT_RE_UCNTX_CAP_POW2_DISABLED) ?
ent : roundup_pow_of_two(ent) : ent;
}
+static inline bool bnxt_re_is_var_size_supported(struct bnxt_re_dev *rdev,
+ struct bnxt_re_ucontext *uctx)
+{
+ if (uctx)
+ return uctx->cmask & BNXT_RE_UCNTX_CAP_VAR_WQE_ENABLED;
+ else
+ return rdev->chip_ctx->modes.wqe_mode;
+}
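Concretely, bnxt_re_init_depth() rounds only for user contexts that did not negotiate the POW2 capability; a quick worked example with an illustrative request of 900 entries:

	/* ent = 900:
	 *   kernel consumer (uctx == NULL)                -> 900
	 *   user ctx without the capability               -> roundup_pow_of_two(900) = 1024
	 *   user ctx with BNXT_RE_UCNTX_CAP_POW2_DISABLED -> 900 (exact sizing)
	 */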
+
int bnxt_re_query_device(struct ib_device *ibdev,
struct ib_device_attr *ib_attr,
struct ib_udata *udata);
+int bnxt_re_modify_device(struct ib_device *ibdev,
+ int device_modify_mask,
+ struct ib_device_modify *device_modify);
int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
struct ib_port_attr *port_attr);
int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
@@ -221,7 +241,7 @@ int bnxt_re_post_send(struct ib_qp *qp, const struct ib_send_wr *send_wr,
int bnxt_re_post_recv(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
const struct ib_recv_wr **bad_recv_wr);
int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
- struct ib_udata *udata);
+ struct uverbs_attr_bundle *attrs);
int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
int bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
@@ -242,12 +262,22 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
u64 length, u64 virt_addr,
int fd, int mr_access_flags,
- struct ib_udata *udata);
+ struct uverbs_attr_bundle *attrs);
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
+int bnxt_re_process_mad(struct ib_device *device, int process_mad_flags,
+ u32 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct ib_mad *in_mad, struct ib_mad *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
+
+static inline u32 __to_ib_port_num(u16 port_id)
+{
+ return (u32)port_id + 1;
+}
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 54b4d2f3a5d8..293b0a96c8e3 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -67,6 +67,7 @@
#include <rdma/bnxt_re-abi.h>
#include "bnxt.h"
#include "hw_counters.h"
+#include "debugfs.h"
static char version[] =
BNXT_RE_DESC "\n";
@@ -78,16 +79,12 @@ MODULE_LICENSE("Dual BSD/GPL");
/* globals */
static DEFINE_MUTEX(bnxt_re_mutex);
-static void bnxt_re_stop_irq(void *handle);
-static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev);
-static int bnxt_re_netdev_event(struct notifier_block *notifier,
- unsigned long event, void *ptr);
-static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev);
-static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev);
static int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);
static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
u32 *offset);
+static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
+ u8 port_num, enum ib_event_type event);
static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_chip_ctx *cctx;
@@ -129,18 +126,20 @@ static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
}
}
-static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
+static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_chip_ctx *cctx;
cctx = rdev->chip_ctx;
- cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
- mode : BNXT_QPLIB_WQE_MODE_STATIC;
+ cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ?
+ BNXT_QPLIB_WQE_MODE_VARIABLE : BNXT_QPLIB_WQE_MODE_STATIC;
if (bnxt_re_hwrm_qcaps(rdev))
dev_err(rdev_to_dev(rdev),
"Failed to query hwrm qcaps\n");
- if (bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx))
+ if (bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx)) {
cctx->modes.toggle_bits |= BNXT_QPLIB_CQ_TOGGLE_BIT;
+ cctx->modes.toggle_bits |= BNXT_QPLIB_SRQ_TOGGLE_BIT;
+ }
}
static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
@@ -149,6 +148,10 @@ static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
if (!rdev->chip_ctx)
return;
+
+ kfree(rdev->dev_attr);
+ rdev->dev_attr = NULL;
+
chip_ctx = rdev->chip_ctx;
rdev->chip_ctx = NULL;
rdev->rcfw.res = NULL;
@@ -158,14 +161,15 @@ static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
kfree(chip_ctx);
}
-static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
+static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
{
struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
- int rc;
+ int rc = -ENOMEM;
en_dev = rdev->en_dev;
+ rdev->qplib_res.pdev = en_dev->pdev;
chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL);
if (!chip_ctx)
return -ENOMEM;
@@ -177,20 +181,31 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
rdev->qplib_res.cctx = rdev->chip_ctx;
rdev->rcfw.res = &rdev->qplib_res;
- rdev->qplib_res.dattr = &rdev->dev_attr;
+ rdev->dev_attr = kzalloc(sizeof(*rdev->dev_attr), GFP_KERNEL);
+ if (!rdev->dev_attr)
+ goto free_chip_ctx;
+ rdev->qplib_res.dattr = rdev->dev_attr;
rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
+ rdev->qplib_res.en_dev = en_dev;
- bnxt_re_set_drv_mode(rdev, wqe_mode);
+ bnxt_re_set_drv_mode(rdev);
bnxt_re_set_db_offset(rdev);
rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
if (rc)
- return rc;
+ goto free_dev_attr;
if (bnxt_qplib_determine_atomics(en_dev->pdev))
ibdev_info(&rdev->ibdev,
"platform doesn't support global atomics.");
return 0;
+free_dev_attr:
+ kfree(rdev->dev_attr);
+ rdev->dev_attr = NULL;
+free_chip_ctx:
+ kfree(rdev->chip_ctx);
+ rdev->chip_ctx = NULL;
+ return rc;
}
/* SR-IOV helper functions */
@@ -212,7 +227,7 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
struct bnxt_qplib_ctx *ctx;
int i;
- attr = &rdev->dev_attr;
+ attr = rdev->dev_attr;
ctx = &rdev->qplib_ctx;
ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
@@ -226,7 +241,7 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
rdev->qplib_ctx.tqm_ctx.qcount[i] =
- rdev->dev_attr.tqm_alloc_reqs[i];
+ rdev->dev_attr->tqm_alloc_reqs[i];
}
static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
@@ -280,33 +295,163 @@ static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
{
+ /*
+ * Use the total VF count since the actual VF count may not be
+ * available at this point.
+ */
rdev->num_vfs = pci_sriov_get_totalvfs(rdev->en_dev->pdev);
- if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
- bnxt_re_set_resource_limits(rdev);
- bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
- &rdev->qplib_ctx);
+ if (!rdev->num_vfs)
+ return;
+
+ bnxt_re_set_resource_limits(rdev);
+ bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
+ &rdev->qplib_ctx);
+}
+
+struct bnxt_re_dcb_work {
+ struct work_struct work;
+ struct bnxt_re_dev *rdev;
+ struct hwrm_async_event_cmpl cmpl;
+};
+
+static bool bnxt_re_is_qp1_qp(struct bnxt_re_qp *qp)
+{
+ return qp->ib_qp.qp_type == IB_QPT_GSI;
+}
+
+static struct bnxt_re_qp *bnxt_re_get_qp1_qp(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_qp *qp;
+
+ mutex_lock(&rdev->qp_lock);
+ list_for_each_entry(qp, &rdev->qp_list, list) {
+ if (bnxt_re_is_qp1_qp(qp)) {
+ mutex_unlock(&rdev->qp_lock);
+ return qp;
+ }
}
+ mutex_unlock(&rdev->qp_lock);
+ return NULL;
}
-static void bnxt_re_shutdown(struct auxiliary_device *adev)
+static int bnxt_re_update_qp1_tos_dscp(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_qp *qp;
+
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return 0;
+
+ qp = bnxt_re_get_qp1_qp(rdev);
+ if (!qp)
+ return 0;
+
+ qp->qplib_qp.modify_flags = CMDQ_MODIFY_QP_MODIFY_MASK_TOS_DSCP;
+ qp->qplib_qp.tos_dscp = rdev->cc_param.qp1_tos_dscp;
+
+ return bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
+}
+
+static void bnxt_re_init_dcb_wq(struct bnxt_re_dev *rdev)
+{
+ rdev->dcb_wq = create_singlethread_workqueue("bnxt_re_dcb_wq");
+}
+
+static void bnxt_re_uninit_dcb_wq(struct bnxt_re_dev *rdev)
+{
+ if (!rdev->dcb_wq)
+ return;
+ destroy_workqueue(rdev->dcb_wq);
+}
+
+static void bnxt_re_dcb_wq_task(struct work_struct *work)
+{
+ struct bnxt_re_dcb_work *dcb_work =
+ container_of(work, struct bnxt_re_dcb_work, work);
+ struct bnxt_re_dev *rdev = dcb_work->rdev;
+ struct bnxt_qplib_cc_param *cc_param;
+ int rc;
+
+ if (!rdev)
+ goto free_dcb;
+
+ cc_param = &rdev->cc_param;
+ rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, cc_param);
+ if (rc) {
+ ibdev_dbg(&rdev->ibdev, "Failed to query ccparam rc:%d", rc);
+ goto free_dcb;
+ }
+ if (cc_param->qp1_tos_dscp != cc_param->tos_dscp) {
+ cc_param->qp1_tos_dscp = cc_param->tos_dscp;
+ rc = bnxt_re_update_qp1_tos_dscp(rdev);
+ if (rc) {
+ ibdev_dbg(&rdev->ibdev, "%s: Failed to modify QP1 rc:%d",
+ __func__, rc);
+ goto free_dcb;
+ }
+ }
+
+free_dcb:
+ kfree(dcb_work);
+}
+
+static void bnxt_re_async_notifier(void *handle, struct hwrm_async_event_cmpl *cmpl)
{
- struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
+ struct bnxt_re_dcb_work *dcb_work;
+ struct bnxt_re_dev *rdev;
+ u32 data1, data2;
+ u16 event_id;
+ rdev = en_info->rdev;
if (!rdev)
return;
- ib_unregister_device(&rdev->ibdev);
- bnxt_re_dev_uninit(rdev);
+
+ event_id = le16_to_cpu(cmpl->event_id);
+ data1 = le32_to_cpu(cmpl->event_data1);
+ data2 = le32_to_cpu(cmpl->event_data2);
+
+ ibdev_dbg(&rdev->ibdev, "Async event_id = %d data1 = %d data2 = %d",
+ event_id, data1, data2);
+
+ switch (event_id) {
+ case ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE:
+ dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC);
+ if (!dcb_work)
+ break;
+
+ dcb_work->rdev = rdev;
+ memcpy(&dcb_work->cmpl, cmpl, sizeof(*cmpl));
+ INIT_WORK(&dcb_work->work, bnxt_re_dcb_wq_task);
+ queue_work(rdev->dcb_wq, &dcb_work->work);
+ break;
+ default:
+ break;
+ }
}
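The notifier runs from the L2 driver's completion path, so it does no firmware work itself: it copies the completion under GFP_ATOMIC and defers to dcb_wq, where the work item may sleep. Sketched end to end (each arrow is a call visible above):

	/* DCB_CONFIG_CHANGE async event
	 *  -> bnxt_re_async_notifier()            atomic ctx: kzalloc + queue_work
	 *    -> bnxt_re_dcb_wq_task()             process ctx, may sleep
	 *       -> bnxt_qplib_query_cc_param()    fetch fresh tos_dscp from FW
	 *       -> bnxt_re_update_qp1_tos_dscp()  MODIFY_QP with MASK_TOS_DSCP
	 */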
-static void bnxt_re_stop_irq(void *handle)
+static void bnxt_re_stop_irq(void *handle, bool reset)
{
- struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
- struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
+ struct bnxt_qplib_rcfw *rcfw;
+ struct bnxt_re_dev *rdev;
struct bnxt_qplib_nq *nq;
int indx;
- for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
- nq = &rdev->nq[indx - 1];
+ rdev = en_info->rdev;
+ if (!rdev)
+ return;
+ rcfw = &rdev->rcfw;
+
+ if (reset) {
+ set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
+ set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
+ wake_up_all(&rdev->rcfw.cmdq.waitq);
+ bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
+ IB_EVENT_DEVICE_FATAL);
+ }
+
+ for (indx = BNXT_RE_NQ_IDX; indx < rdev->nqr->num_msix; indx++) {
+ nq = &rdev->nqr->nq[indx - 1];
bnxt_qplib_nq_stop_irq(nq, false);
}
@@ -315,12 +460,18 @@ static void bnxt_re_stop_irq(void *handle)
static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
{
- struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
- struct bnxt_msix_entry *msix_ent = rdev->en_dev->msix_entries;
- struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
+ struct bnxt_msix_entry *msix_ent;
+ struct bnxt_qplib_rcfw *rcfw;
+ struct bnxt_re_dev *rdev;
struct bnxt_qplib_nq *nq;
int indx, rc;
+ rdev = en_info->rdev;
+ if (!rdev)
+ return;
+ msix_ent = rdev->nqr->msix_entries;
+ rcfw = &rdev->rcfw;
if (!ent) {
/* Not setting the f/w timeout bit in rcfw.
* During the driver unload the first command
@@ -334,8 +485,8 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
/* Vectors may change after restart, so update with new vectors
* in the device structure.
*/
- for (indx = 0; indx < rdev->num_msix; indx++)
- rdev->en_dev->msix_entries[indx].vector = ent[indx].vector;
+ for (indx = 0; indx < rdev->nqr->num_msix; indx++)
+ rdev->nqr->msix_entries[indx].vector = ent[indx].vector;
rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
false);
@@ -343,8 +494,8 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
ibdev_warn(&rdev->ibdev, "Failed to reinit CREQ\n");
return;
}
- for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) {
- nq = &rdev->nq[indx - 1];
+ for (indx = BNXT_RE_NQ_IDX; indx < rdev->nqr->num_msix; indx++) {
+ nq = &rdev->nqr->nq[indx - 1];
rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
msix_ent[indx].vector, false);
if (rc) {
@@ -356,6 +507,7 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
}
static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
+ .ulp_async_notifier = bnxt_re_async_notifier,
.ulp_irq_stop = bnxt_re_stop_irq,
.ulp_irq_restart = bnxt_re_start_irq
};
@@ -365,14 +517,9 @@ static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
{
struct bnxt_en_dev *en_dev;
- int rc;
en_dev = rdev->en_dev;
-
- rc = bnxt_register_dev(en_dev, &bnxt_re_ulp_ops, rdev);
- if (!rc)
- rdev->qplib_res.pdev = rdev->en_dev->pdev;
- return rc;
+ return bnxt_register_dev(en_dev, &bnxt_re_ulp_ops, rdev->adev);
}
static void bnxt_re_init_hwrm_hdr(struct input *hdr, u16 opcd)
@@ -423,6 +570,7 @@ int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
struct hwrm_func_qcaps_input req = {};
struct bnxt_qplib_chip_ctx *cctx;
struct bnxt_fw_msg fw_msg = {};
+ u32 flags_ext2;
int rc;
cctx = rdev->chip_ctx;
@@ -436,14 +584,15 @@ int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
return rc;
cctx->modes.db_push = le32_to_cpu(resp.flags) & FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE;
- cctx->modes.dbr_pacing =
- le32_to_cpu(resp.flags_ext2) &
- FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED;
+ flags_ext2 = le32_to_cpu(resp.flags_ext2);
+ cctx->modes.dbr_pacing = flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED ||
+ flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED;
return 0;
}
static int bnxt_re_hwrm_dbr_pacing_qcfg(struct bnxt_re_dev *rdev)
{
+ struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;
struct hwrm_func_dbr_pacing_qcfg_output resp = {};
struct hwrm_func_dbr_pacing_qcfg_input req = {};
struct bnxt_en_dev *en_dev = rdev->en_dev;
@@ -465,6 +614,13 @@ static int bnxt_re_hwrm_dbr_pacing_qcfg(struct bnxt_re_dev *rdev)
cctx->dbr_stat_db_fifo =
le32_to_cpu(resp.dbr_stat_db_fifo_reg) &
~FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK;
+
+ pacing_data->fifo_max_depth = le32_to_cpu(resp.dbr_stat_db_max_fifo_depth);
+ if (!pacing_data->fifo_max_depth)
+ pacing_data->fifo_max_depth = BNXT_RE_MAX_FIFO_DEPTH(cctx);
+ pacing_data->fifo_room_mask = le32_to_cpu(resp.dbr_stat_db_fifo_reg_fifo_room_mask);
+ pacing_data->fifo_room_shift = resp.dbr_stat_db_fifo_reg_fifo_room_shift;
+
return 0;
}
@@ -479,24 +635,55 @@ static void bnxt_re_set_default_pacing_data(struct bnxt_re_dev *rdev)
pacing_data->pacing_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE;
}
-static void __wait_for_fifo_occupancy_below_th(struct bnxt_re_dev *rdev)
+static u32 __get_fifo_occupancy(struct bnxt_re_dev *rdev)
{
+ struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;
u32 read_val, fifo_occup;
+ read_val = readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
+ fifo_occup = pacing_data->fifo_max_depth -
+ ((read_val & pacing_data->fifo_room_mask) >>
+ pacing_data->fifo_room_shift);
+ return fifo_occup;
+}
+
+static bool is_dbr_fifo_full(struct bnxt_re_dev *rdev)
+{
+ u32 max_occup, fifo_occup;
+
+ fifo_occup = __get_fifo_occupancy(rdev);
+ max_occup = BNXT_RE_MAX_FIFO_DEPTH(rdev->chip_ctx) - 1;
+ if (fifo_occup == max_occup)
+ return true;
+
+ return false;
+}
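The occupancy math is occupancy = max_depth - room, where room is the free-slot count extracted from the FIFO register with the mask/shift pair that bnxt_re_hwrm_dbr_pacing_qcfg() now reads from firmware. A self-contained worked example using the fixed P5 constants this patch removes (mask 0x1FFF8000, shift 15, depth 0x2c00; the register value is made up):

	#include <assert.h>
	#include <stdint.h>

	static uint32_t fifo_occupancy(uint32_t read_val, uint32_t room_mask,
				       uint32_t room_shift, uint32_t max_depth)
	{
		return max_depth - ((read_val & room_mask) >> room_shift);
	}

	int main(void)
	{
		/* (0x0B000000 & 0x1FFF8000) >> 15 = 0x1600 slots free of 0x2c00,
		 * so the other 0x1600 entries are occupied. */
		assert(fifo_occupancy(0x0B000000, 0x1FFF8000, 15, 0x2c00) == 0x1600);
		return 0;
	}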
+
+static void __wait_for_fifo_occupancy_below_th(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;
+ u32 retry_fifo_check = 1000;
+ u32 fifo_occup;
+
/* loop shouldn't run infinitely as the occupancy usually goes
* below pacing algo threshold as soon as pacing kicks in.
*/
while (1) {
- read_val = readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
- fifo_occup = BNXT_RE_MAX_FIFO_DEPTH -
- ((read_val & BNXT_RE_DB_FIFO_ROOM_MASK) >>
- BNXT_RE_DB_FIFO_ROOM_SHIFT);
+ fifo_occup = __get_fifo_occupancy(rdev);
/* Fifo occupancy cannot be greater than the MAX FIFO depth */
- if (fifo_occup > BNXT_RE_MAX_FIFO_DEPTH)
+ if (fifo_occup > pacing_data->fifo_max_depth)
break;
- if (fifo_occup < rdev->qplib_res.pacing_data->pacing_th)
+ if (fifo_occup < pacing_data->pacing_th)
+ break;
+ if (!retry_fifo_check--) {
+ dev_info_once(rdev_to_dev(rdev),
+ "%s: fifo_occup = 0x%xfifo_max_depth = 0x%x pacing_th = 0x%x\n",
+ __func__, fifo_occup, pacing_data->fifo_max_depth,
+ pacing_data->pacing_th);
break;
+ }
+
}
}
@@ -546,16 +733,13 @@ static void bnxt_re_pacing_timer_exp(struct work_struct *work)
struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
dbq_pacing_work.work);
struct bnxt_qplib_db_pacing_data *pacing_data;
- u32 read_val, fifo_occup;
+ u32 fifo_occup;
if (!mutex_trylock(&rdev->pacing.dbq_lock))
return;
pacing_data = rdev->qplib_res.pacing_data;
- read_val = readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
- fifo_occup = BNXT_RE_MAX_FIFO_DEPTH -
- ((read_val & BNXT_RE_DB_FIFO_ROOM_MASK) >>
- BNXT_RE_DB_FIFO_ROOM_SHIFT);
+ fifo_occup = __get_fifo_occupancy(rdev);
if (fifo_occup > pacing_data->pacing_th)
goto restart_timer;
@@ -594,7 +778,7 @@ void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev)
* Increase the alarm_th to max so that other user lib instances do not
* keep alerting the driver.
*/
- pacing_data->alarm_th = BNXT_RE_MAX_FIFO_DEPTH;
+ pacing_data->alarm_th = pacing_data->fifo_max_depth;
pacing_data->do_pacing = BNXT_RE_MAX_DBR_DO_PACING;
cancel_work_sync(&rdev->dbq_fifo_check_work);
schedule_work(&rdev->dbq_fifo_check_work);
@@ -603,9 +787,6 @@ void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev)
static int bnxt_re_initialize_dbr_pacing(struct bnxt_re_dev *rdev)
{
- if (bnxt_re_hwrm_dbr_pacing_qcfg(rdev))
- return -EIO;
-
/* Allocate a page for app use */
rdev->pacing.dbr_page = (void *)__get_free_page(GFP_KERNEL);
if (!rdev->pacing.dbr_page)
@@ -614,6 +795,12 @@ static int bnxt_re_initialize_dbr_pacing(struct bnxt_re_dev *rdev)
memset((u8 *)rdev->pacing.dbr_page, 0, PAGE_SIZE);
rdev->qplib_res.pacing_data = (struct bnxt_qplib_db_pacing_data *)rdev->pacing.dbr_page;
+ if (bnxt_re_hwrm_dbr_pacing_qcfg(rdev)) {
+ free_page((u64)rdev->pacing.dbr_page);
+ rdev->pacing.dbr_page = NULL;
+ return -EIO;
+ }
+
/* MAP HW window 2 for reading db fifo depth */
writel(rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_BASE_MASK,
rdev->en_dev->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
@@ -623,13 +810,16 @@ static int bnxt_re_initialize_dbr_pacing(struct bnxt_re_dev *rdev)
rdev->pacing.dbr_bar_addr =
pci_resource_start(rdev->qplib_res.pdev, 0) + rdev->pacing.dbr_db_fifo_reg_off;
+ if (is_dbr_fifo_full(rdev)) {
+ free_page((u64)rdev->pacing.dbr_page);
+ rdev->pacing.dbr_page = NULL;
+ return -EIO;
+ }
+
rdev->pacing.pacing_algo_th = BNXT_RE_PACING_ALGO_THRESHOLD;
rdev->pacing.dbq_pacing_time = BNXT_RE_DBR_PACING_TIME;
rdev->pacing.dbr_def_do_pacing = BNXT_RE_DBR_DO_PACING_NO_CONGESTION;
rdev->pacing.do_pacing_save = rdev->pacing.dbr_def_do_pacing;
- rdev->qplib_res.pacing_data->fifo_max_depth = BNXT_RE_MAX_FIFO_DEPTH;
- rdev->qplib_res.pacing_data->fifo_room_mask = BNXT_RE_DB_FIFO_ROOM_MASK;
- rdev->qplib_res.pacing_data->fifo_room_shift = BNXT_RE_DB_FIFO_ROOM_SHIFT;
rdev->qplib_res.pacing_data->grc_reg_offset = rdev->pacing.dbr_db_fifo_reg_off;
bnxt_re_set_default_pacing_data(rdev);
/* Initialize worker for DBR Pacing */
@@ -779,17 +969,6 @@ static void bnxt_re_disassociate_ucontext(struct ib_ucontext *ibcontext)
}
/* Device */
-
-static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
-{
- struct ib_device *ibdev =
- ib_device_get_by_netdev(netdev, RDMA_DRIVER_BNXT_RE);
- if (!ibdev)
- return NULL;
-
- return container_of(ibdev, struct bnxt_re_dev, ibdev);
-}
-
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
char *buf)
{
@@ -820,6 +999,253 @@ static const struct attribute_group bnxt_re_dev_attr_group = {
.attrs = bnxt_re_attributes,
};
+static int bnxt_re_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr)
+{
+ struct bnxt_qplib_hwq *mr_hwq;
+ struct nlattr *table_attr;
+ struct bnxt_re_mr *mr;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
+ mr_hwq = &mr->qplib_mr.hwq;
+
+ if (rdma_nl_put_driver_u32(msg, "page_size",
+ mr_hwq->qe_ppg * mr_hwq->element_size))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "max_elements", mr_hwq->max_elements))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "element_size", mr_hwq->element_size))
+ goto err;
+ if (rdma_nl_put_driver_u64_hex(msg, "hwq", (unsigned long)mr_hwq))
+ goto err;
+ if (rdma_nl_put_driver_u64_hex(msg, "va", mr->qplib_mr.va))
+ goto err;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+ return -EMSGSIZE;
+}
+
+static int bnxt_re_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
+{
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_mr *mr;
+ int err, len;
+ void *data;
+
+ mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
+ rdev = mr->rdev;
+
+ err = bnxt_re_read_context_allowed(rdev);
+ if (err)
+ return err;
+
+ len = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ? BNXT_RE_CONTEXT_TYPE_MRW_SIZE_P7 :
+ BNXT_RE_CONTEXT_TYPE_MRW_SIZE_P5;
+ data = kzalloc(len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ err = bnxt_qplib_read_context(&rdev->rcfw, CMDQ_READ_CONTEXT_TYPE_MRW,
+ mr->qplib_mr.lkey, len, data);
+ if (!err)
+ err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data);
+
+ kfree(data);
+ return err;
+}
+
+static int bnxt_re_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
+{
+ struct bnxt_qplib_hwq *cq_hwq;
+ struct nlattr *table_attr;
+ struct bnxt_re_cq *cq;
+
+ cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
+ cq_hwq = &cq->qplib_cq.hwq;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ if (rdma_nl_put_driver_u32(msg, "cq_depth", cq_hwq->depth))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "max_elements", cq_hwq->max_elements))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "element_size", cq_hwq->element_size))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "max_wqe", cq->qplib_cq.max_wqe))
+ goto err;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+ return -EMSGSIZE;
+}
+
+static int bnxt_re_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
+{
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_cq *cq;
+ int err, len;
+ void *data;
+
+ cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
+ rdev = cq->rdev;
+
+ err = bnxt_re_read_context_allowed(rdev);
+ if (err)
+ return err;
+
+ len = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ? BNXT_RE_CONTEXT_TYPE_CQ_SIZE_P7 :
+ BNXT_RE_CONTEXT_TYPE_CQ_SIZE_P5;
+ data = kzalloc(len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ err = bnxt_qplib_read_context(&rdev->rcfw,
+ CMDQ_READ_CONTEXT_TYPE_CQ,
+ cq->qplib_cq.id, len, data);
+ if (!err)
+ err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data);
+
+ kfree(data);
+ return err;
+}
+
+static int bnxt_re_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp)
+{
+ struct bnxt_qplib_qp *qplib_qp;
+ struct nlattr *table_attr;
+ struct bnxt_re_qp *qp;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+ qplib_qp = &qp->qplib_qp;
+
+ if (rdma_nl_put_driver_u32(msg, "sq_max_wqe", qplib_qp->sq.max_wqe))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "sq_max_sge", qplib_qp->sq.max_sge))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "sq_wqe_size", qplib_qp->sq.wqe_size))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "sq_swq_start", qplib_qp->sq.swq_start))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "sq_swq_last", qplib_qp->sq.swq_last))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "rq_max_wqe", qplib_qp->rq.max_wqe))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "rq_max_sge", qplib_qp->rq.max_sge))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "rq_wqe_size", qplib_qp->rq.wqe_size))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "rq_swq_start", qplib_qp->rq.swq_start))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "rq_swq_last", qplib_qp->rq.swq_last))
+ goto err;
+ if (rdma_nl_put_driver_u32(msg, "timeout", qplib_qp->timeout))
+ goto err;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+ return -EMSGSIZE;
+}
+
+static int bnxt_re_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ibqp)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibqp->device, ibdev);
+ int err, len;
+ void *data;
+
+ err = bnxt_re_read_context_allowed(rdev);
+ if (err)
+ return err;
+
+ len = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ? BNXT_RE_CONTEXT_TYPE_QPC_SIZE_P7 :
+ BNXT_RE_CONTEXT_TYPE_QPC_SIZE_P5;
+ data = kzalloc(len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ err = bnxt_qplib_read_context(&rdev->rcfw, CMDQ_READ_CONTEXT_TYPE_QPC,
+ ibqp->qp_num, len, data);
+ if (!err)
+ err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data);
+
+ kfree(data);
+ return err;
+}
+
+static int bnxt_re_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq)
+{
+ struct nlattr *table_attr;
+ struct bnxt_re_srq *srq;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
+
+ if (rdma_nl_put_driver_u32_hex(msg, "wqe_size", srq->qplib_srq.wqe_size))
+ goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "max_wqe", srq->qplib_srq.max_wqe))
+ goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "max_sge", srq->qplib_srq.max_sge))
+ goto err;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+ return -EMSGSIZE;
+}
+
+static int bnxt_re_fill_res_srq_entry_raw(struct sk_buff *msg, struct ib_srq *ib_srq)
+{
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_srq *srq;
+ int err, len;
+ void *data;
+
+ srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
+ rdev = srq->rdev;
+
+ err = bnxt_re_read_context_allowed(rdev);
+ if (err)
+ return err;
+
+ len = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ? BNXT_RE_CONTEXT_TYPE_SRQ_SIZE_P7 :
+ BNXT_RE_CONTEXT_TYPE_SRQ_SIZE_P5;
+
+ data = kzalloc(len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ err = bnxt_qplib_read_context(&rdev->rcfw, CMDQ_READ_CONTEXT_TYPE_SRQ,
+ srq->qplib_srq.id, len, data);
+ if (!err)
+ err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data);
+
+ kfree(data);
+ return err;
+}
+
static const struct ib_device_ops bnxt_re_dev_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_BNXT_RE,
@@ -859,8 +1285,10 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.post_recv = bnxt_re_post_recv,
.post_send = bnxt_re_post_send,
.post_srq_recv = bnxt_re_post_srq_recv,
+ .process_mad = bnxt_re_process_mad,
.query_ah = bnxt_re_query_ah,
.query_device = bnxt_re_query_device,
+ .modify_device = bnxt_re_modify_device,
.query_pkey = bnxt_re_query_pkey,
.query_port = bnxt_re_query_port,
.query_qp = bnxt_re_query_qp,
@@ -877,6 +1305,17 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
};
+static const struct ib_device_ops restrack_ops = {
+ .fill_res_cq_entry = bnxt_re_fill_res_cq_entry,
+ .fill_res_cq_entry_raw = bnxt_re_fill_res_cq_entry_raw,
+ .fill_res_qp_entry = bnxt_re_fill_res_qp_entry,
+ .fill_res_qp_entry_raw = bnxt_re_fill_res_qp_entry_raw,
+ .fill_res_mr_entry = bnxt_re_fill_res_mr_entry,
+ .fill_res_mr_entry_raw = bnxt_re_fill_res_mr_entry_raw,
+ .fill_res_srq_entry = bnxt_re_fill_res_srq_entry,
+ .fill_res_srq_entry_raw = bnxt_re_fill_res_srq_entry_raw,
+};
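With these ops registered, the driver-specific rows surface through the rdma-netlink restrack interface. With a reasonably recent iproute2 the tables above should be visible via the driver-details flag, and the *_raw callbacks (gated by bnxt_re_read_context_allowed()) back the raw context dumps, along these lines:

	$ rdma res show cq dev bnxt_re0 -dd    # driver table: cq_depth, max_wqe, ...
	$ rdma res show qp -r                  # raw QPC dump via fill_res_qp_entry_raw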
+
static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
{
struct ib_device *ibdev = &rdev->ibdev;
@@ -890,7 +1329,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
addrconf_addr_eui48((u8 *)&ibdev->node_guid, rdev->netdev->dev_addr);
- ibdev->num_comp_vectors = rdev->num_msix - 1;
+ ibdev->num_comp_vectors = rdev->nqr->num_msix - 1;
ibdev->dev.parent = &rdev->en_dev->pdev->dev;
ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;
@@ -898,6 +1337,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->driver_def = bnxt_re_uapi_defs;
ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
+ ib_set_device_ops(ibdev, &restrack_ops);
ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
if (ret)
return ret;
@@ -907,7 +1347,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev);
}
-static struct bnxt_re_dev *bnxt_re_dev_add(struct bnxt_aux_priv *aux_priv,
+static struct bnxt_re_dev *bnxt_re_dev_add(struct auxiliary_device *adev,
struct bnxt_en_dev *en_dev)
{
struct bnxt_re_dev *rdev;
@@ -920,9 +1360,9 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct bnxt_aux_priv *aux_priv,
return NULL;
}
/* Default values */
- rdev->nb.notifier_call = NULL;
rdev->netdev = en_dev->net;
rdev->en_dev = en_dev;
+ rdev->adev = adev;
rdev->id = rdev->en_dev->pdev->devfn;
INIT_LIST_HEAD(&rdev->qp_list);
mutex_init(&rdev->qp_lock);
@@ -936,6 +1376,15 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct bnxt_aux_priv *aux_priv,
atomic_set(&rdev->stats.res.pd_count, 0);
rdev->cosq[0] = 0xFFFF;
rdev->cosq[1] = 0xFFFF;
+ rdev->cq_coalescing.buf_maxtime = BNXT_QPLIB_CQ_COAL_DEF_BUF_MAXTIME;
+ if (bnxt_re_chip_gen_p7(en_dev->chip_num)) {
+ rdev->cq_coalescing.normal_maxbuf = BNXT_QPLIB_CQ_COAL_DEF_NORMAL_MAXBUF_P7;
+ rdev->cq_coalescing.during_maxbuf = BNXT_QPLIB_CQ_COAL_DEF_DURING_MAXBUF_P7;
+ } else {
+ rdev->cq_coalescing.normal_maxbuf = BNXT_QPLIB_CQ_COAL_DEF_NORMAL_MAXBUF_P5;
+ rdev->cq_coalescing.during_maxbuf = BNXT_QPLIB_CQ_COAL_DEF_DURING_MAXBUF_P5;
+ }
+ rdev->cq_coalescing.en_ring_idle_mode = BNXT_QPLIB_CQ_COAL_DEF_EN_RING_IDLE_MODE;
return rdev;
}
@@ -975,12 +1424,15 @@ static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
struct bnxt_re_qp *qp)
{
- struct bnxt_re_srq *srq = container_of(qp->qplib_qp.srq, struct bnxt_re_srq,
- qplib_srq);
struct creq_qp_error_notification *err_event;
+ struct bnxt_re_srq *srq = NULL;
struct ib_event event = {};
unsigned int flags;
+ if (qp->qplib_qp.srq)
+ srq = container_of(qp->qplib_qp.srq, struct bnxt_re_srq,
+ qplib_srq);
+
if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
rdma_is_kernel_res(&qp->ib_qp.res)) {
flags = bnxt_re_lock_cqs(qp);
@@ -1208,15 +1660,9 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
{
struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
qplib_cq);
- u32 *cq_ptr;
- if (cq->ib_cq.comp_handler) {
- if (cq->uctx_cq_page) {
- cq_ptr = (u32 *)cq->uctx_cq_page;
- *cq_ptr = cq->qplib_cq.toggle;
- }
+ if (cq->ib_cq.comp_handler)
(*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
- }
return 0;
}
@@ -1225,8 +1671,8 @@ static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
int i;
- for (i = 1; i < rdev->num_msix; i++)
- bnxt_qplib_disable_nq(&rdev->nq[i - 1]);
+ for (i = 1; i < rdev->nqr->num_msix; i++)
+ bnxt_qplib_disable_nq(&rdev->nqr->nq[i - 1]);
if (rdev->qplib_res.rcfw)
bnxt_qplib_cleanup_res(&rdev->qplib_res);
@@ -1240,10 +1686,12 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
bnxt_qplib_init_res(&rdev->qplib_res);
- for (i = 1; i < rdev->num_msix ; i++) {
- db_offt = rdev->en_dev->msix_entries[i].db_offset;
- rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
- i - 1, rdev->en_dev->msix_entries[i].vector,
+ mutex_init(&rdev->nqr->load_lock);
+
+ for (i = 1; i < rdev->nqr->num_msix; i++) {
+ db_offt = rdev->nqr->msix_entries[i].db_offset;
+ rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nqr->nq[i - 1],
+ i - 1, rdev->nqr->msix_entries[i].vector,
db_offt, &bnxt_re_cqn_handler,
&bnxt_re_srqn_handler);
if (rc) {
@@ -1256,20 +1704,22 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
return 0;
fail:
for (i = num_vec_enabled; i >= 0; i--)
- bnxt_qplib_disable_nq(&rdev->nq[i]);
+ bnxt_qplib_disable_nq(&rdev->nqr->nq[i]);
return rc;
}
static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
{
+ struct bnxt_qplib_nq *nq;
u8 type;
int i;
- for (i = 0; i < rdev->num_msix - 1; i++) {
+ for (i = 0; i < rdev->nqr->num_msix - 1; i++) {
type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
- bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
- bnxt_qplib_free_nq(&rdev->nq[i]);
- rdev->nq[i].res = NULL;
+ nq = &rdev->nqr->nq[i];
+ bnxt_re_net_ring_free(rdev, nq->ring_id, type);
+ bnxt_qplib_free_nq(nq);
+ nq->res = NULL;
}
}
@@ -1296,12 +1746,11 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
/* Configure and allocate resources for qplib */
rdev->qplib_res.rcfw = &rdev->rcfw;
- rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw);
if (rc)
goto fail;
- rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
- rdev->netdev, &rdev->dev_attr);
+ rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->netdev);
if (rc)
goto fail;
@@ -1311,12 +1760,12 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
if (rc)
goto dealloc_res;
- for (i = 0; i < rdev->num_msix - 1; i++) {
+ for (i = 0; i < rdev->nqr->num_msix - 1; i++) {
struct bnxt_qplib_nq *nq;
- nq = &rdev->nq[i];
+ nq = &rdev->nqr->nq[i];
nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
- rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]);
+ rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, nq);
if (rc) {
ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x",
i, rc);
@@ -1324,17 +1773,17 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
}
type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;
- rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count;
+ rattr.pages = nq->hwq.pbl[rdev->nqr->nq[i].hwq.level].pg_count;
rattr.type = type;
rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
- rattr.lrid = rdev->en_dev->msix_entries[i + 1].ring_idx;
+ rattr.lrid = rdev->nqr->msix_entries[i + 1].ring_idx;
rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
if (rc) {
ibdev_err(&rdev->ibdev,
"Failed to allocate NQ fw id with rc = 0x%x",
rc);
- bnxt_qplib_free_nq(&rdev->nq[i]);
+ bnxt_qplib_free_nq(nq);
goto free_nq;
}
num_vec_created++;
@@ -1343,8 +1792,8 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
free_nq:
for (i = num_vec_created - 1; i >= 0; i--) {
type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
- bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
- bnxt_qplib_free_nq(&rdev->nq[i]);
+ bnxt_re_net_ring_free(rdev, rdev->nqr->nq[i].ring_id, type);
+ bnxt_qplib_free_nq(&rdev->nqr->nq[i]);
}
bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
&rdev->dpi_privileged);
@@ -1384,11 +1833,8 @@ static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
{
- int mask = IB_QP_STATE;
- struct ib_qp_attr qp_attr;
struct bnxt_re_qp *qp;
- qp_attr.qp_state = IB_QPS_ERR;
mutex_lock(&rdev->qp_lock);
list_for_each_entry(qp, &rdev->qp_list, list) {
/* Modify the state of all QPs except QP1/Shadow QP */
@@ -1396,12 +1842,9 @@ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
if (qp->qplib_qp.state !=
CMDQ_MODIFY_QP_NEW_STATE_RESET &&
qp->qplib_qp.state !=
- CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ CMDQ_MODIFY_QP_NEW_STATE_ERR)
bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
1, IB_EVENT_QP_FATAL);
- bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
- NULL);
- }
}
}
mutex_unlock(&rdev->qp_lock);
@@ -1482,6 +1925,26 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
return 0;
}
+static void bnxt_re_net_unregister_async_event(struct bnxt_re_dev *rdev)
+{
+ if (rdev->is_virtfn)
+ return;
+
+ memset(&rdev->event_bitmap, 0, sizeof(rdev->event_bitmap));
+ bnxt_register_async_events(rdev->en_dev, &rdev->event_bitmap,
+ ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
+}
+
+static void bnxt_re_net_register_async_event(struct bnxt_re_dev *rdev)
+{
+ if (rdev->is_virtfn)
+ return;
+
+ rdev->event_bitmap |= (1 << ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
+ bnxt_register_async_events(rdev->en_dev, &rdev->event_bitmap,
+ ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
+}
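Register and unregister deliberately share the same L2 entry point; as the pair above reads, the bitmap handed to bnxt_register_async_events() acts as the current subscription filter:

	/* register:   event_bitmap |= BIT(DCB_CONFIG_CHANGE), then pass to L2
	 * unregister: event_bitmap  = 0,                      then pass to L2
	 * i.e. clearing a bit, rather than a separate call, unsubscribes.
	 */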
+
static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
{
struct bnxt_en_dev *en_dev = rdev->en_dev;
@@ -1539,11 +2002,31 @@ static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
return rc;
}
-static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
+static int bnxt_re_alloc_nqr_mem(struct bnxt_re_dev *rdev)
+{
+ rdev->nqr = kzalloc(sizeof(*rdev->nqr), GFP_KERNEL);
+ if (!rdev->nqr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void bnxt_re_free_nqr_mem(struct bnxt_re_dev *rdev)
+{
+ kfree(rdev->nqr);
+ rdev->nqr = NULL;
+}
+
+static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
{
u8 type;
int rc;
+ bnxt_re_debugfs_rem_pdev(rdev);
+
+ bnxt_re_net_unregister_async_event(rdev);
+ bnxt_re_uninit_dcb_wq(rdev);
+
if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
cancel_delayed_work_sync(&rdev->worker);
@@ -1566,14 +2049,17 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
}
- rdev->num_msix = 0;
+ rdev->nqr->num_msix = 0;
if (rdev->pacing.dbr_pacing)
bnxt_re_deinitialize_dbr_pacing(rdev);
+ bnxt_re_free_nqr_mem(rdev);
bnxt_re_destroy_chip_ctx(rdev);
- if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
- bnxt_unregister_dev(rdev->en_dev);
+ if (op_type == BNXT_RE_COMPLETE_REMOVE) {
+ if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
+ bnxt_unregister_dev(rdev->en_dev);
+ }
}
/* worker thread for polling periodic events. Now used for QoS programming*/
@@ -1586,7 +2072,7 @@ static void bnxt_re_worker(struct work_struct *work)
schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}
-static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
+static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
{
struct bnxt_re_ring_attr rattr = {};
struct bnxt_qplib_creq_ctx *creq;
@@ -1595,16 +2081,29 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
u8 type;
int rc;
- /* Registered a new RoCE device instance to netdev */
- rc = bnxt_re_register_netdev(rdev);
- if (rc) {
+ if (op_type == BNXT_RE_COMPLETE_INIT) {
+ /* Register the new RoCE device instance with the netdev */
+ rc = bnxt_re_register_netdev(rdev);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to register with netedev: %#x\n", rc);
+ return -EINVAL;
+ }
+ }
+ set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
+
+ if (rdev->en_dev->ulp_tbl->msix_requested < BNXT_RE_MIN_MSIX) {
ibdev_err(&rdev->ibdev,
- "Failed to register with netedev: %#x\n", rc);
+ "RoCE requires minimum 2 MSI-X vectors, but only %d reserved\n",
+ rdev->en_dev->ulp_tbl->msix_requested);
+ bnxt_unregister_dev(rdev->en_dev);
+ clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
return -EINVAL;
}
- set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
+ ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n",
+ rdev->en_dev->ulp_tbl->msix_requested);
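The minimum of two follows from the vector layout used throughout this file: vector 0 (BNXT_RE_AEQ_IDX) services the CREQ/AEQ and everything from BNXT_RE_NQ_IDX up services completion NQs, which is also why bnxt_re_register_ib() sets num_comp_vectors to num_msix - 1:

	/* MSI-X layout: [0] AEQ/CREQ, [1 .. num_msix-1] NQs
	 * => at least 2 vectors needed, num_comp_vectors = num_msix - 1 */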
- rc = bnxt_re_setup_chip_ctx(rdev, wqe_mode);
+ rc = bnxt_re_setup_chip_ctx(rdev);
if (rc) {
bnxt_unregister_dev(rdev->en_dev);
clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
@@ -1612,27 +2111,27 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
return -EINVAL;
}
+ rc = bnxt_re_alloc_nqr_mem(rdev);
+ if (rc) {
+ bnxt_re_destroy_chip_ctx(rdev);
+ bnxt_unregister_dev(rdev->en_dev);
+ clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
+ return rc;
+ }
+ rdev->nqr->num_msix = rdev->en_dev->ulp_tbl->msix_requested;
+ memcpy(rdev->nqr->msix_entries, rdev->en_dev->msix_entries,
+ sizeof(struct bnxt_msix_entry) * rdev->nqr->num_msix);
+
/* Check whether VF or PF */
bnxt_re_get_sriov_func_type(rdev);
- if (!rdev->en_dev->ulp_tbl->msix_requested) {
- ibdev_err(&rdev->ibdev,
- "Failed to get MSI-X vectors: %#x\n", rc);
- rc = -EINVAL;
- goto fail;
- }
- ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n",
- rdev->en_dev->ulp_tbl->msix_requested);
- rdev->num_msix = rdev->en_dev->ulp_tbl->msix_requested;
-
bnxt_re_query_hwrm_intf_version(rdev);
/* Establish RCFW Communication Channel to initialize the context
* memory for the function and all child VFs
*/
rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw,
- &rdev->qplib_ctx,
- BNXT_RE_MAX_QPC_COUNT);
+ &rdev->qplib_ctx);
if (rc) {
ibdev_err(&rdev->ibdev,
"Failed to allocate RCFW Channel: %#x\n", rc);
@@ -1646,14 +2145,14 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
rattr.type = type;
rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
- rattr.lrid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
+ rattr.lrid = rdev->nqr->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
goto free_rcfw;
}
- db_offt = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].db_offset;
- vid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].vector;
+ db_offt = rdev->nqr->msix_entries[BNXT_RE_AEQ_IDX].db_offset;
+ vid = rdev->nqr->msix_entries[BNXT_RE_AEQ_IDX].vector;
rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
vid, db_offt,
&bnxt_re_aeq_handler);
@@ -1673,7 +2172,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
rdev->pacing.dbr_pacing = false;
}
}
- rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw);
if (rc)
goto disable_rcfw;
@@ -1722,6 +2221,11 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);
if (!rdev->is_virtfn) {
+ /* Query f/w defaults of CC params */
+ rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, &rdev->cc_param);
+ if (rc)
+ ibdev_warn(&rdev->ibdev, "Failed to query CC defaults\n");
+
rc = bnxt_re_setup_qos(rdev);
if (rc)
ibdev_info(&rdev->ibdev,
@@ -1730,13 +2234,18 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
- /*
- * Use the total VF count since the actual VF count may not be
- * available at this point.
- */
- bnxt_re_vf_res_config(rdev);
+
+ if (!(rdev->qplib_res.en_dev->flags & BNXT_EN_FLAG_ROCE_VF_RES_MGMT))
+ bnxt_re_vf_res_config(rdev);
}
hash_init(rdev->cq_hash);
+ if (rdev->chip_ctx->modes.toggle_bits & BNXT_QPLIB_SRQ_TOGGLE_BIT)
+ hash_init(rdev->srq_hash);
+
+ bnxt_re_debugfs_add_pdev(rdev);
+
+ bnxt_re_init_dcb_wq(rdev);
+ bnxt_re_net_register_async_event(rdev);
return 0;
free_sctx:
@@ -1751,50 +2260,11 @@ free_ring:
free_rcfw:
bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
- bnxt_re_dev_uninit(rdev);
+ bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
return rc;
}
-static int bnxt_re_add_device(struct auxiliary_device *adev, u8 wqe_mode)
-{
- struct bnxt_aux_priv *aux_priv =
- container_of(adev, struct bnxt_aux_priv, aux_dev);
- struct bnxt_en_dev *en_dev;
- struct bnxt_re_dev *rdev;
- int rc;
-
- /* en_dev should never be NULL as long as adev and aux_dev are valid. */
- en_dev = aux_priv->edev;
-
- rdev = bnxt_re_dev_add(aux_priv, en_dev);
- if (!rdev || !rdev_to_dev(rdev)) {
- rc = -ENOMEM;
- goto exit;
- }
-
- rc = bnxt_re_dev_init(rdev, wqe_mode);
- if (rc)
- goto re_dev_dealloc;
-
- rc = bnxt_re_ib_init(rdev);
- if (rc) {
- pr_err("Failed to register with IB: %s",
- aux_priv->aux_dev.name);
- goto re_dev_uninit;
- }
- auxiliary_set_drvdata(adev, rdev);
-
- return 0;
-
-re_dev_uninit:
- bnxt_re_dev_uninit(rdev);
-re_dev_dealloc:
- ib_dealloc_device(&rdev->ibdev);
-exit:
- return rc;
-}
-
static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
{
struct bnxt_qplib_cc_param cc_param = {};
@@ -1809,142 +2279,139 @@ static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
if (enable) {
cc_param.enable = 1;
- cc_param.cc_mode = CMDQ_MODIFY_ROCE_CC_CC_MODE_PROBABILISTIC_CC_MODE;
+ cc_param.tos_ecn = 1;
}
- cc_param.mask = (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_CC_MODE |
- CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC |
+ cc_param.mask = (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC |
CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN);
if (bnxt_qplib_modify_cc(&rdev->qplib_res, &cc_param))
ibdev_err(&rdev->ibdev, "Failed to setup CC enable = %d\n", enable);
}
-/*
- * "Notifier chain callback can be invoked for the same chain from
- * different CPUs at the same time".
- *
- * For cases when the netdev is already present, our call to the
- * register_netdevice_notifier() will actually get the rtnl_lock()
- * before sending NETDEV_REGISTER and (if up) NETDEV_UP
- * events.
- *
- * But for cases when the netdev is not already present, the notifier
- * chain is subjected to be invoked from different CPUs simultaneously.
- *
- * This is protected by the netdev_mutex.
- */
-static int bnxt_re_netdev_event(struct notifier_block *notifier,
- unsigned long event, void *ptr)
+static void bnxt_re_update_en_info_rdev(struct bnxt_re_dev *rdev,
+ struct bnxt_re_en_dev_info *en_info,
+ struct auxiliary_device *adev)
{
- struct net_device *real_dev, *netdev = netdev_notifier_info_to_dev(ptr);
+ /* Before updating the rdev pointer in bnxt_re_en_dev_info structure,
+ * take the rtnl lock to avoid accessing invalid rdev pointer from
+ * L2 ULP callbacks. This is applicable in all the places where rdev
+ * pointer is updated in bnxt_re_en_dev_info.
+ */
+ rtnl_lock();
+ en_info->rdev = rdev;
+ rtnl_unlock();
+}
+
+static int bnxt_re_add_device(struct auxiliary_device *adev, u8 op_type)
+{
+ struct bnxt_aux_priv *aux_priv =
+ container_of(adev, struct bnxt_aux_priv, aux_dev);
+ struct bnxt_re_en_dev_info *en_info;
+ struct bnxt_en_dev *en_dev;
struct bnxt_re_dev *rdev;
+ int rc;
+
+ en_info = auxiliary_get_drvdata(adev);
+ en_dev = en_info->en_dev;
- real_dev = rdma_vlan_dev_real_dev(netdev);
- if (!real_dev)
- real_dev = netdev;
- if (real_dev != netdev)
+ rdev = bnxt_re_dev_add(adev, en_dev);
+ if (!rdev || !rdev_to_dev(rdev)) {
+ rc = -ENOMEM;
goto exit;
+ }
- rdev = bnxt_re_from_netdev(real_dev);
- if (!rdev)
- return NOTIFY_DONE;
+ bnxt_re_update_en_info_rdev(rdev, en_info, adev);
+ rc = bnxt_re_dev_init(rdev, op_type);
+ if (rc)
+ goto re_dev_dealloc;
- switch (event) {
- case NETDEV_UP:
- case NETDEV_DOWN:
- case NETDEV_CHANGE:
- bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
- netif_carrier_ok(real_dev) ?
- IB_EVENT_PORT_ACTIVE :
- IB_EVENT_PORT_ERR);
- break;
- default:
- break;
+ rc = bnxt_re_ib_init(rdev);
+ if (rc) {
+ pr_err("Failed to register with IB: %s",
+ aux_priv->aux_dev.name);
+ goto re_dev_uninit;
}
- ib_device_put(&rdev->ibdev);
+
+ bnxt_re_setup_cc(rdev, true);
+
+ return 0;
+
+re_dev_uninit:
+ bnxt_re_update_en_info_rdev(NULL, en_info, adev);
+ bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
+re_dev_dealloc:
+ ib_dealloc_device(&rdev->ibdev);
exit:
- return NOTIFY_DONE;
+ return rc;
}
#define BNXT_ADEV_NAME "bnxt_en"
-static void bnxt_re_remove(struct auxiliary_device *adev)
+static void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 op_type,
+ struct auxiliary_device *aux_dev)
{
- struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
-
- if (!rdev)
- return;
-
- mutex_lock(&bnxt_re_mutex);
- if (rdev->nb.notifier_call) {
- unregister_netdevice_notifier(&rdev->nb);
- rdev->nb.notifier_call = NULL;
- } else {
- /* If notifier is null, we should have already done a
- * clean up before coming here.
- */
- goto skip_remove;
- }
bnxt_re_setup_cc(rdev, false);
ib_unregister_device(&rdev->ibdev);
- bnxt_re_dev_uninit(rdev);
+ bnxt_re_dev_uninit(rdev, op_type);
ib_dealloc_device(&rdev->ibdev);
-skip_remove:
+}
+
+static void bnxt_re_remove(struct auxiliary_device *adev)
+{
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+ struct bnxt_re_dev *rdev;
+
+ mutex_lock(&bnxt_re_mutex);
+ rdev = en_info->rdev;
+
+ if (rdev)
+ bnxt_re_remove_device(rdev, BNXT_RE_COMPLETE_REMOVE, adev);
+ kfree(en_info);
mutex_unlock(&bnxt_re_mutex);
}
static int bnxt_re_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
- struct bnxt_re_dev *rdev;
+ struct bnxt_aux_priv *aux_priv =
+ container_of(adev, struct bnxt_aux_priv, aux_dev);
+ struct bnxt_re_en_dev_info *en_info;
+ struct bnxt_en_dev *en_dev;
int rc;
+ en_dev = aux_priv->edev;
+
mutex_lock(&bnxt_re_mutex);
- rc = bnxt_re_add_device(adev, BNXT_QPLIB_WQE_MODE_STATIC);
- if (rc) {
+ en_info = kzalloc(sizeof(*en_info), GFP_KERNEL);
+ if (!en_info) {
mutex_unlock(&bnxt_re_mutex);
- return rc;
+ return -ENOMEM;
}
+ en_info->en_dev = en_dev;
- rdev = auxiliary_get_drvdata(adev);
+ auxiliary_set_drvdata(adev, en_info);
- rdev->nb.notifier_call = bnxt_re_netdev_event;
- rc = register_netdevice_notifier(&rdev->nb);
- if (rc) {
- rdev->nb.notifier_call = NULL;
- pr_err("%s: Cannot register to netdevice_notifier",
- ROCE_DRV_MODULE_NAME);
- goto err;
- }
-
- bnxt_re_setup_cc(rdev, true);
- mutex_unlock(&bnxt_re_mutex);
- return 0;
+ rc = bnxt_re_add_device(adev, BNXT_RE_COMPLETE_INIT);
+ if (rc)
+ kfree(en_info);
-err:
mutex_unlock(&bnxt_re_mutex);
- bnxt_re_remove(adev);
return rc;
}
static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
{
- struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
-
- if (!rdev)
- return 0;
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+ struct bnxt_en_dev *en_dev;
+ struct bnxt_re_dev *rdev;
+ rdev = en_info->rdev;
+ en_dev = en_info->en_dev;
mutex_lock(&bnxt_re_mutex);
- /* L2 driver may invoke this callback during device error/crash or device
- * reset. Current RoCE driver doesn't recover the device in case of
- * error. Handle the error by dispatching fatal events to all qps
- * ie. by calling bnxt_re_dev_stop and release the MSIx vectors as
- * L2 driver want to modify the MSIx table.
- */
ibdev_info(&rdev->ibdev, "Handle device suspend call");
/* Check the current device state from bnxt_en_dev and move the
@@ -1952,17 +2419,20 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
* This prevents more commands to HW during clean-up,
* in case the device is already in error.
*/
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &rdev->en_dev->en_state))
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &rdev->en_dev->en_state)) {
set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
+ set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
+ wake_up_all(&rdev->rcfw.cmdq.waitq);
+ bnxt_re_dev_stop(rdev);
+ }
- bnxt_re_dev_stop(rdev);
- bnxt_re_stop_irq(rdev);
- /* Move the device states to detached and avoid sending any more
- * commands to HW
- */
- set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
- set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
- wake_up_all(&rdev->rcfw.cmdq.waitq);
+ if (rdev->pacing.dbr_pacing)
+ bnxt_re_set_pacing_dev_state(rdev);
+
+ ibdev_info(&rdev->ibdev, "%s: L2 driver notified to stop en_state 0x%lx",
+ __func__, en_dev->en_state);
+ bnxt_re_remove_device(rdev, BNXT_RE_PRE_RECOVERY_REMOVE, adev);
+ bnxt_re_update_en_info_rdev(NULL, en_info, adev);
mutex_unlock(&bnxt_re_mutex);
return 0;
@@ -1970,25 +2440,28 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
static int bnxt_re_resume(struct auxiliary_device *adev)
{
- struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
-
- if (!rdev)
- return 0;
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+ struct bnxt_re_dev *rdev;
mutex_lock(&bnxt_re_mutex);
- /* L2 driver may invoke this callback during device recovery, resume.
- * reset. Current RoCE driver doesn't recover the device in case of
- * error. Handle the error by dispatching fatal events to all qps
- * ie. by calling bnxt_re_dev_stop and release the MSIx vectors as
- * L2 driver want to modify the MSIx table.
- */
-
- ibdev_info(&rdev->ibdev, "Handle device resume call");
+ bnxt_re_add_device(adev, BNXT_RE_POST_RECOVERY_INIT);
+ rdev = en_info->rdev;
+ ibdev_info(&rdev->ibdev, "Device resume completed");
mutex_unlock(&bnxt_re_mutex);
return 0;
}
+static void bnxt_re_shutdown(struct auxiliary_device *adev)
+{
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+ struct bnxt_re_dev *rdev;
+
+ rdev = en_info->rdev;
+ ib_unregister_device(&rdev->ibdev);
+ bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
+}
+
static const struct auxiliary_device_id bnxt_re_id_table[] = {
{ .name = BNXT_ADEV_NAME ".rdma", },
{},
@@ -2011,18 +2484,24 @@ static int __init bnxt_re_mod_init(void)
int rc;
pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);
+ bnxt_re_register_debugfs();
+
rc = auxiliary_driver_register(&bnxt_re_driver);
if (rc) {
pr_err("%s: Failed to register auxiliary driver\n",
ROCE_DRV_MODULE_NAME);
- return rc;
+ goto err_debug;
}
return 0;
+err_debug:
+ bnxt_re_unregister_debugfs();
+ return rc;
}
static void __exit bnxt_re_mod_exit(void)
{
auxiliary_driver_unregister(&bnxt_re_driver);
+ bnxt_re_unregister_debugfs();
}
module_init(bnxt_re_mod_init);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 439d0c7c5d0c..be34c605d516 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -54,6 +54,10 @@
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
+#include <rdma/ib_addr.h>
+#include "bnxt_ulp.h"
+#include "bnxt_re.h"
+#include "ib_verbs.h"
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
@@ -323,6 +327,7 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
case NQ_BASE_TYPE_CQ_NOTIFICATION:
{
struct nq_cn *nqcne = (struct nq_cn *)nqe;
+ struct bnxt_re_cq *cq_p;
q_handle = le32_to_cpu(nqcne->cq_handle_low);
q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
@@ -333,6 +338,10 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
cq->toggle = (le16_to_cpu(nqe->info10_type) &
NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
cq->dbinfo.toggle = cq->toggle;
+ cq_p = container_of(cq, struct bnxt_re_cq, qplib_cq);
+ if (cq_p->uctx_cq_page)
+ *((u32 *)cq_p->uctx_cq_page) = cq->toggle;
+
bnxt_qplib_armen_db(&cq->dbinfo,
DBC_DBC_TYPE_CQ_ARMENA);
spin_lock_bh(&cq->compl_lock);
@@ -347,6 +356,7 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
case NQ_BASE_TYPE_SRQ_EVENT:
{
struct bnxt_qplib_srq *srq;
+ struct bnxt_re_srq *srq_p;
struct nq_srq_event *nqsrqe =
(struct nq_srq_event *)nqe;
@@ -354,6 +364,12 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t)
q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
<< 32;
srq = (struct bnxt_qplib_srq *)q_handle;
+ srq->toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK)
+ >> NQ_CN_TOGGLE_SFT;
+ srq->dbinfo.toggle = srq->toggle;
+ srq_p = container_of(srq, struct bnxt_re_srq, qplib_srq);
+ if (srq_p->uctx_srq_page)
+ *((u32 *)srq_p->uctx_srq_page) = srq->toggle;
bnxt_qplib_armen_db(&srq->dbinfo,
DBC_DBC_TYPE_SRQ_ARMENA);
if (nq->srqn_handler(nq,
@@ -540,6 +556,7 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
nq->pdev = pdev;
nq->cqn_handler = cqn_handler;
nq->srqn_handler = srqn_handler;
+ nq->load = 0;
/* Have a task to schedule CQ notifiers in post send case */
nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
@@ -642,13 +659,6 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
if (rc)
return rc;
-
- srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
- GFP_KERNEL);
- if (!srq->swq) {
- rc = -ENOMEM;
- goto fail;
- }
srq->dbinfo.flags = 0;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_CREATE_SRQ,
@@ -677,9 +687,17 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
spin_lock_init(&srq->lock);
srq->start_idx = 0;
srq->last_idx = srq->hwq.max_elements - 1;
- for (idx = 0; idx < srq->hwq.max_elements; idx++)
- srq->swq[idx].next_idx = idx + 1;
- srq->swq[srq->last_idx].next_idx = -1;
+ if (!srq->hwq.is_user) {
+ srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
+ GFP_KERNEL);
+ if (!srq->swq) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ for (idx = 0; idx < srq->hwq.max_elements; idx++)
+ srq->swq[idx].next_idx = idx + 1;
+ srq->swq[srq->last_idx].next_idx = -1;
+ }
srq->id = le32_to_cpu(resp.xid);
srq->dbinfo.hwq = &srq->hwq;
@@ -809,13 +827,13 @@ static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
int indx;
- que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
+ que->swq = kcalloc(que->max_sw_wqe, sizeof(*que->swq), GFP_KERNEL);
if (!que->swq)
return -ENOMEM;
que->swq_start = 0;
- que->swq_last = que->max_wqe - 1;
- for (indx = 0; indx < que->max_wqe; indx++)
+ que->swq_last = que->max_sw_wqe - 1;
+ for (indx = 0; indx < que->max_sw_wqe; indx++)
que->swq[indx].next_idx = indx + 1;
que->swq[que->swq_last].next_idx = 0; /* Make it circular */
que->swq_last = 0;
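
The software queue is a flat array whose entries are chained through next_idx into a circular list, now sized by max_sw_wqe rather than max_wqe. A minimal user-space model of the init-and-walk pattern above (names are illustrative, not the driver's):

    #include <stdio.h>
    #include <stdlib.h>

    struct swq {
        int next_idx;    /* link to the next entry */
    };

    int main(void)
    {
        unsigned int max_sw_wqe = 4, idx, i;
        struct swq *swq = calloc(max_sw_wqe, sizeof(*swq));

        if (!swq)
            return 1;
        /* Chain the entries and close the loop, as alloc_init_swq does */
        for (i = 0; i < max_sw_wqe; i++)
            swq[i].next_idx = i + 1;
        swq[max_sw_wqe - 1].next_idx = 0;    /* make it circular */

        idx = 0;    /* swq_start */
        for (i = 0; i < 6; i++) {    /* walking 6 entries wraps around */
            printf("use entry %u\n", idx);
            idx = swq[idx].next_idx;
        }
        free(swq);
        return 0;
    }
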
@@ -851,7 +869,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &sq->sg_info;
hwq_attr.stride = sizeof(struct sq_sge);
- hwq_attr.depth = bnxt_qplib_get_depth(sq);
+ hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, false);
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
if (rc)
@@ -879,7 +897,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &rq->sg_info;
hwq_attr.stride = sizeof(struct sq_sge);
- hwq_attr.depth = bnxt_qplib_get_depth(rq);
+ hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
if (rc)
@@ -983,9 +1001,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
u32 tbl_indx;
u16 nsge;
- if (res->dattr)
- qp->dev_cap_flags = res->dattr->dev_cap_flags;
-
+ qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
sq->dbinfo.flags = 0;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_CREATE_QP,
@@ -1002,7 +1018,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
sizeof(struct sq_psn_search_ext) :
sizeof(struct sq_psn_search);
- if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
+ if (qp->is_host_msn_tbl) {
psn_sz = sizeof(struct sq_msn_search);
qp->msn = 0;
}
@@ -1011,12 +1027,18 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &sq->sg_info;
hwq_attr.stride = sizeof(struct sq_sge);
- hwq_attr.depth = bnxt_qplib_get_depth(sq);
+ hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true);
hwq_attr.aux_stride = psn_sz;
- hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
+ hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
+ : 0;
/* Update msn tbl size */
- if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) {
- hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
+ if (qp->is_host_msn_tbl && psn_sz) {
+ if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
+ hwq_attr.aux_depth =
+ roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
+ else
+ hwq_attr.aux_depth =
+ roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)) / 2;
qp->msn_tbl_sz = hwq_attr.aux_depth;
qp->msn = 0;
}
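
For devices with the host MSN table, the aux queue above now holds MSN search entries sized to the next power of two of the SQ size, halved for variable-WQE mode (presumably because a variable WQE occupies at least two 16-byte slots, so at most half as many WQEs fit). A worked sketch of the sizing, with roundup_pow_of_two() reimplemented locally for illustration:

    #include <stdio.h>

    /* Local stand-in for the kernel's roundup_pow_of_two() */
    static unsigned int roundup_pow2(unsigned int n)
    {
        unsigned int p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        unsigned int max_wqe = 800;  /* static mode: SQ size in WQEs */
        unsigned int slots = 1024;   /* variable mode: SQ size in 16B slots */

        printf("static aux_depth:   %u\n", roundup_pow2(max_wqe));    /* 1024 */
        printf("variable aux_depth: %u\n", roundup_pow2(slots) / 2);  /*  512 */
        return 0;
    }
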
@@ -1026,13 +1048,14 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
if (rc)
return rc;
- rc = bnxt_qplib_alloc_init_swq(sq);
- if (rc)
- goto fail_sq;
-
- if (psn_sz)
- bnxt_qplib_init_psn_ptr(qp, psn_sz);
+ if (!sq->hwq.is_user) {
+ rc = bnxt_qplib_alloc_init_swq(sq);
+ if (rc)
+ goto fail_sq;
+ if (psn_sz)
+ bnxt_qplib_init_psn_ptr(qp, psn_sz);
+ }
req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
pbl = &sq->hwq.pbl[PBL_LVL_0];
req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
@@ -1051,16 +1074,18 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.res = res;
hwq_attr.sginfo = &rq->sg_info;
hwq_attr.stride = sizeof(struct sq_sge);
- hwq_attr.depth = bnxt_qplib_get_depth(rq);
+ hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
hwq_attr.aux_stride = 0;
hwq_attr.aux_depth = 0;
hwq_attr.type = HWQ_TYPE_QUEUE;
rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
if (rc)
goto sq_swq;
- rc = bnxt_qplib_alloc_init_swq(rq);
- if (rc)
- goto fail_rq;
+ if (!rq->hwq.is_user) {
+ rc = bnxt_qplib_alloc_init_swq(rq);
+ if (rc)
+ goto fail_rq;
+ }
req.rq_size = cpu_to_le32(rq->max_wqe);
pbl = &rq->hwq.pbl[PBL_LVL_0];
@@ -1088,7 +1113,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
- if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
+ if (bnxt_ext_stats_supported(res->cctx, res->dattr->dev_cap_flags, res->is_vf))
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;
req.qp_flags = cpu_to_le32(qp_flags);
@@ -1156,9 +1181,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
rq->dbinfo.db = qp->dpi->dbr;
rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
}
+ spin_lock_bh(&rcfw->tbl_lock);
tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
+ spin_unlock_bh(&rcfw->tbl_lock);
return 0;
fail:
@@ -1190,8 +1217,6 @@ static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
qp->path_mtu =
CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
}
- qp->modify_flags &=
- ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
/* Bono FW require the max_dest_rd_atomic to be >= 1 */
if (qp->max_dest_rd_atomic < 1)
qp->max_dest_rd_atomic = 1;
@@ -1265,12 +1290,56 @@ static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
}
}
+static void bnxt_set_mandatory_attributes(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_qp *qp,
+ struct cmdq_modify_qp *req)
+{
+ u32 mandatory_flags = 0;
+
+ if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
+ mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
+
+ if (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_INIT &&
+ qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR) {
+ if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC && qp->srq)
+ req->flags = cpu_to_le16(CMDQ_MODIFY_QP_FLAGS_SRQ_USED);
+ mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
+ }
+
+ if (_is_min_rnr_in_rtr_rts_mandatory(res->dattr->dev_cap_flags2) &&
+ (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
+ qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS)) {
+ if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
+ mandatory_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
+ }
+
+ if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_UD ||
+ qp->type == CMDQ_MODIFY_QP_QP_TYPE_GSI)
+ mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
+
+ qp->modify_flags |= mandatory_flags;
+ req->qp_type = qp->type;
+}
+
+static bool is_optimized_state_transition(struct bnxt_qplib_qp *qp)
+{
+ if ((qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_INIT &&
+ qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR) ||
+ (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
+ qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS))
+ return true;
+
+ return false;
+}
+
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct creq_modify_qp_resp resp = {};
struct bnxt_qplib_cmdqmsg msg = {};
struct cmdq_modify_qp req = {};
+ u16 vlan_pcp_vlan_dei_vlan_id;
u32 temp32[4];
u32 bmask;
int rc;
@@ -1281,6 +1350,12 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
/* Filter out the qp_attr_mask based on the state->new transition */
__filter_modify_flags(qp);
+ if (qp->modify_flags & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
+ /* Set mandatory attributes for INIT -> RTR and RTR -> RTS transition */
+ if (_is_optimize_modify_qp_supported(res->dattr->dev_cap_flags2) &&
+ is_optimized_state_transition(qp))
+ bnxt_set_mandatory_attributes(res, qp, &req);
+ }
bmask = qp->modify_flags;
req.modify_mask = cpu_to_le32(qp->modify_flags);
req.qp_cid = cpu_to_le32(qp->id);
@@ -1361,7 +1436,16 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
- req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID) {
+ vlan_pcp_vlan_dei_vlan_id =
+ ((res->sgid_tbl.tbl[qp->ah.sgid_index].vlan_id <<
+ CMDQ_MODIFY_QP_VLAN_ID_SFT) &
+ CMDQ_MODIFY_QP_VLAN_ID_MASK);
+ vlan_pcp_vlan_dei_vlan_id |=
+ ((qp->ah.sl << CMDQ_MODIFY_QP_VLAN_PCP_SFT) &
+ CMDQ_MODIFY_QP_VLAN_PCP_MASK);
+ req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(vlan_pcp_vlan_dei_vlan_id);
+ }
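
The VLAN word is now rebuilt from the SGID entry's VLAN ID and the AH's service level instead of passing qp->vlan_id through unmasked. The CMDQ_MODIFY_QP_VLAN_* shift/mask values are not part of this hunk; the sketch below assumes the standard 802.1Q TCI layout (VLAN ID in bits 0-11, DEI in bit 12, PCP in bits 13-15) that the field name suggests:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed 802.1Q TCI layout; the real CMDQ_MODIFY_QP_VLAN_* macros
     * are defined in roce_hsi.h and may differ.
     */
    #define VLAN_ID_SFT   0
    #define VLAN_ID_MASK  0x0fffu
    #define VLAN_PCP_SFT  13
    #define VLAN_PCP_MASK 0xe000u

    int main(void)
    {
        uint16_t vlan_id = 100, sl = 5, word = 0;

        word |= (vlan_id << VLAN_ID_SFT) & VLAN_ID_MASK;
        word |= (sl << VLAN_PCP_SFT) & VLAN_PCP_MASK;
        printf("vlan_pcp_vlan_dei_vlan_id = 0x%04x\n", word);  /* 0xa064 */
        return 0;
    }
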
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
@@ -1453,6 +1537,7 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
memcpy(qp->smac, sb->src_mac, 6);
qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
+ qp->port_id = le16_to_cpu(sb->port_id);
bail:
dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
sbuf.sb, sbuf.dma_addr);
@@ -1515,9 +1600,11 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
u32 tbl_indx;
int rc;
+ spin_lock_bh(&rcfw->tbl_lock);
tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
+ spin_unlock_bh(&rcfw->tbl_lock);
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_DESTROY_QP,
@@ -1528,8 +1615,10 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc) {
+ spin_lock_bh(&rcfw->tbl_lock);
rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
rcfw->qp_tbl[tbl_indx].qp_handle = qp;
+ spin_unlock_bh(&rcfw->tbl_lock);
return rc;
}
@@ -1636,7 +1725,7 @@ static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
if (!swq->psn_search)
return;
/* Handle MSN differently on cap flags */
- if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
+ if (qp->is_host_msn_tbl) {
bnxt_qplib_fill_msn_search(qp, wqe, swq);
return;
}
@@ -1818,7 +1907,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
}
swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
- bnxt_qplib_pull_psn_buff(qp, sq, swq, BNXT_RE_HW_RETX(qp->dev_cap_flags));
+ bnxt_qplib_pull_psn_buff(qp, sq, swq, qp->is_host_msn_tbl);
idx = 0;
swq->slot_idx = hwq->prod;
@@ -2008,7 +2097,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
rc = -EINVAL;
goto done;
}
- if (!BNXT_RE_HW_RETX(qp->dev_cap_flags) || msn_update) {
+ if (!qp->is_host_msn_tbl || msn_update) {
swq->next_psn = sq->psn & BTH_PSN_MASK;
bnxt_qplib_fill_psn_search(qp, wqe, swq);
}
@@ -2130,6 +2219,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
struct bnxt_qplib_cmdqmsg msg = {};
struct cmdq_create_cq req = {};
struct bnxt_qplib_pbl *pbl;
+ u32 coalescing = 0;
u32 pg_sz_lvl;
int rc;
@@ -2156,6 +2246,25 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
req.dpi = cpu_to_le32(cq->dpi->dpi);
req.cq_handle = cpu_to_le64(cq->cq_handle);
req.cq_size = cpu_to_le32(cq->max_wqe);
+
+ if (_is_cq_coalescing_supported(res->dattr->dev_cap_flags2)) {
+ req.flags |= cpu_to_le16(CMDQ_CREATE_CQ_FLAGS_COALESCING_VALID);
+ coalescing |= ((cq->coalescing->buf_maxtime <<
+ CMDQ_CREATE_CQ_BUF_MAXTIME_SFT) &
+ CMDQ_CREATE_CQ_BUF_MAXTIME_MASK);
+ coalescing |= ((cq->coalescing->normal_maxbuf <<
+ CMDQ_CREATE_CQ_NORMAL_MAXBUF_SFT) &
+ CMDQ_CREATE_CQ_NORMAL_MAXBUF_MASK);
+ coalescing |= ((cq->coalescing->during_maxbuf <<
+ CMDQ_CREATE_CQ_DURING_MAXBUF_SFT) &
+ CMDQ_CREATE_CQ_DURING_MAXBUF_MASK);
+ if (cq->coalescing->en_ring_idle_mode)
+ coalescing |= CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE;
+ else
+ coalescing &= ~CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE;
+ req.coalescing = cpu_to_le32(coalescing);
+ }
+
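
The coalescing word packs four tunables into one 32-bit field using the masks added to roce_hsi.h later in this patch (buf_maxtime in bits 0-8, normal_maxbuf in bits 9-13, during_maxbuf in bits 14-18, ring-idle enable in bit 19). A standalone sketch packing the P7 defaults from the qplib_fp.h hunk:

    #include <stdio.h>
    #include <stdint.h>

    /* Masks/shifts as added to roce_hsi.h by this patch */
    #define BUF_MAXTIME_MASK      0x1ffu
    #define BUF_MAXTIME_SFT       0
    #define NORMAL_MAXBUF_MASK    0x3e00u
    #define NORMAL_MAXBUF_SFT     9
    #define DURING_MAXBUF_MASK    0x7c000u
    #define DURING_MAXBUF_SFT     14
    #define ENABLE_RING_IDLE_MODE 0x80000u

    int main(void)
    {
        /* P7 defaults from the qplib_fp.h hunk */
        uint32_t buf_maxtime = 0x1, normal_maxbuf = 0x8, during_maxbuf = 0x8;
        uint32_t coalescing = 0;

        coalescing |= (buf_maxtime << BUF_MAXTIME_SFT) & BUF_MAXTIME_MASK;
        coalescing |= (normal_maxbuf << NORMAL_MAXBUF_SFT) & NORMAL_MAXBUF_MASK;
        coalescing |= (during_maxbuf << DURING_MAXBUF_SFT) & DURING_MAXBUF_MASK;
        coalescing |= ENABLE_RING_IDLE_MODE;  /* en_ring_idle_mode = 1 */

        printf("coalescing = 0x%05x\n", coalescing);  /* 0xa1001 */
        return 0;
    }
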
pbl = &cq->hwq.pbl[PBL_LVL_0];
pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
CMDQ_CREATE_CQ_PG_SIZE_SFT);
@@ -2470,6 +2579,32 @@ out:
return rc;
}
+static int bnxt_qplib_get_cqe_sq_cons(struct bnxt_qplib_q *sq, u32 cqe_slot)
+{
+ struct bnxt_qplib_hwq *sq_hwq;
+ struct bnxt_qplib_swq *swq;
+ int cqe_sq_cons = -1;
+ u32 start, last;
+
+ sq_hwq = &sq->hwq;
+
+ start = sq->swq_start;
+ last = sq->swq_last;
+
+ while (last != start) {
+ swq = &sq->swq[last];
+ if (swq->slot_idx == cqe_slot) {
+ cqe_sq_cons = swq->next_idx;
+ dev_err(&sq_hwq->pdev->dev, "%s: Found cons wqe = %d slot = %d\n",
+ __func__, cqe_sq_cons, cqe_slot);
+ break;
+ }
+
+ last = swq->next_idx;
+ }
+ return cqe_sq_cons;
+}
+
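
With variable-size WQEs an error CQE carries a slot index rather than a WQE index, so the helper above walks the in-flight span of the swq list to translate one into the other. A condensed user-space version of that walk:

    #include <stdio.h>

    struct swq {
        int next_idx;
        unsigned int slot_idx;  /* first HW slot this WQE occupies */
    };

    /* Walk from 'last' toward 'start' looking for the WQE that begins
     * at cqe_slot; return its successor's index, or -1 if not found.
     */
    static int cqe_sq_cons(struct swq *swq, unsigned int start,
                           unsigned int last, unsigned int cqe_slot)
    {
        while (last != start) {
            if (swq[last].slot_idx == cqe_slot)
                return swq[last].next_idx;
            last = swq[last].next_idx;
        }
        return -1;
    }

    int main(void)
    {
        struct swq swq[4] = {
            { .next_idx = 1, .slot_idx = 0 },
            { .next_idx = 2, .slot_idx = 3 },  /* WQEs of varying size */
            { .next_idx = 3, .slot_idx = 5 },
            { .next_idx = 0, .slot_idx = 9 },
        };

        printf("cons = %d\n", cqe_sq_cons(swq, 3, 0, 5));  /* cons = 3 */
        return 0;
    }
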
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
struct cq_req *hwcqe,
struct bnxt_qplib_cqe **pcqe, int *budget,
@@ -2477,9 +2612,10 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
{
struct bnxt_qplib_swq *swq;
struct bnxt_qplib_cqe *cqe;
+ u32 cqe_sq_cons, slot_num;
struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *sq;
- u32 cqe_sq_cons;
+ int cqe_cons;
int rc = 0;
qp = (struct bnxt_qplib_qp *)((unsigned long)
@@ -2491,12 +2627,26 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
}
sq = &qp->sq;
- cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
+ cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_sw_wqe;
if (qp->sq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
"%s: QP in Flush QP = %p\n", __func__, qp);
goto done;
}
+
+ if (__is_err_cqe_for_var_wqe(qp, hwcqe->status)) {
+ slot_num = le16_to_cpu(hwcqe->sq_cons_idx);
+ cqe_cons = bnxt_qplib_get_cqe_sq_cons(sq, slot_num);
+ if (cqe_cons < 0) {
+ dev_err(&cq->hwq.pdev->dev, "%s: Wrong SQ cons cqe_slot_indx = %d\n",
+ __func__, slot_num);
+ goto done;
+ }
+ cqe_sq_cons = cqe_cons;
+ dev_err(&cq->hwq.pdev->dev, "%s: cqe_sq_cons = %d swq_last = %d swq_start = %d\n",
+ __func__, cqe_sq_cons, sq->swq_last, sq->swq_start);
+ }
+
/* Require to walk the sq's swq to fabricate CQEs for all previously
* signaled SWQEs due to CQE aggregation from the current sq cons
* to the cqe_sq_cons
@@ -2534,10 +2684,12 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
bnxt_qplib_add_flush_qp(qp);
} else {
/* Before we complete, do WA 9060 */
- if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
- cqe_sq_cons)) {
- *lib_qp = qp;
- goto out;
+ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
+ if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
+ cqe_sq_cons)) {
+ *lib_qp = qp;
+ goto out;
+ }
}
if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
cqe->status = CQ_REQ_STATUS_OK;
@@ -2881,7 +3033,7 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
if (cqe_cons == 0xFFFF)
goto do_rq;
- cqe_cons %= sq->max_wqe;
+ cqe_cons %= sq->max_sw_wqe;
if (qp->sq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 7fd4506b3584..0d9487c889ff 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -105,6 +105,7 @@ struct bnxt_qplib_srq {
struct bnxt_qplib_sg_info sg_info;
u16 eventq_hw_ring_id;
spinlock_t lock; /* protect SRQE link list */
+ u8 toggle;
};
struct bnxt_qplib_sge {
@@ -113,7 +114,6 @@ struct bnxt_qplib_sge {
u32 size;
};
-#define BNXT_QPLIB_QP_MAX_SGL 6
struct bnxt_qplib_swq {
u64 wr_id;
int next_idx;
@@ -153,7 +153,7 @@ struct bnxt_qplib_swqe {
#define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE BIT(2)
#define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT BIT(3)
#define BNXT_QPLIB_SWQE_FLAGS_INLINE BIT(4)
- struct bnxt_qplib_sge sg_list[BNXT_QPLIB_QP_MAX_SGL];
+ struct bnxt_qplib_sge sg_list[BNXT_VAR_MAX_SGE];
int num_sge;
/* Max inline data is 96 bytes */
u32 inline_len;
@@ -164,12 +164,12 @@ struct bnxt_qplib_swqe {
/* Send, with imm, inval key */
struct {
union {
- __be32 imm_data;
+ u32 imm_data;
u32 inv_key;
};
u32 q_key;
u32 dst_qp;
- u16 avid;
+ u32 avid;
} send;
/* Send Raw Ethernet and QP1 */
@@ -182,7 +182,7 @@ struct bnxt_qplib_swqe {
/* RDMA write, with imm, read */
struct {
union {
- __be32 imm_data;
+ u32 imm_data;
u32 inv_key;
};
u64 remote_va;
@@ -251,6 +251,7 @@ struct bnxt_qplib_q {
struct bnxt_qplib_db_info dbinfo;
struct bnxt_qplib_sg_info sg_info;
u32 max_wqe;
+ u32 max_sw_wqe;
u16 wqe_size;
u16 q_full_delta;
u16 max_sge;
@@ -297,6 +298,7 @@ struct bnxt_qplib_qp {
u32 dest_qpn;
u8 smac[6];
u16 vlan_id;
+ u16 port_id;
u8 nw_type;
struct bnxt_qplib_ah ah;
@@ -340,7 +342,8 @@ struct bnxt_qplib_qp {
struct list_head rq_flush;
u32 msn;
u32 msn_tbl_sz;
- u16 dev_cap_flags;
+ bool is_host_msn_tbl;
+ u8 tos_dscp;
};
#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)
@@ -381,6 +384,25 @@ static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *que,
return avail <= slots;
}
+/* CQ coalescing parameters */
+struct bnxt_qplib_cq_coal_param {
+ u16 buf_maxtime;
+ u8 normal_maxbuf;
+ u8 during_maxbuf;
+ u8 en_ring_idle_mode;
+};
+
+#define BNXT_QPLIB_CQ_COAL_DEF_BUF_MAXTIME 0x1
+#define BNXT_QPLIB_CQ_COAL_DEF_NORMAL_MAXBUF_P7 0x8
+#define BNXT_QPLIB_CQ_COAL_DEF_DURING_MAXBUF_P7 0x8
+#define BNXT_QPLIB_CQ_COAL_DEF_NORMAL_MAXBUF_P5 0x1
+#define BNXT_QPLIB_CQ_COAL_DEF_DURING_MAXBUF_P5 0x1
+#define BNXT_QPLIB_CQ_COAL_DEF_EN_RING_IDLE_MODE 0x1
+#define BNXT_QPLIB_CQ_COAL_MAX_BUF_MAXTIME 0x1bf
+#define BNXT_QPLIB_CQ_COAL_MAX_NORMAL_MAXBUF 0x1f
+#define BNXT_QPLIB_CQ_COAL_MAX_DURING_MAXBUF 0x1f
+#define BNXT_QPLIB_CQ_COAL_MAX_EN_RING_IDLE_MODE 0x1
+
struct bnxt_qplib_cqe {
u8 status;
u8 type;
@@ -389,7 +411,7 @@ struct bnxt_qplib_cqe {
u16 cfa_meta;
u64 wr_id;
union {
- __be32 immdata;
+ u32 immdata;
u32 invrkey;
};
u64 qp_handle;
@@ -443,6 +465,7 @@ struct bnxt_qplib_cq {
*/
spinlock_t flush_lock; /* QP flush management */
u16 cnq_events;
+ struct bnxt_qplib_cq_coal_param *coalescing;
};
#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
@@ -497,6 +520,7 @@ struct bnxt_qplib_nq {
struct tasklet_struct nq_tasklet;
bool requested;
int budget;
+ u32 load;
cqn_handler_t cqn_handler;
srqn_handler_t srqn_handler;
@@ -586,15 +610,22 @@ static inline void bnxt_qplib_swq_mod_start(struct bnxt_qplib_q *que, u32 idx)
que->swq_start = que->swq[idx].next_idx;
}
-static inline u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que)
+static inline u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que, u8 wqe_mode, bool is_sq)
{
- return (que->wqe_size * que->max_wqe) / sizeof(struct sq_sge);
+ u32 slots;
+
+ /* Queue depth is the number of slots. */
+ slots = (que->wqe_size * que->max_wqe) / sizeof(struct sq_sge);
+	/* For variable WQE mode, the slot count must be aligned to 256 */
+ if (wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE && is_sq)
+ slots = ALIGN(slots, BNXT_VAR_MAX_SLOT_ALIGN);
+ return slots;
}
static inline u32 bnxt_qplib_set_sq_size(struct bnxt_qplib_q *que, u8 wqe_mode)
{
return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
- que->max_wqe : bnxt_qplib_get_depth(que);
+ que->max_wqe : bnxt_qplib_get_depth(que, wqe_mode, true);
}
static inline u32 bnxt_qplib_set_sq_max_slot(u8 wqe_mode)
@@ -641,4 +672,14 @@ static inline __le64 bnxt_re_update_msn_tbl(u32 st_idx, u32 npsn, u32 start_psn)
(((start_psn) << SQ_MSN_SEARCH_START_PSN_SFT) &
SQ_MSN_SEARCH_START_PSN_MASK));
}
+
+static inline bool __is_var_wqe(struct bnxt_qplib_qp *qp)
+{
+ return (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE);
+}
+
+static inline bool __is_err_cqe_for_var_wqe(struct bnxt_qplib_qp *qp, u8 status)
+{
+ return (status != CQ_REQ_STATUS_OK) && __is_var_wqe(qp);
+}
#endif /* __BNXT_QPLIB_FP_H__ */
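
bnxt_qplib_get_depth() now measures queue depth in 16-byte slots and, for a variable-WQE SQ, rounds the slot count up to BNXT_VAR_MAX_SLOT_ALIGN (256, per the qplib_sp.h hunk below). A minimal sketch of the arithmetic, assuming the 16-byte struct sq_sge stride this file uses:

    #include <stdio.h>

    #define SQ_SGE_SIZE             16   /* assumed sizeof(struct sq_sge) */
    #define BNXT_VAR_MAX_SLOT_ALIGN 256
    #define ALIGN_UP(x, a)          (((x) + (a) - 1) & ~((a) - 1))

    static unsigned int get_depth(unsigned int wqe_size, unsigned int max_wqe,
                                  int variable_sq)
    {
        unsigned int slots = (wqe_size * max_wqe) / SQ_SGE_SIZE;

        if (variable_sq)
            slots = ALIGN_UP(slots, BNXT_VAR_MAX_SLOT_ALIGN);
        return slots;
    }

    int main(void)
    {
        /* 100 WQEs of 128B -> 800 slots; variable SQs align up to 1024 */
        printf("static:   %u\n", get_depth(128, 100, 0));  /*  800 */
        printf("variable: %u\n", get_depth(128, 100, 1));  /* 1024 */
        return 0;
    }
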
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 3ffaef0c2651..804bc773b4ef 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -160,7 +160,7 @@ static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
wait_event_timeout(cmdq->waitq,
!crsqe->is_in_used ||
test_bit(ERR_DEVICE_DETACHED, &cmdq->flags),
- msecs_to_jiffies(rcfw->max_timeout * 1000));
+ secs_to_jiffies(rcfw->max_timeout));
if (!crsqe->is_in_used)
return 0;
@@ -290,7 +290,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_hwq *hwq;
u32 sw_prod, cmdq_prod;
struct pci_dev *pdev;
- unsigned long flags;
u16 cookie;
u8 *preq;
@@ -301,7 +300,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
/* Cmdq are in 16-byte units, each request can consume 1 or more
* cmdqe
*/
- spin_lock_irqsave(&hwq->lock, flags);
+ spin_lock_bh(&hwq->lock);
required_slots = bnxt_qplib_get_cmd_slots(msg->req);
free_slots = HWQ_FREE_SLOTS(hwq);
cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE;
@@ -311,7 +310,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
dev_info_ratelimited(&pdev->dev,
"CMDQ is full req/free %d/%d!",
required_slots, free_slots);
- spin_unlock_irqrestore(&hwq->lock, flags);
+ spin_unlock_bh(&hwq->lock);
return -EAGAIN;
}
if (msg->block)
@@ -367,7 +366,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
wmb();
writel(cmdq_prod, cmdq->cmdq_mbox.prod);
writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
- spin_unlock_irqrestore(&hwq->lock, flags);
+ spin_unlock_bh(&hwq->lock);
/* Return the CREQ response pointer */
return 0;
}
@@ -425,7 +424,8 @@ static int __send_message_basic_sanity(struct bnxt_qplib_rcfw *rcfw,
/* Prevent posting if f/w is not in a state to process */
if (test_bit(ERR_DEVICE_DETACHED, &rcfw->cmdq.flags))
- return bnxt_qplib_map_rc(opcode);
+ return -ENXIO;
+
if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
return -ETIMEDOUT;
@@ -486,7 +486,6 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
{
struct creq_qp_event *evnt = (struct creq_qp_event *)msg->resp;
struct bnxt_qplib_crsqe *crsqe;
- unsigned long flags;
u16 cookie;
int rc;
u8 opcode;
@@ -495,7 +494,7 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
rc = __send_message_basic_sanity(rcfw, msg, opcode);
if (rc)
- return rc;
+ return rc == -ENXIO ? bnxt_qplib_map_rc(opcode) : rc;
rc = __send_message(rcfw, msg, opcode);
if (rc)
@@ -512,12 +511,12 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
rc = __poll_for_resp(rcfw, cookie);
if (rc) {
- spin_lock_irqsave(&rcfw->cmdq.hwq.lock, flags);
+ spin_lock_bh(&rcfw->cmdq.hwq.lock);
crsqe = &rcfw->crsqe_tbl[cookie];
crsqe->is_waiter_alive = false;
if (rc == -ENODEV)
set_bit(FIRMWARE_STALL_DETECTED, &rcfw->cmdq.flags);
- spin_unlock_irqrestore(&rcfw->cmdq.hwq.lock, flags);
+ spin_unlock_bh(&rcfw->cmdq.hwq.lock);
return -ETIMEDOUT;
}
@@ -525,7 +524,7 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
/* failed with status */
dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
cookie, opcode, evnt->status);
- rc = -EFAULT;
+ rc = -EIO;
}
return rc;
@@ -628,7 +627,6 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
u16 cookie, blocked = 0;
bool is_waiter_alive;
struct pci_dev *pdev;
- unsigned long flags;
u32 wait_cmds = 0;
int rc = 0;
@@ -637,17 +635,21 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
err_event = (struct creq_qp_error_notification *)qp_event;
qp_id = le32_to_cpu(err_event->xid);
+ spin_lock(&rcfw->tbl_lock);
tbl_indx = map_qp_id_to_tbl_indx(qp_id, rcfw);
qp = rcfw->qp_tbl[tbl_indx].qp_handle;
+ if (!qp) {
+ spin_unlock(&rcfw->tbl_lock);
+ break;
+ }
+ bnxt_qplib_mark_qp_error(qp);
+ rc = rcfw->creq.aeq_handler(rcfw, qp_event, qp);
+ spin_unlock(&rcfw->tbl_lock);
dev_dbg(&pdev->dev, "Received QP error notification\n");
dev_dbg(&pdev->dev,
"qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
qp_id, err_event->req_err_state_reason,
err_event->res_err_state_reason);
- if (!qp)
- break;
- bnxt_qplib_mark_qp_error(qp);
- rc = rcfw->creq.aeq_handler(rcfw, qp_event, qp);
break;
default:
/*
@@ -659,8 +661,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
*
*/
- spin_lock_irqsave_nested(&hwq->lock, flags,
- SINGLE_DEPTH_NESTING);
+ spin_lock_nested(&hwq->lock, SINGLE_DEPTH_NESTING);
cookie = le16_to_cpu(qp_event->cookie);
blocked = cookie & RCFW_CMD_IS_BLOCKING;
cookie &= RCFW_MAX_COOKIE_VALUE;
@@ -672,7 +673,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
dev_info(&pdev->dev,
"rcfw timedout: cookie = %#x, free_slots = %d",
cookie, crsqe->free_slots);
- spin_unlock_irqrestore(&hwq->lock, flags);
+ spin_unlock(&hwq->lock);
return rc;
}
@@ -720,7 +721,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
__destroy_timedout_ah(rcfw,
(struct creq_create_ah_resp *)
qp_event);
- spin_unlock_irqrestore(&hwq->lock, flags);
+ spin_unlock(&hwq->lock);
}
*num_wait += wait_cmds;
return rc;
@@ -734,12 +735,11 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t)
u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
struct bnxt_qplib_hwq *hwq = &creq->hwq;
struct creq_base *creqe;
- unsigned long flags;
u32 num_wakeup = 0;
u32 hw_polled = 0;
/* Service the CREQ until budget is over */
- spin_lock_irqsave(&hwq->lock, flags);
+ spin_lock_bh(&hwq->lock);
while (budget > 0) {
creqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
if (!CREQ_CMP_VALID(creqe, creq->creq_db.dbinfo.flags))
@@ -782,7 +782,7 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t)
if (hw_polled)
bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo,
rcfw->res->cctx, true);
- spin_unlock_irqrestore(&hwq->lock, flags);
+ spin_unlock_bh(&hwq->lock);
if (num_wakeup)
wake_up_nr(&rcfw->cmdq.waitq, num_wakeup);
}
@@ -832,6 +832,7 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
struct creq_initialize_fw_resp resp = {};
struct cmdq_initialize_fw req = {};
struct bnxt_qplib_cmdqmsg msg = {};
+ u16 flags = 0;
u8 pgsz, lvl;
int rc;
@@ -850,10 +851,8 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
* shall setup this area for VF. Skipping the
* HW programming
*/
- if (is_virtfn)
+ if (is_virtfn || bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx))
goto skip_ctx_setup;
- if (bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx))
- goto config_vf_res;
lvl = ctx->qpc_tbl.level;
pgsz = bnxt_qplib_base_pg_size(&ctx->qpc_tbl);
@@ -897,16 +896,14 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);
-config_vf_res:
- req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
- req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
- req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
- req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
- req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);
-
skip_ctx_setup:
if (BNXT_RE_HW_RETX(rcfw->res->dattr->dev_cap_flags))
- req.flags |= cpu_to_le16(CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED);
+ flags |= CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED;
+ if (_is_optimize_modify_qp_supported(rcfw->res->dattr->dev_cap_flags2))
+ flags |= CMDQ_INITIALIZE_FW_FLAGS_OPTIMIZE_MODIFY_QP_SUPPORTED;
+ if (rcfw->res->en_dev->flags & BNXT_EN_FLAG_ROCE_VF_RES_MGMT)
+ flags |= CMDQ_INITIALIZE_FW_FLAGS_L2_VF_RESOURCE_MGMT;
+ req.flags |= cpu_to_le16(flags);
req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
@@ -918,7 +915,6 @@ skip_ctx_setup:
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
- kfree(rcfw->qp_tbl);
kfree(rcfw->crsqe_tbl);
bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq);
bnxt_qplib_free_hwq(rcfw->res, &rcfw->creq.hwq);
@@ -927,8 +923,7 @@ void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_ctx *ctx,
- int qp_tbl_sz)
+ struct bnxt_qplib_ctx *ctx)
{
struct bnxt_qplib_hwq_attr hwq_attr = {};
struct bnxt_qplib_sg_info sginfo = {};
@@ -972,12 +967,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
if (!rcfw->crsqe_tbl)
goto fail;
- /* Allocate one extra to hold the QP1 entries */
- rcfw->qp_tbl_size = qp_tbl_sz + 1;
- rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node),
- GFP_KERNEL);
- if (!rcfw->qp_tbl)
- goto fail;
+ spin_lock_init(&rcfw->tbl_lock);
rcfw->max_timeout = res->cctx->hwrm_cmd_max_timeout;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 45996e60a0d0..ff873c5f1b25 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -131,6 +131,8 @@ static inline u32 bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
#define RCFW_CMD_IS_BLOCKING 0x8000
#define HWRM_VERSION_DEV_ATTR_MAX_DPI 0x1000A0000000DULL
+/* HWRM version 1.10.3.18 */
+#define HWRM_VERSION_READ_CTX 0x1000A00030012
/* Crsq buf is 1024-Byte */
struct bnxt_qplib_crsbe {
@@ -224,6 +226,8 @@ struct bnxt_qplib_rcfw {
struct bnxt_qplib_crsqe *crsqe_tbl;
int qp_tbl_size;
struct bnxt_qplib_qp_node *qp_tbl;
+ /* To synchronize the qp-handle hash table */
+ spinlock_t tbl_lock;
u64 oos_prev;
u32 init_oos_stats;
u32 cmdq_depth;
@@ -258,8 +262,7 @@ static inline void bnxt_qplib_fill_cmdqmsg(struct bnxt_qplib_cmdqmsg *msg,
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_ctx *ctx,
- int qp_tbl_sz);
+ struct bnxt_qplib_ctx *ctx);
void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
@@ -281,9 +284,10 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx, int is_virtfn);
void bnxt_qplib_mark_qp_error(void *qp_handle);
+
static inline u32 map_qp_id_to_tbl_indx(u32 qid, struct bnxt_qplib_rcfw *rcfw)
{
/* Last index of the qp_tbl is for QP1 ie. qp_tbl_size - 1*/
- return (qid == 1) ? rcfw->qp_tbl_size - 1 : qid % rcfw->qp_tbl_size - 2;
+ return (qid == 1) ? rcfw->qp_tbl_size - 1 : (qid % (rcfw->qp_tbl_size - 2));
}
#endif /* __BNXT_QPLIB_RCFW_H__ */
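
The added parentheses in map_qp_id_to_tbl_indx() are a real fix, not a style change: '%' binds tighter than '-', so the old expression computed (qid % qp_tbl_size) - 2, which underflows to a huge u32 for small qids, instead of a modulo over the usable slots (the last entry is reserved for QP1 and handled separately). A minimal demonstration:

    #include <stdio.h>

    int main(void)
    {
        unsigned int qp_tbl_size = 65537;  /* e.g. BNXT_RE_MAX_QPC_COUNT + 1 */
        unsigned int qid;

        for (qid = 0; qid < 3; qid++)
            printf("qid=%u old=%u new=%u\n", qid,
                   qid % qp_tbl_size - 2,      /* (qid %% size) - 2: wraps */
                   qid % (qp_tbl_size - 2));   /* intended mapping */
        /* qid=0 old=4294967294 new=0
         * qid=1 old=4294967295 new=1   (qid 1 is special-cased anyway)
         * qid=2 old=0          new=2
         */
        return 0;
    }
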
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index dfc943fab87b..6cd05207ffed 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -244,6 +244,8 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
sginfo.pgsize = npde * pg_size;
sginfo.npages = 1;
rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
+ if (rc)
+ goto fail;
/* Alloc PBL pages */
sginfo.npages = npbl;
@@ -255,22 +257,9 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
dst_virt_ptr =
(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
- if (hwq_attr->type == HWQ_TYPE_MR) {
- /* For MR it is expected that we supply only 1 contigous
- * page i.e only 1 entry in the PDL that will contain
- * all the PBLs for the user supplied memory region
- */
- for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
- i++)
- dst_virt_ptr[0][i] = src_phys_ptr[i] |
- flag;
- } else {
- for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
- i++)
- dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
- src_phys_ptr[i] |
- PTU_PDE_VALID;
- }
+ for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
+ dst_virt_ptr[0][i] = src_phys_ptr[i] | flag;
+
/* Alloc or init PTEs */
rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
hwq_attr->sginfo);
@@ -882,19 +871,27 @@ int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
+ kfree(res->rcfw->qp_tbl);
bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
bnxt_qplib_free_pd_tbl(&res->pd_tbl);
bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
}
-int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
- struct net_device *netdev,
- struct bnxt_qplib_dev_attr *dev_attr)
+int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev)
{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct bnxt_qplib_dev_attr *dev_attr;
int rc;
- res->pdev = pdev;
res->netdev = netdev;
+ dev_attr = res->dattr;
+
+	/* Allocate one extra entry to hold QP1 */
+ rcfw->qp_tbl_size = max_t(u32, BNXT_RE_MAX_QPC_COUNT + 1, dev_attr->max_qp);
+ rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node),
+ GFP_KERNEL);
+ if (!rcfw->qp_tbl)
+ return -ENOMEM;
rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
if (rc)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 61628f7f1253..6a13927674b4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -39,6 +39,8 @@
#ifndef __BNXT_QPLIB_RES_H__
#define __BNXT_QPLIB_RES_H__
+#include "bnxt_ulp.h"
+
extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
#define CHIP_NUM_57508 0x1750
@@ -47,6 +49,13 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
#define CHIP_NUM_58818 0xd818
#define CHIP_NUM_57608 0x1760
+#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT (64 * 1024)
+#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
+#define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT_64K (64 * 1024)
+#define BNXT_RE_MAX_MRW_COUNT_256K (256 * 1024)
+
#define BNXT_QPLIB_DBR_VALID (0x1UL << 26)
#define BNXT_QPLIB_DBR_EPOCH_SHIFT 24
#define BNXT_QPLIB_DBR_TOGGLE_SHIFT 25
@@ -82,6 +91,7 @@ struct bnxt_qplib_db_pacing_data {
u32 fifo_room_mask;
u32 fifo_room_shift;
u32 grc_reg_offset;
+ u32 dev_err_state;
};
#define BNXT_QPLIB_DBR_PF_DB_OFFSET 0x10000
@@ -301,6 +311,7 @@ struct bnxt_qplib_res {
struct bnxt_qplib_chip_ctx *cctx;
struct bnxt_qplib_dev_attr *dattr;
struct net_device *netdev;
+ struct bnxt_en_dev *en_dev;
struct bnxt_qplib_rcfw *rcfw;
struct bnxt_qplib_pd_tbl pd_tbl;
/* To protect the pd table bit map */
@@ -420,9 +431,7 @@ int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res);
int bnxt_qplib_init_res(struct bnxt_qplib_res *res);
void bnxt_qplib_free_res(struct bnxt_qplib_res *res);
-int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
- struct net_device *netdev,
- struct bnxt_qplib_dev_attr *dev_attr);
+int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev);
void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
struct bnxt_qplib_ctx *ctx);
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
@@ -545,6 +554,14 @@ static inline bool _is_ext_stats_supported(u16 dev_cap_flags)
CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
}
+static inline int bnxt_ext_stats_supported(struct bnxt_qplib_chip_ctx *ctx,
+ u16 flags, bool virtfn)
+{
+	/* Ext stats supported if the cap flag is set and this is a PF or a Thor2 VF */
+ return (_is_ext_stats_supported(flags) &&
+ ((virtfn && bnxt_qplib_is_chip_gen_p7(ctx)) || (!virtfn)));
+}
+
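
bnxt_ext_stats_supported() widens extended-stats support to Thor2 (P7) VFs while keeping it PF-only on older chips. The predicate reduces to a small truth table, sketched here:

    #include <stdio.h>
    #include <stdbool.h>

    /* cap flag set AND (a PF, or a Thor2/P7 VF) */
    static bool ext_stats_supported(bool cap, bool is_vf, bool is_p7)
    {
        return cap && (!is_vf || is_p7);
    }

    int main(void)
    {
        printf("PF with cap:    %d\n", ext_stats_supported(true, false, false)); /* 1 */
        printf("P5 VF with cap: %d\n", ext_stats_supported(true, true, false));  /* 0 */
        printf("P7 VF with cap: %d\n", ext_stats_supported(true, true, true));   /* 1 */
        return 0;
    }
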
static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
{
return dev_cap_flags &
@@ -554,9 +571,45 @@ static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
#define BNXT_RE_HW_RETX(a) _is_hw_retx_supported((a))
+static inline bool _is_host_msn_table(u16 dev_cap_ext_flags2)
+{
+ return (dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_MASK) ==
+ CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_MSN_TABLE;
+}
+
static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
{
return cctx->modes.dbr_pacing;
}
+static inline bool _is_alloc_mr_unified(u16 dev_cap_flags)
+{
+ return dev_cap_flags & CREQ_QUERY_FUNC_RESP_SB_MR_REGISTER_ALLOC;
+}
+
+static inline bool _is_relaxed_ordering_supported(u16 dev_cap_ext_flags2)
+{
+ return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_MEMORY_REGION_RO_SUPPORTED;
+}
+
+static inline bool _is_optimize_modify_qp_supported(u16 dev_cap_ext_flags2)
+{
+ return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_OPTIMIZE_MODIFY_QP_SUPPORTED;
+}
+
+static inline bool _is_min_rnr_in_rtr_rts_mandatory(u16 dev_cap_ext_flags2)
+{
+ return !!(dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED);
+}
+
+static inline bool _is_cq_coalescing_supported(u16 dev_cap_ext_flags2)
+{
+ return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_CQ_COALESCING_SUPPORTED;
+}
+
+static inline bool _is_max_srq_ext_supported(u16 dev_cap_ext_flags_2)
+{
+ return !!(dev_cap_ext_flags_2 & CREQ_QUERY_FUNC_RESP_SB_MAX_SRQ_EXTENDED);
+}
+
#endif /* __BNXT_QPLIB_RES_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 8beeedd15061..9efd32a3dc55 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -88,18 +88,20 @@ static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
fw_ver[3] = resp.fw_rsvd;
}
-int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_dev_attr *attr)
+int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
{
+ struct bnxt_qplib_dev_attr *attr = rcfw->res->dattr;
struct creq_query_func_resp resp = {};
struct bnxt_qplib_cmdqmsg msg = {};
struct creq_query_func_resp_sb *sb;
struct bnxt_qplib_rcfw_sbuf sbuf;
+ struct bnxt_qplib_chip_ctx *cctx;
struct cmdq_query_func req = {};
u8 *tqm_alloc;
int i, rc;
u32 temp;
+ cctx = rcfw->res->cctx;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_QUERY_FUNC,
sizeof(req));
@@ -127,16 +129,25 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->max_qp_init_rd_atom =
sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
- attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
- /*
- * 128 WQEs needs to be reserved for the HW (8916). Prevent
- * reporting the max number
- */
- attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
- attr->max_qp_sges = bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx) ?
- 6 : sb->max_sge;
+ attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr) - 1;
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx)) {
+ /*
+		 * 128 WQEs need to be reserved for the HW (8916), so avoid
+		 * reporting the full maximum on legacy devices.
+ */
+ attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
+ }
+
+ /* Adjust for max_qp_wqes for variable wqe */
+ if (cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
+ attr->max_qp_wqes = BNXT_VAR_MAX_WQE - 1;
+
+ attr->max_qp_sges = cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE ?
+ min_t(u32, sb->max_sge_var_wqe, BNXT_VAR_MAX_SGE) : 6;
attr->max_cq = le32_to_cpu(sb->max_cq);
attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
+ if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
+ attr->max_cq_wqes = min_t(u32, BNXT_QPLIB_MAX_CQ_WQES, attr->max_cq_wqes);
attr->max_cq_sges = attr->max_qp_sges;
attr->max_mr = le32_to_cpu(sb->max_mr);
attr->max_mw = le32_to_cpu(sb->max_mw);
@@ -154,8 +165,19 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
attr->l2_db_size = (sb->l2_db_space_size + 1) *
(0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
- attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED;
+ /*
+ * Read the max gid supported by HW.
+	 * For each entry in the HW GID table, we consume 2
+	 * GID entries in the kernel GID table, so the max_gid reported
+	 * to the stack can be up to twice the value reported by HW,
+	 * capped at 256 gids.
+ */
+ attr->max_sgid = le32_to_cpu(sb->max_gid);
+ attr->max_sgid = min_t(u32, BNXT_QPLIB_NUM_GIDS_SUPPORTED, 2 * attr->max_sgid);
attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
+ attr->dev_cap_flags2 = le16_to_cpu(sb->dev_cap_ext_flags_2);
+
+ if (_is_max_srq_ext_supported(attr->dev_cap_flags2))
+ attr->max_srq += le16_to_cpu(sb->max_srq_ext);
bnxt_qplib_query_version(rcfw, attr->fw_ver);
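
max_sgid now comes from firmware instead of a fixed constant: each HW GID entry backs two kernel GID table entries, so the HW value is doubled and then capped. A small sketch, assuming BNXT_QPLIB_NUM_GIDS_SUPPORTED is the 256 the comment above mentions:

    #include <stdio.h>

    #define NUM_GIDS_SUPPORTED 256  /* assumed BNXT_QPLIB_NUM_GIDS_SUPPORTED */
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static unsigned int max_sgid(unsigned int hw_max_gid)
    {
        return MIN(NUM_GIDS_SUPPORTED, 2 * hw_max_gid);
    }

    int main(void)
    {
        printf("hw=64  -> %u\n", max_sgid(64));   /* 128 */
        printf("hw=200 -> %u\n", max_sgid(200));  /* 256, capped */
        return 0;
    }
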
@@ -540,7 +562,7 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
req.pd_id = cpu_to_le32(mrw->pd->id);
req.mrw_flags = mrw->type;
if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR &&
- mrw->flags & BNXT_QPLIB_FR_PMR) ||
+ mrw->access_flags & BNXT_QPLIB_FR_PMR) ||
mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A ||
mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)
req.access = CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY;
@@ -652,9 +674,12 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) <<
CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT) &
CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK));
- req.access = (mr->flags & 0xFFFF);
+ req.access = (mr->access_flags & 0xFFFF);
req.va = cpu_to_le64(mr->va);
req.key = cpu_to_le32(mr->lkey);
+ if (_is_alloc_mr_unified(res->dattr->dev_cap_flags))
+ req.key = cpu_to_le32(mr->pd->id);
+ req.flags = cpu_to_le16(mr->flags);
req.mr_size = cpu_to_le64(mr->total_size);
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
@@ -663,6 +688,11 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
if (rc)
goto fail;
+ if (_is_alloc_mr_unified(res->dattr->dev_cap_flags)) {
+ mr->lkey = le32_to_cpu(resp.xid);
+ mr->rkey = mr->lkey;
+ }
+
return 0;
fail:
@@ -816,7 +846,12 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
req.resp_addr = cpu_to_le64(sbuf.dma_addr);
- req.function_id = cpu_to_le32(fid);
+ if (bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx) && rcfw->res->is_vf)
+ req.function_id =
+ cpu_to_le32(CMDQ_QUERY_ROCE_STATS_EXT_VF_VALID |
+ (fid << CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_SFT));
+ else
+ req.function_id = cpu_to_le32(fid);
req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID);
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
@@ -960,3 +995,151 @@ int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg);
return rc;
}
+
+int bnxt_qplib_read_context(struct bnxt_qplib_rcfw *rcfw, u8 res_type,
+ u32 xid, u32 resp_size, void *resp_va)
+{
+ struct creq_read_context resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_read_context req = {};
+ struct bnxt_qplib_rcfw_sbuf sbuf;
+ int rc;
+
+ sbuf.size = resp_size;
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
+ return -ENOMEM;
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_READ_CONTEXT, sizeof(req));
+ req.resp_addr = cpu_to_le64(sbuf.dma_addr);
+ req.resp_size = resp_size / BNXT_QPLIB_CMDQE_UNITS;
+
+ req.xid = cpu_to_le32(xid);
+ req.type = res_type;
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ goto free_mem;
+
+ memcpy(resp_va, sbuf.sb, resp_size);
+free_mem:
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
+ return rc;
+}
+
+static void bnxt_qplib_read_cc_gen1(struct bnxt_qplib_cc_param_ext *cc_ext,
+ struct creq_query_roce_cc_gen1_resp_sb_tlv *sb)
+{
+ cc_ext->inact_th_hi = le16_to_cpu(sb->inactivity_th_hi);
+ cc_ext->min_delta_cnp = le16_to_cpu(sb->min_time_between_cnps);
+ cc_ext->init_cp = le16_to_cpu(sb->init_cp);
+ cc_ext->tr_update_mode = sb->tr_update_mode;
+ cc_ext->tr_update_cyls = sb->tr_update_cycles;
+ cc_ext->fr_rtt = sb->fr_num_rtts;
+ cc_ext->ai_rate_incr = sb->ai_rate_increase;
+ cc_ext->rr_rtt_th = le16_to_cpu(sb->reduction_relax_rtts_th);
+ cc_ext->ar_cr_th = le16_to_cpu(sb->additional_relax_cr_th);
+ cc_ext->cr_min_th = le16_to_cpu(sb->cr_min_th);
+ cc_ext->bw_avg_weight = sb->bw_avg_weight;
+ cc_ext->cr_factor = sb->actual_cr_factor;
+ cc_ext->cr_th_max_cp = le16_to_cpu(sb->max_cp_cr_th);
+ cc_ext->cp_bias_en = sb->cp_bias_en;
+ cc_ext->cp_bias = sb->cp_bias;
+ cc_ext->cnp_ecn = sb->cnp_ecn;
+ cc_ext->rtt_jitter_en = sb->rtt_jitter_en;
+ cc_ext->bytes_per_usec = le16_to_cpu(sb->link_bytes_per_usec);
+ cc_ext->cc_cr_reset_th = le16_to_cpu(sb->reset_cc_cr_th);
+ cc_ext->cr_width = sb->cr_width;
+ cc_ext->min_quota = sb->quota_period_min;
+ cc_ext->max_quota = sb->quota_period_max;
+ cc_ext->abs_max_quota = sb->quota_period_abs_max;
+ cc_ext->tr_lb = le16_to_cpu(sb->tr_lower_bound);
+ cc_ext->cr_prob_fac = sb->cr_prob_factor;
+ cc_ext->tr_prob_fac = sb->tr_prob_factor;
+ cc_ext->fair_cr_th = le16_to_cpu(sb->fairness_cr_th);
+ cc_ext->red_div = sb->red_div;
+ cc_ext->cnp_ratio_th = sb->cnp_ratio_th;
+ cc_ext->ai_ext_rtt = le16_to_cpu(sb->exp_ai_rtts);
+ cc_ext->exp_crcp_ratio = sb->exp_ai_cr_cp_ratio;
+ cc_ext->low_rate_en = sb->use_rate_table;
+ cc_ext->cpcr_update_th = le16_to_cpu(sb->cp_exp_update_th);
+ cc_ext->ai_rtt_th1 = le16_to_cpu(sb->high_exp_ai_rtts_th1);
+ cc_ext->ai_rtt_th2 = le16_to_cpu(sb->high_exp_ai_rtts_th2);
+ cc_ext->cf_rtt_th = le16_to_cpu(sb->actual_cr_cong_free_rtts_th);
+ cc_ext->sc_cr_th1 = le16_to_cpu(sb->severe_cong_cr_th1);
+ cc_ext->sc_cr_th2 = le16_to_cpu(sb->severe_cong_cr_th2);
+ cc_ext->l64B_per_rtt = le32_to_cpu(sb->link64B_per_rtt);
+ cc_ext->cc_ack_bytes = sb->cc_ack_bytes;
+ cc_ext->reduce_cf_rtt_th = le16_to_cpu(sb->reduce_init_cong_free_rtts_th);
+}
+
+int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cc_param *cc_param)
+{
+ struct bnxt_qplib_tlv_query_rcc_sb *ext_sb;
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_query_roce_cc_resp resp = {};
+ struct creq_query_roce_cc_resp_sb *sb;
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_query_roce_cc req = {};
+ struct bnxt_qplib_rcfw_sbuf sbuf;
+ size_t resp_size;
+ int rc;
+
+ /* Query the parameters from chip */
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, CMDQ_BASE_OPCODE_QUERY_ROCE_CC,
+ sizeof(req));
+ if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx))
+ resp_size = sizeof(*ext_sb);
+ else
+ resp_size = sizeof(*sb);
+
+ sbuf.size = ALIGN(resp_size, BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
+ return -ENOMEM;
+
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg);
+ if (rc)
+ goto out;
+
+ ext_sb = sbuf.sb;
+ sb = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ? &ext_sb->base_sb :
+ (struct creq_query_roce_cc_resp_sb *)ext_sb;
+
+ cc_param->enable = sb->enable_cc & CREQ_QUERY_ROCE_CC_RESP_SB_ENABLE_CC;
+ cc_param->tos_ecn = (sb->tos_dscp_tos_ecn &
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_MASK) >>
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_SFT;
+ cc_param->tos_dscp = (sb->tos_dscp_tos_ecn &
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_MASK) >>
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_SFT;
+ cc_param->alt_tos_dscp = sb->alt_tos_dscp;
+ cc_param->alt_vlan_pcp = sb->alt_vlan_pcp;
+
+ cc_param->g = sb->g;
+ cc_param->nph_per_state = sb->num_phases_per_state;
+ cc_param->init_cr = le16_to_cpu(sb->init_cr);
+ cc_param->init_tr = le16_to_cpu(sb->init_tr);
+ cc_param->cc_mode = sb->cc_mode;
+ cc_param->inact_th = le16_to_cpu(sb->inactivity_th);
+ cc_param->rtt = le16_to_cpu(sb->rtt);
+ cc_param->tcp_cp = le16_to_cpu(sb->tcp_cp);
+ cc_param->time_pph = sb->time_per_phase;
+ cc_param->pkts_pph = sb->pkts_per_phase;
+ if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
+ bnxt_qplib_read_cc_gen1(&cc_param->cc_ext, &ext_sb->gen1_sb);
+ cc_param->inact_th |= (cc_param->cc_ext.inact_th_hi & 0x3F) << 16;
+ }
+out:
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
+ return rc;
+}
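
On Gen P5/P7 the inactivity threshold is wider than the base 16-bit field, so the query stitches six extra bits from the extended block on top of it, as the last statement above does. A one-line worked example:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t inact_th = 0xbeef;   /* low 16 bits from the base block */
        uint16_t inact_th_hi = 0x2a;  /* 6 high bits from the gen1 block */

        inact_th |= (uint32_t)(inact_th_hi & 0x3f) << 16;
        printf("inact_th = 0x%06x\n", inact_th);  /* 0x2abeef */
        return 0;
    }
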
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index d33c78b96217..e626b05038a1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -40,6 +40,7 @@
#ifndef __BNXT_QPLIB_SP_H__
#define __BNXT_QPLIB_SP_H__
+#include <rdma/bnxt_re-abi.h>
#define BNXT_QPLIB_RESERVED_QP_WRS 128
struct bnxt_qplib_dev_attr {
@@ -55,6 +56,7 @@ struct bnxt_qplib_dev_attr {
u32 max_qp_wqes;
u32 max_qp_sges;
u32 max_cq;
+#define BNXT_QPLIB_MAX_CQ_WQES 0xfffff
u32 max_cq_wqes;
u32 max_cq_sges;
u32 max_mr;
@@ -72,6 +74,7 @@ struct bnxt_qplib_dev_attr {
u8 tqm_alloc_reqs[MAX_TQM_ALLOC_REQ];
bool is_atomic;
u16 dev_cap_flags;
+ u16 dev_cap_flags2;
u32 max_dpi;
};
@@ -107,7 +110,7 @@ struct bnxt_qplib_ah {
struct bnxt_qplib_mrw {
struct bnxt_qplib_pd *pd;
int type;
- u32 flags;
+ u32 access_flags;
#define BNXT_QPLIB_FR_PMR 0x80000000
u32 lkey;
u32 rkey;
@@ -115,6 +118,7 @@ struct bnxt_qplib_mrw {
u64 va;
u64 total_size;
u32 npages;
+ u16 flags;
u64 mr_handle;
struct bnxt_qplib_hwq hwq;
};
@@ -292,6 +296,7 @@ struct bnxt_qplib_cc_param_ext {
struct bnxt_qplib_cc_param {
u8 alt_vlan_pcp;
+ u8 qp1_tos_dscp;
u16 alt_tos_dscp;
u8 cc_mode;
u8 enable;
@@ -321,8 +326,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 gid_idx,
const u8 *smac);
-int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_dev_attr *attr);
+int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx);
@@ -349,5 +353,16 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
struct bnxt_qplib_ext_stat *estat);
int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
struct bnxt_qplib_cc_param *cc_param);
+int bnxt_qplib_read_context(struct bnxt_qplib_rcfw *rcfw, u8 type, u32 xid,
+ u32 resp_size, void *resp_va);
+int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cc_param *cc_param);
+
+#define BNXT_VAR_MAX_WQE 4352
+#define BNXT_VAR_MAX_SLOT_ALIGN 256
+#define BNXT_VAR_MAX_SGE 13
+#define BNXT_RE_MAX_RQ_WQES 65536
+
+#define BNXT_STATIC_MAX_SGE 6
#endif /* __BNXT_QPLIB_SP_H__*/
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 605c9463c408..7eceb3e9f4ce 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -216,6 +216,8 @@ struct cmdq_initialize_fw {
__le16 flags;
#define CMDQ_INITIALIZE_FW_FLAGS_MRAV_RESERVATION_SPLIT 0x1UL
#define CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED 0x2UL
+ #define CMDQ_INITIALIZE_FW_FLAGS_OPTIMIZE_MODIFY_QP_SUPPORTED 0x8UL
+ #define CMDQ_INITIALIZE_FW_FLAGS_L2_VF_RESOURCE_MGMT 0x10UL
__le16 cookie;
u8 resp_size;
u8 reserved8;
@@ -409,7 +411,7 @@ struct creq_deinitialize_fw_resp {
u8 reserved48[6];
};
-/* cmdq_create_qp (size:768b/96B) */
+/* cmdq_create_qp (size:832b/104B) */
struct cmdq_create_qp {
u8 opcode;
#define CMDQ_CREATE_QP_OPCODE_CREATE_QP 0x1UL
@@ -430,8 +432,11 @@ struct cmdq_create_qp {
#define CMDQ_CREATE_QP_QP_FLAGS_OPTIMIZED_TRANSMIT_ENABLED 0x20UL
#define CMDQ_CREATE_QP_QP_FLAGS_RESPONDER_UD_CQE_WITH_CFA 0x40UL
#define CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED 0x80UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_EXPRESS_MODE_ENABLED 0x100UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_STEERING_TAG_VALID 0x200UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_RDMA_READ_OR_ATOMICS_USED 0x400UL
#define CMDQ_CREATE_QP_QP_FLAGS_LAST \
- CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED
+ CMDQ_CREATE_QP_QP_FLAGS_RDMA_READ_OR_ATOMICS_USED
u8 type;
#define CMDQ_CREATE_QP_TYPE_RC 0x2UL
#define CMDQ_CREATE_QP_TYPE_UD 0x4UL
@@ -492,6 +497,9 @@ struct cmdq_create_qp {
__le64 rq_pbl;
__le64 irrq_addr;
__le64 orrq_addr;
+ __le32 request_xid;
+ __le16 steering_tag;
+ __le16 reserved16;
};
/* creq_create_qp_resp (size:128b/16B) */
@@ -553,6 +561,7 @@ struct cmdq_modify_qp {
#define CMDQ_MODIFY_QP_OPCODE_LAST CMDQ_MODIFY_QP_OPCODE_MODIFY_QP
u8 cmd_size;
__le16 flags;
+ #define CMDQ_MODIFY_QP_FLAGS_SRQ_USED 0x1UL
__le16 cookie;
u8 resp_size;
u8 qp_type;
@@ -972,13 +981,14 @@ struct creq_query_qp_extend_resp_sb_tlv {
__le16 reserved_16;
};
-/* cmdq_create_srq (size:384b/48B) */
+/* cmdq_create_srq (size:448b/56B) */
struct cmdq_create_srq {
u8 opcode;
#define CMDQ_CREATE_SRQ_OPCODE_CREATE_SRQ 0x5UL
#define CMDQ_CREATE_SRQ_OPCODE_LAST CMDQ_CREATE_SRQ_OPCODE_CREATE_SRQ
u8 cmd_size;
__le16 flags;
+ #define CMDQ_CREATE_SRQ_FLAGS_STEERING_TAG_VALID 0x1UL
__le16 cookie;
u8 resp_size;
u8 reserved8;
@@ -1012,6 +1022,8 @@ struct cmdq_create_srq {
__le32 dpi;
__le32 pd_id;
__le64 pbl;
+ __le16 steering_tag;
+ u8 reserved48[6];
};
/* creq_create_srq_resp (size:128b/16B) */
@@ -1118,7 +1130,7 @@ struct creq_query_srq_resp_sb {
__le32 data[4];
};
-/* cmdq_create_cq (size:384b/48B) */
+/* cmdq_create_cq (size:448b/56B) */
struct cmdq_create_cq {
u8 opcode;
#define CMDQ_CREATE_CQ_OPCODE_CREATE_CQ 0x9UL
@@ -1126,6 +1138,9 @@ struct cmdq_create_cq {
u8 cmd_size;
__le16 flags;
#define CMDQ_CREATE_CQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x1UL
+ #define CMDQ_CREATE_CQ_FLAGS_STEERING_TAG_VALID 0x2UL
+ #define CMDQ_CREATE_CQ_FLAGS_INFINITE_CQ_MODE 0x4UL
+ #define CMDQ_CREATE_CQ_FLAGS_COALESCING_VALID 0x8UL
__le16 cookie;
u8 resp_size;
u8 reserved8;
@@ -1157,6 +1172,19 @@ struct cmdq_create_cq {
__le32 dpi;
__le32 cq_size;
__le64 pbl;
+ __le16 steering_tag;
+ u8 reserved48[2];
+ __le32 coalescing;
+ #define CMDQ_CREATE_CQ_BUF_MAXTIME_MASK 0x1ffUL
+ #define CMDQ_CREATE_CQ_BUF_MAXTIME_SFT 0
+ #define CMDQ_CREATE_CQ_NORMAL_MAXBUF_MASK 0x3e00UL
+ #define CMDQ_CREATE_CQ_NORMAL_MAXBUF_SFT 9
+ #define CMDQ_CREATE_CQ_DURING_MAXBUF_MASK 0x7c000UL
+ #define CMDQ_CREATE_CQ_DURING_MAXBUF_SFT 14
+ #define CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE 0x80000UL
+ #define CMDQ_CREATE_CQ_UNUSED12_MASK 0xfff00000UL
+ #define CMDQ_CREATE_CQ_UNUSED12_SFT 20
+ __le64 reserved64;
};
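
The new 32-bit coalescing word packs three sub-fields plus a mode bit, so it has to be assembled with the masks and shifts above. A sketch of the packing; the input values (buf_maxtime, normal_maxbuf, during_maxbuf, ring_idle_mode) are placeholders, not recommended settings:

/* Pack the CQ coalescing parameters into req.coalescing (sketch). */
u32 coal = 0;

coal |= (buf_maxtime << CMDQ_CREATE_CQ_BUF_MAXTIME_SFT) &
	CMDQ_CREATE_CQ_BUF_MAXTIME_MASK;
coal |= (normal_maxbuf << CMDQ_CREATE_CQ_NORMAL_MAXBUF_SFT) &
	CMDQ_CREATE_CQ_NORMAL_MAXBUF_MASK;
coal |= (during_maxbuf << CMDQ_CREATE_CQ_DURING_MAXBUF_SFT) &
	CMDQ_CREATE_CQ_DURING_MAXBUF_MASK;
if (ring_idle_mode)
	coal |= CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE;
req.coalescing = cpu_to_le32(coal);
req.flags |= cpu_to_le16(CMDQ_CREATE_CQ_FLAGS_COALESCING_VALID);
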
/* creq_create_cq_resp (size:128b/16B) */
@@ -1288,11 +1316,12 @@ struct cmdq_allocate_mrw {
#define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A 0x3UL
#define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B 0x4UL
#define CMDQ_ALLOCATE_MRW_MRW_FLAGS_LAST CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B
- #define CMDQ_ALLOCATE_MRW_UNUSED4_MASK 0xf0UL
- #define CMDQ_ALLOCATE_MRW_UNUSED4_SFT 4
+ #define CMDQ_ALLOCATE_MRW_STEERING_TAG_VALID 0x10UL
+ #define CMDQ_ALLOCATE_MRW_UNUSED4_MASK 0xe0UL
+ #define CMDQ_ALLOCATE_MRW_UNUSED4_SFT 5
u8 access;
#define CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY 0x20UL
- __le16 unused16;
+ __le16 steering_tag;
__le32 pd_id;
};
@@ -1359,14 +1388,16 @@ struct creq_deallocate_key_resp {
__le32 bound_window_info;
};
-/* cmdq_register_mr (size:384b/48B) */
+/* cmdq_register_mr (size:448b/56B) */
struct cmdq_register_mr {
u8 opcode;
#define CMDQ_REGISTER_MR_OPCODE_REGISTER_MR 0xfUL
#define CMDQ_REGISTER_MR_OPCODE_LAST CMDQ_REGISTER_MR_OPCODE_REGISTER_MR
u8 cmd_size;
__le16 flags;
- #define CMDQ_REGISTER_MR_FLAGS_ALLOC_MR 0x1UL
+ #define CMDQ_REGISTER_MR_FLAGS_ALLOC_MR 0x1UL
+ #define CMDQ_REGISTER_MR_FLAGS_STEERING_TAG_VALID 0x2UL
+ #define CMDQ_REGISTER_MR_FLAGS_ENABLE_RO 0x4UL
__le16 cookie;
u8 resp_size;
u8 reserved8;
@@ -1415,6 +1446,8 @@ struct cmdq_register_mr {
__le64 pbl;
__le64 va;
__le64 mr_size;
+ __le16 steering_tag;
+ u8 reserved48[6];
};
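
CMDQ_REGISTER_MR_FLAGS_ENABLE_RO lets the host request relaxed-ordered PCIe access for a registered region. A sketch, assuming firmware support was already confirmed via the MEMORY_REGION_RO_SUPPORTED capability bit defined further down:

/* Sketch: opt the MR into relaxed ordering on explicit request. */
if (access_flags & IB_ACCESS_RELAXED_ORDERING)
	req.flags |= cpu_to_le16(CMDQ_REGISTER_MR_FLAGS_ENABLE_RO);
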
/* creq_register_mr_resp (size:128b/16B) */
@@ -2157,8 +2190,38 @@ struct creq_query_func_resp_sb {
__le32 tqm_alloc_reqs[12];
__le32 max_dpi;
u8 max_sge_var_wqe;
- u8 reserved_8;
+ u8 dev_cap_ext_flags;
+ #define CREQ_QUERY_FUNC_RESP_SB_ATOMIC_OPS_NOT_SUPPORTED 0x1UL
+ #define CREQ_QUERY_FUNC_RESP_SB_DRV_VERSION_RGTR_SUPPORTED 0x2UL
+ #define CREQ_QUERY_FUNC_RESP_SB_CREATE_QP_BATCH_SUPPORTED 0x4UL
+ #define CREQ_QUERY_FUNC_RESP_SB_DESTROY_QP_BATCH_SUPPORTED 0x8UL
+ #define CREQ_QUERY_FUNC_RESP_SB_ROCE_STATS_EXT_CTX_SUPPORTED 0x10UL
+ #define CREQ_QUERY_FUNC_RESP_SB_CREATE_SRQ_SGE_SUPPORTED 0x20UL
+ #define CREQ_QUERY_FUNC_RESP_SB_FIXED_SIZE_WQE_DISABLED 0x40UL
+ #define CREQ_QUERY_FUNC_RESP_SB_DCN_SUPPORTED 0x80UL
__le16 max_inline_data_var_wqe;
+ __le32 start_qid;
+ u8 max_msn_table_size;
+ u8 reserved8_1;
+ __le16 dev_cap_ext_flags_2;
+ #define CREQ_QUERY_FUNC_RESP_SB_OPTIMIZE_MODIFY_QP_SUPPORTED 0x1UL
+ #define CREQ_QUERY_FUNC_RESP_SB_CHANGE_UDP_SRC_PORT_WQE_SUPPORTED 0x2UL
+ #define CREQ_QUERY_FUNC_RESP_SB_CQ_COALESCING_SUPPORTED 0x4UL
+ #define CREQ_QUERY_FUNC_RESP_SB_MEMORY_REGION_RO_SUPPORTED 0x8UL
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_MASK 0x30UL
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_SFT 4
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_PSN_TABLE (0x0UL << 4)
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_MSN_TABLE (0x1UL << 4)
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE (0x2UL << 4)
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_LAST \
+ CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE
+ #define CREQ_QUERY_FUNC_RESP_SB_MAX_SRQ_EXTENDED 0x40UL
+ #define CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED 0x1000UL
+ __le16 max_xp_qp_size;
+ __le16 create_qp_batch_size;
+ __le16 destroy_qp_batch_size;
+ __le16 max_srq_ext;
+ __le64 reserved64;
};
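
Unlike the single-bit capabilities, dev_cap_ext_flags_2 carries a two-bit retransmission-scheme field, so it must be masked before comparison. A sketch of the decode; 'sb' is a pointer to this response block and 'use_msn_table' is a hypothetical driver flag:

u16 flags2 = le16_to_cpu(sb->dev_cap_ext_flags_2);

/* The *_HOST_MSN_TABLE value is already positioned at the field's
 * shift, so compare against the masked word directly.
 */
if ((flags2 & CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_MASK) ==
    CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_MSN_TABLE)
	use_msn_table = true;
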
/* cmdq_set_func_resources (size:448b/56B) */
@@ -2205,6 +2268,46 @@ struct creq_set_func_resources_resp {
u8 reserved48[6];
};
+/* cmdq_read_context (size:192b/24B) */
+struct cmdq_read_context {
+ u8 opcode;
+ #define CMDQ_READ_CONTEXT_OPCODE_READ_CONTEXT 0x85UL
+ #define CMDQ_READ_CONTEXT_OPCODE_LAST CMDQ_READ_CONTEXT_OPCODE_READ_CONTEXT
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ __le32 xid;
+ u8 type;
+ #define CMDQ_READ_CONTEXT_TYPE_QPC 0x0UL
+ #define CMDQ_READ_CONTEXT_TYPE_CQ 0x1UL
+ #define CMDQ_READ_CONTEXT_TYPE_MRW 0x2UL
+ #define CMDQ_READ_CONTEXT_TYPE_SRQ 0x3UL
+ #define CMDQ_READ_CONTEXT_TYPE_LAST CMDQ_READ_CONTEXT_TYPE_SRQ
+ u8 unused_0[3];
+};
+
+/* creq_read_context (size:128b/16B) */
+struct creq_read_context {
+ u8 type;
+ #define CREQ_READ_CONTEXT_TYPE_MASK 0x3fUL
+ #define CREQ_READ_CONTEXT_TYPE_SFT 0
+ #define CREQ_READ_CONTEXT_TYPE_QP_EVENT 0x38UL
+ #define CREQ_READ_CONTEXT_TYPE_LAST CREQ_READ_CONTEXT_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_READ_CONTEXT_V 0x1UL
+ u8 event;
+ #define CREQ_READ_CONTEXT_EVENT_READ_CONTEXT 0x85UL
+ #define CREQ_READ_CONTEXT_EVENT_LAST CREQ_READ_CONTEXT_EVENT_READ_CONTEXT
+ __le16 reserved16;
+ __le32 reserved_32;
+};
+
/* cmdq_map_tc_to_cos (size:192b/24B) */
struct cmdq_map_tc_to_cos {
u8 opcode;