author	Linus Torvalds <torvalds@linux-foundation.org>	2022-01-13 11:09:11 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-01-13 11:09:11 -0800
commit	747c19eb7539b5e6bb15ed57a0a14ebf9f3adb8e (patch)
tree	90cc59178f3b344108b14c6d0ebe3b4b5cc6e30a
parent	feb7a43de5ef625ad74097d8fd3481d5dbc06a59 (diff)
parent	c0fe82baaeb2719f910359684c0817057f79a84a (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
 "Another small cycle. Mostly cleanups and bug fixes, quite a bit
  assisted from bots. There are a few new syzkaller splats that haven't
  been solved yet but they should get into the rcs in a few weeks, I
  think.

  Summary:

   - Update drivers to use common helpers for GUIDs, pkeys, bitmaps,
     memset_startat, and others

   - General code cleanups from bots

   - Simplify some of the rxe pool code in preparation for a larger
     rework

   - Clean out old stuff from hns, including all support for hip06
     devices

   - Fix a bug where GID table entries could be missed if the table had
     holes in it

   - Rename paths and sessions in rtrs for better understandability

   - Consolidate the roce source port selection code

   - NDR speed support in mlx5"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (83 commits)
  RDMA/irdma: Remove the redundant return
  RDMA/rxe: Use the standard method to produce udp source port
  RDMA/irdma: Make the source udp port vary
  RDMA/hns: Replace get_udp_sport with rdma_get_udp_sport
  RDMA/core: Calculate UDP source port based on flow label or lqpn/rqpn
  IB/qib: Fix typos
  RDMA/rtrs-clt: Rename rtrs_clt to rtrs_clt_sess
  RDMA/rtrs-srv: Rename rtrs_srv to rtrs_srv_sess
  RDMA/rtrs-clt: Rename rtrs_clt_sess to rtrs_clt_path
  RDMA/rtrs-srv: Rename rtrs_srv_sess to rtrs_srv_path
  RDMA/rtrs: Rename rtrs_sess to rtrs_path
  RDMA/hns: Modify the hop num of HIP09 EQ to 1
  IB/iser: Align coding style across driver
  IB/iser: Remove un-needed casting to/from void pointer
  IB/iser: Don't suppress send completions
  IB/iser: Rename ib_ret local variable
  IB/iser: Fix RNR errors
  IB/iser: Remove deprecated pi_guard module param
  IB/mlx5: Expose NDR speed through MAD
  RDMA/cxgb4: Set queue pair state when being queried
  ...
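The source-port items above refer to deriving the RoCE v2 UDP source port from the packet's flow label when one exists, and otherwise from the local/remote QP numbers, so that all traffic of a QP pair hashes to the same port. Below is a small, hedged sketch of that idea; the function name and the folding scheme are illustrative assumptions, not the kernel's exact code (the in-tree helper this series converges on is rdma_get_udp_sport()).

#include <linux/types.h>

/* RoCE v2 UDP source ports come from the dynamic range 0xC000..0xFFFF. */
#define EXAMPLE_UDP_SPORT_BASE 0xC000u

/*
 * Illustrative only: fold a 20-bit flow label (or a stand-in derived from
 * the QP pair) into the low 14 bits of the UDP source port.
 */
static u16 example_udp_sport(u32 flow_label, u32 lqpn, u32 rqpn)
{
	u32 fl = flow_label & 0xFFFFF;		/* IPv6 flow label is 20 bits */

	if (!fl)
		fl = (lqpn ^ rqpn) & 0xFFFFF;	/* no flow label: derive one from the QPNs */

	/* xor-fold the top 6 bits down, then offset into the dynamic port range */
	return (u16)(EXAMPLE_UDP_SPORT_BASE | ((fl ^ (fl >> 14)) & 0x3FFF));
}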
-rw-r--r--drivers/block/rnbd/rnbd-clt.c4
-rw-r--r--drivers/block/rnbd/rnbd-clt.h2
-rw-r--r--drivers/block/rnbd/rnbd-srv.c16
-rw-r--r--drivers/block/rnbd/rnbd-srv.h2
-rw-r--r--drivers/infiniband/core/cache.c12
-rw-r--r--drivers/infiniband/core/cma.c18
-rw-r--r--drivers/infiniband/core/device.c3
-rw-r--r--drivers/infiniband/core/sysfs.c3
-rw-r--r--drivers/infiniband/core/umem_odp.c3
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c1
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c9
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c3
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c11
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c12
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h1
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c50
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h7
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c99
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h9
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/id_table.c17
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c8
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c1
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c8
-rw-r--r--drivers/infiniband/hw/hns/Kconfig17
-rw-r--r--drivers/infiniband/hw/hns/Makefile5
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c5
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.c11
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_common.h202
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c13
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_db.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h108
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c4675
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.h1147
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c49
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h22
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c85
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c32
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c17
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c93
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_if.c2
-rw-r--r--drivers/infiniband/hw/irdma/pble.h2
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c31
-rw-r--r--drivers/infiniband/hw/mlx4/main.c34
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c5
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c23
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_allocator.c15
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c25
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c20
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c16
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c17
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c18
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h1
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.c3
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c8
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c10
-rw-r--r--drivers/infiniband/sw/rxe/Makefile1
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c8
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c24
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h10
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mcast.c11
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mw.c21
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c9
-rw-r--r--drivers/infiniband/sw/rxe/rxe_opcode.c739
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c177
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.h54
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.c9
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c16
-rw-r--r--drivers/infiniband/sw/rxe/rxe_srq.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_sysfs.c119
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c18
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c34
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h24
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c6
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c76
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h23
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c106
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c58
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c138
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c8
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c145
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c1089
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.h41
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-pri.h18
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c121
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c684
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.h16
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.c98
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.h34
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h2
-rw-r--r--include/rdma/ib_mad.h1
-rw-r--r--include/rdma/ib_smi.h12
-rw-r--r--include/rdma/ib_verbs.h17
-rw-r--r--include/uapi/rdma/hns-abi.h2
105 files changed, 2232 insertions(+), 8797 deletions(-)
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 67a8edbaa1fd..8f8443ee6fe4 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -433,7 +433,7 @@ static void msg_conf(void *priv, int errno)
schedule_work(&iu->work);
}
-static int send_usr_msg(struct rtrs_clt *rtrs, int dir,
+static int send_usr_msg(struct rtrs_clt_sess *rtrs, int dir,
struct rnbd_iu *iu, struct kvec *vec,
size_t len, struct scatterlist *sg, unsigned int sg_len,
void (*conf)(struct work_struct *work),
@@ -1010,7 +1010,7 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
struct request *rq,
struct rnbd_iu *iu)
{
- struct rtrs_clt *rtrs = dev->sess->rtrs;
+ struct rtrs_clt_sess *rtrs = dev->sess->rtrs;
struct rtrs_permit *permit = iu->permit;
struct rnbd_msg_io msg;
struct rtrs_clt_req_ops req_ops;
diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h
index 9ef8c4f306f2..0c2cae7f39b9 100644
--- a/drivers/block/rnbd/rnbd-clt.h
+++ b/drivers/block/rnbd/rnbd-clt.h
@@ -75,7 +75,7 @@ struct rnbd_cpu_qlist {
struct rnbd_clt_session {
struct list_head list;
- struct rtrs_clt *rtrs;
+ struct rtrs_clt_sess *rtrs;
wait_queue_head_t rtrs_waitq;
bool rtrs_ready;
struct rnbd_cpu_qlist __percpu
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index aafecfe97055..1ee808fc600c 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -263,15 +263,15 @@ out:
kfree(srv_sess);
}
-static int create_sess(struct rtrs_srv *rtrs)
+static int create_sess(struct rtrs_srv_sess *rtrs)
{
struct rnbd_srv_session *srv_sess;
- char sessname[NAME_MAX];
+ char pathname[NAME_MAX];
int err;
- err = rtrs_srv_get_sess_name(rtrs, sessname, sizeof(sessname));
+ err = rtrs_srv_get_path_name(rtrs, pathname, sizeof(pathname));
if (err) {
- pr_err("rtrs_srv_get_sess_name(%s): %d\n", sessname, err);
+ pr_err("rtrs_srv_get_path_name(%s): %d\n", pathname, err);
return err;
}
@@ -284,8 +284,8 @@ static int create_sess(struct rtrs_srv *rtrs)
offsetof(struct rnbd_dev_blk_io, bio),
BIOSET_NEED_BVECS);
if (err) {
- pr_err("Allocating srv_session for session %s failed\n",
- sessname);
+ pr_err("Allocating srv_session for path %s failed\n",
+ pathname);
kfree(srv_sess);
return err;
}
@@ -298,14 +298,14 @@ static int create_sess(struct rtrs_srv *rtrs)
mutex_unlock(&sess_lock);
srv_sess->rtrs = rtrs;
- strscpy(srv_sess->sessname, sessname, sizeof(srv_sess->sessname));
+ strscpy(srv_sess->sessname, pathname, sizeof(srv_sess->sessname));
rtrs_srv_set_sess_priv(rtrs, srv_sess);
return 0;
}
-static int rnbd_srv_link_ev(struct rtrs_srv *rtrs,
+static int rnbd_srv_link_ev(struct rtrs_srv_sess *rtrs,
enum rtrs_srv_link_ev ev, void *priv)
{
struct rnbd_srv_session *srv_sess = priv;
diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h
index 98ddc31eb408..e5604bce123a 100644
--- a/drivers/block/rnbd/rnbd-srv.h
+++ b/drivers/block/rnbd/rnbd-srv.h
@@ -20,7 +20,7 @@
struct rnbd_srv_session {
/* Entry inside global sess_list */
struct list_head list;
- struct rtrs_srv *rtrs;
+ struct rtrs_srv_sess *rtrs;
char sessname[NAME_MAX];
int queue_depth;
struct bio_set sess_bio_set;
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index b79f816a7203..f6aa1a964573 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -956,7 +956,7 @@ int rdma_query_gid(struct ib_device *device, u32 port_num,
{
struct ib_gid_table *table;
unsigned long flags;
- int res = -EINVAL;
+ int res;
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
@@ -964,9 +964,15 @@ int rdma_query_gid(struct ib_device *device, u32 port_num,
table = rdma_gid_table(device, port_num);
read_lock_irqsave(&table->rwlock, flags);
- if (index < 0 || index >= table->sz ||
- !is_gid_entry_valid(table->data_vec[index]))
+ if (index < 0 || index >= table->sz) {
+ res = -EINVAL;
goto done;
+ }
+
+ if (!is_gid_entry_valid(table->data_vec[index])) {
+ res = -ENOENT;
+ goto done;
+ }
memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid));
res = 0;
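With this split, rdma_query_gid() now distinguishes an out-of-range index (-EINVAL) from a valid index whose slot is simply empty (-ENOENT), so callers that scan a whole GID table can skip holes instead of aborting at the first one, which is the bug the cma.c and device.c hunks below fix. A hedged sketch of the resulting caller pattern (names and the table bound are illustrative, not the kernel's exact code):

#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Sketch only: look for @wanted in @port's GID table, tolerating empty slots. */
static int example_find_gid(struct ib_device *device, u32 port,
			    const union ib_gid *wanted, u16 *index)
{
	union ib_gid tmp;
	int i, ret;

	for (i = 0; i < 128; i++) {	/* bound is illustrative; real callers use gid_tbl_len */
		ret = rdma_query_gid(device, port, i, &tmp);
		if (ret)
			continue;	/* -ENOENT: hole in the table, keep scanning */
		if (!memcmp(&tmp, wanted, sizeof(tmp))) {
			*index = i;
			return 0;
		}
	}
	return -ENOENT;
}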
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 835ac54d4a24..27a00ce2e101 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -766,6 +766,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
unsigned int p;
u16 pkey, index;
enum ib_port_state port_state;
+ int ret;
int i;
cma_dev = NULL;
@@ -784,9 +785,14 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
continue;
- for (i = 0; !rdma_query_gid(cur_dev->device,
- p, i, &gid);
- i++) {
+
+ for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len;
+ ++i) {
+ ret = rdma_query_gid(cur_dev->device, p, i,
+ &gid);
+ if (ret)
+ continue;
+
if (!memcmp(&gid, dgid, sizeof(gid))) {
cma_dev = cur_dev;
sgid = gid;
@@ -4033,8 +4039,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv);
- req.private_data_len = offset + conn_param->private_data_len;
- if (req.private_data_len < conn_param->private_data_len)
+ if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len))
return -EINVAL;
if (req.private_data_len) {
@@ -4093,8 +4098,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv);
- req.private_data_len = offset + conn_param->private_data_len;
- if (req.private_data_len < conn_param->private_data_len)
+ if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len))
return -EINVAL;
if (req.private_data_len) {
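check_add_overflow() (from <linux/overflow.h>) performs the addition, stores the result in the third argument, and returns true if it wrapped; that replaces the old "is the sum smaller than one operand" idiom used above and also catches truncation when the destination is a narrower type. A small stand-alone illustration with made-up names:

#include <linux/overflow.h>
#include <linux/types.h>

/* Sketch: combine a fixed header offset with a user-supplied payload length. */
static int example_total_len(u32 offset, u32 payload_len, u32 *total)
{
	if (check_add_overflow(offset, payload_len, total))
		return -EINVAL;		/* sum wrapped around: reject the request */
	return 0;
}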
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 22a4adda7981..a311df07b1bd 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -2461,7 +2461,8 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid,
++i) {
ret = rdma_query_gid(device, port, i, &tmp_gid);
if (ret)
- return ret;
+ continue;
+
if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
*port_num = port;
if (index)
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index a3f84b50c46a..84c53bd2a52d 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -433,6 +433,7 @@ static struct attribute *port_default_attrs[] = {
&ib_port_attr_link_layer.attr,
NULL
};
+ATTRIBUTE_GROUPS(port_default);
static ssize_t print_ndev(const struct ib_gid_attr *gid_attr, char *buf)
{
@@ -774,7 +775,7 @@ static void ib_port_gid_attr_release(struct kobject *kobj)
static struct kobj_type port_type = {
.release = ib_port_release,
.sysfs_ops = &port_sysfs_ops,
- .default_attrs = port_default_attrs
+ .default_groups = port_default_groups,
};
static struct kobj_type gid_attr_type = {
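The kobj_type .default_attrs field is being retired upstream in favour of .default_groups; ATTRIBUTE_GROUPS(port_default) generates the port_default_group/port_default_groups pair from the existing port_default_attrs[] array, which is what the hunk above switches to. A minimal sketch of the same pattern with hypothetical names:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static struct attribute *example_attrs[] = {
	/* &example_attr_foo.attr, ... */
	NULL,
};
ATTRIBUTE_GROUPS(example);		/* emits example_group and example_groups[] */

static struct kobj_type example_ktype = {
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_groups	= example_groups,	/* replaces the old .default_attrs pointer */
};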
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 7a47343d11f9..aead24c1a682 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -227,7 +227,6 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
const struct mmu_interval_notifier_ops *ops)
{
struct ib_umem_odp *umem_odp;
- struct mm_struct *mm;
int ret;
if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)))
@@ -241,7 +240,7 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
umem_odp->umem.length = size;
umem_odp->umem.address = addr;
umem_odp->umem.writable = ib_access_writable(access);
- umem_odp->umem.owning_mm = mm = current->mm;
+ umem_odp->umem.owning_mm = current->mm;
umem_odp->notifier.ops = ops;
umem_odp->page_shift = PAGE_SHIFT;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index d1345d76d9b1..6b6393176b3c 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1399,7 +1399,6 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
IB_SIGNAL_REQ_WR;
attr.qp_type = cmd->qp_type;
- attr.create_flags = 0;
attr.cap.max_send_wr = cmd->max_send_wr;
attr.cap.max_recv_wr = cmd->max_recv_wr;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 29cc0d14399a..3224f18a66e5 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -262,13 +262,12 @@ void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
u16 index, u16 *pkey)
{
- struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ if (index > 0)
+ return -EINVAL;
- /* Ignore port_num */
+ *pkey = IB_DEFAULT_PKEY_FULL;
- memset(pkey, 0, sizeof(*pkey));
- return bnxt_qplib_get_pkey(&rdev->qplib_res,
- &rdev->qplib_res.pkey_tbl, index, pkey);
+ return 0;
}
int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index b44944fb9b24..3d6834d3d4fb 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -893,7 +893,6 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
qplib_srq);
struct ib_event ib_event;
- int rc = 0;
ib_event.device = &srq->rdev->ibdev;
ib_event.element.srq = &srq->ib_srq;
@@ -907,7 +906,7 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
(*srq->ib_srq.event_handler)(&ib_event,
srq->ib_srq.srq_context);
}
- return rc;
+ return 0;
}
static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index ca88849559bf..96e581ced50e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -46,6 +46,7 @@
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
+#include <rdma/ib_mad.h>
#include "roce_hsi.h"
@@ -1232,7 +1233,7 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_modify_qp req;
struct creq_modify_qp_resp resp;
- u16 cmd_flags = 0, pkey;
+ u16 cmd_flags = 0;
u32 temp32[4];
u32 bmask;
int rc;
@@ -1255,11 +1256,9 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
req.access = qp->access;
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
- if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
- qp->pkey_index, &pkey))
- req.pkey = cpu_to_le16(pkey);
- }
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
+ req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);
+
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
req.qkey = cpu_to_le32(qp->qkey);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 3de854727460..061b2895dd9b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -555,7 +555,7 @@ skip_ctx_setup:
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
- kfree(rcfw->cmdq.cmdq_bitmap);
+ bitmap_free(rcfw->cmdq.cmdq_bitmap);
kfree(rcfw->qp_tbl);
kfree(rcfw->crsqe_tbl);
bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq);
@@ -572,7 +572,6 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
struct bnxt_qplib_sg_info sginfo = {};
struct bnxt_qplib_cmdq_ctx *cmdq;
struct bnxt_qplib_creq_ctx *creq;
- u32 bmap_size = 0;
rcfw->pdev = res->pdev;
cmdq = &rcfw->cmdq;
@@ -613,13 +612,10 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
if (!rcfw->crsqe_tbl)
goto fail;
- bmap_size = BITS_TO_LONGS(rcfw->cmdq_depth) * sizeof(unsigned long);
- cmdq->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
+ cmdq->cmdq_bitmap = bitmap_zalloc(rcfw->cmdq_depth, GFP_KERNEL);
if (!cmdq->cmdq_bitmap)
goto fail;
- cmdq->bmap_size = bmap_size;
-
/* Allocate one extra to hold the QP1 entries */
rcfw->qp_tbl_size = qp_tbl_sz + 1;
rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node),
@@ -667,8 +663,8 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
iounmap(cmdq->cmdq_mbox.reg.bar_reg);
iounmap(creq->creq_db.reg.bar_reg);
- indx = find_first_bit(cmdq->cmdq_bitmap, cmdq->bmap_size);
- if (indx != cmdq->bmap_size)
+ indx = find_first_bit(cmdq->cmdq_bitmap, rcfw->cmdq_depth);
+ if (indx != rcfw->cmdq_depth)
dev_err(&rcfw->pdev->dev,
"disabling RCFW with pending cmd-bit %lx\n", indx);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 82faa4e4cda8..0a3d8e7da3d4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -152,7 +152,6 @@ struct bnxt_qplib_cmdq_ctx {
wait_queue_head_t waitq;
unsigned long flags;
unsigned long *cmdq_bitmap;
- u32 bmap_size;
u32 seq_num;
};
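bitmap_zalloc() and bitmap_free() take a size in bits, so the BITS_TO_LONGS() arithmetic and the separate bmap_size field removed in the hunks above become unnecessary; whatever depth was passed at allocation time (rcfw->cmdq_depth here) is also what gets passed to find_first_bit() and friends. A hedged sketch of that pattern with assumed names:

#include <linux/bitmap.h>
#include <linux/gfp.h>
#include <linux/printk.h>

/* Sketch: one bit per in-flight command slot. */
static unsigned long *example_alloc_slots(unsigned int depth)
{
	return bitmap_zalloc(depth, GFP_KERNEL);	/* sized in bits, zero-filled */
}

static void example_free_slots(unsigned long *slots, unsigned int depth)
{
	/* find_first_bit() returns @depth when no bit is set */
	if (slots && find_first_bit(slots, depth) != depth)
		pr_warn("freeing slot bitmap with commands still pending\n");
	bitmap_free(slots);
}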
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index bc1ba4b51ba4..126d4f26f75a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -649,31 +649,6 @@ static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}
-static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl)
-{
- if (!pkey_tbl->tbl)
- dev_dbg(&res->pdev->dev, "PKEY tbl not present\n");
- else
- kfree(pkey_tbl->tbl);
-
- pkey_tbl->tbl = NULL;
- pkey_tbl->max = 0;
- pkey_tbl->active = 0;
-}
-
-static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl,
- u16 max)
-{
- pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
- if (!pkey_tbl->tbl)
- return -ENOMEM;
-
- pkey_tbl->max = max;
- return 0;
-};
-
/* PDs */
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
{
@@ -843,24 +818,6 @@ unmap_io:
return -ENOMEM;
}
-/* PKEYs */
-static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl)
-{
- memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
- pkey_tbl->active = 0;
-}
-
-static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl)
-{
- u16 pkey = 0xFFFF;
-
- memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
-
- /* pkey default = 0xFFFF */
- bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false);
-}
-
/* Stats */
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
struct bnxt_qplib_stats *stats)
@@ -891,21 +848,18 @@ static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
- bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl);
bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}
int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
- bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl);
return 0;
}
void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
- bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl);
bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
bnxt_qplib_free_pd_tbl(&res->pd_tbl);
bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
@@ -924,10 +878,6 @@ int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
if (rc)
goto fail;
- rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey);
- if (rc)
- goto fail;
-
rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
if (rc)
goto fail;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index e1411a2352a7..982e2c96dac2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -185,12 +185,6 @@ struct bnxt_qplib_sgid_tbl {
u8 *vlan;
};
-struct bnxt_qplib_pkey_tbl {
- u16 *tbl;
- u16 max;
- u16 active;
-};
-
struct bnxt_qplib_dpi {
u32 dpi;
void __iomem *dbr;
@@ -258,7 +252,6 @@ struct bnxt_qplib_res {
struct bnxt_qplib_rcfw *rcfw;
struct bnxt_qplib_pd_tbl pd_tbl;
struct bnxt_qplib_sgid_tbl sgid_tbl;
- struct bnxt_qplib_pkey_tbl pkey_tbl;
struct bnxt_qplib_dpi_tbl dpi_tbl;
bool prio;
bool is_vf;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 379e715ebd30..b802981b7171 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -146,17 +146,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->max_srq = le16_to_cpu(sb->max_srq);
attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
attr->max_srq_sges = sb->max_srq_sge;
- attr->max_pkey = le32_to_cpu(sb->max_pkeys);
- /*
- * Some versions of FW reports more than 0xFFFF.
- * Restrict it for now to 0xFFFF to avoid
- * reporting trucated value
- */
- if (attr->max_pkey > 0xFFFF) {
- /* ib_port_attr::pkey_tbl_len is u16 */
- attr->max_pkey = 0xFFFF;
- }
-
+ attr->max_pkey = 1;
attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
attr->l2_db_size = (sb->l2_db_space_size + 1) *
(0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
@@ -414,93 +404,6 @@ int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
return rc;
}
-/* pkeys */
-int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
- u16 *pkey)
-{
- if (index == 0xFFFF) {
- *pkey = 0xFFFF;
- return 0;
- }
- if (index >= pkey_tbl->max) {
- dev_err(&res->pdev->dev,
- "Index %d exceeded PKEY table max (%d)\n",
- index, pkey_tbl->max);
- return -EINVAL;
- }
- memcpy(pkey, &pkey_tbl->tbl[index], sizeof(*pkey));
- return 0;
-}
-
-int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
- bool update)
-{
- int i, rc = 0;
-
- if (!pkey_tbl) {
- dev_err(&res->pdev->dev, "PKEY table not allocated\n");
- return -EINVAL;
- }
-
- /* Do we need a pkey_lock here? */
- if (!pkey_tbl->active) {
- dev_err(&res->pdev->dev, "PKEY table has no active entries\n");
- return -ENOMEM;
- }
- for (i = 0; i < pkey_tbl->max; i++) {
- if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey)))
- break;
- }
- if (i == pkey_tbl->max) {
- dev_err(&res->pdev->dev,
- "PKEY 0x%04x not found in the pkey table\n", *pkey);
- return -ENOMEM;
- }
- memset(&pkey_tbl->tbl[i], 0, sizeof(*pkey));
- pkey_tbl->active--;
-
- /* unlock */
- return rc;
-}
-
-int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
- bool update)
-{
- int i, free_idx, rc = 0;
-
- if (!pkey_tbl) {
- dev_err(&res->pdev->dev, "PKEY table not allocated\n");
- return -EINVAL;
- }
-
- /* Do we need a pkey_lock here? */
- if (pkey_tbl->active == pkey_tbl->max) {
- dev_err(&res->pdev->dev, "PKEY table is full\n");
- return -ENOMEM;
- }
- free_idx = pkey_tbl->max;
- for (i = 0; i < pkey_tbl->max; i++) {
- if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey)))
- return -EALREADY;
- else if (!pkey_tbl->tbl[i] && free_idx == pkey_tbl->max)
- free_idx = i;
- }
- if (free_idx == pkey_tbl->max) {
- dev_err(&res->pdev->dev,
- "PKEY table is FULL but count is not MAX??\n");
- return -ENOMEM;
- }
- /* Add PKEY to the pkey_tbl */
- memcpy(&pkey_tbl->tbl[free_idx], pkey, sizeof(*pkey));
- pkey_tbl->active++;
-
- /* unlock */
- return rc;
-}
-
/* AH */
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
bool block)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index a18f568cb23e..5939e8fc8353 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -255,15 +255,6 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 gid_idx,
const u8 *smac);
-int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
- u16 *pkey);
-int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
- bool update);
-int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
- bool update);
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_dev_attr *attr, bool vf);
int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 913f39ee4416..c16017f6e8db 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -2471,7 +2471,8 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
skb_get(skb);
rpl = cplhdr(skb);
if (!is_t4(adapter_type)) {
- skb_trim(skb, roundup(sizeof(*rpl5), 16));
+ BUILD_BUG_ON(sizeof(*rpl5) != roundup(sizeof(*rpl5), 16));
+ skb_trim(skb, sizeof(*rpl5));
rpl5 = (void *)rpl;
INIT_TP_WR(rpl5, ep->hwtid);
} else {
@@ -2487,7 +2488,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
opt2 |= T5_ISS_F;
rpl5 = (void *)rpl;
- memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
+ memset_after(rpl5, 0, iss);
if (peer2peer)
isn += 4;
rpl5->iss = cpu_to_be32(isn);
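memset_after(obj, v, member) (added to <linux/string.h>) writes @v over everything in *obj that comes after the named member, so the tail-clearing above no longer depends on a hand-computed roundup() of struct sizes and cannot go stale if the layout changes. An illustration with a made-up struct:

#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical reply layout: clear all fields that follow 'iss'. */
struct example_rpl {
	__be64	wr;
	__be32	tid;
	__be32	iss;
	u8	rsvd[12];
};

static void example_clear_tail(struct example_rpl *rpl)
{
	/* zeroes rsvd[] (and any trailing padding) without spelling out the length */
	memset_after(rpl, 0, iss);
}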
diff --git a/drivers/infiniband/hw/cxgb4/id_table.c b/drivers/infiniband/hw/cxgb4/id_table.c
index 724d23297b35..f64e7e02b129 100644
--- a/drivers/infiniband/hw/cxgb4/id_table.c
+++ b/drivers/infiniband/hw/cxgb4/id_table.c
@@ -59,7 +59,7 @@ u32 c4iw_id_alloc(struct c4iw_id_table *alloc)
alloc->last = obj + 1;
if (alloc->last >= alloc->max)
alloc->last = 0;
- set_bit(obj, alloc->table);
+ __set_bit(obj, alloc->table);
obj += alloc->start;
} else
obj = -1;
@@ -75,37 +75,32 @@ void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj)
obj -= alloc->start;
spin_lock_irqsave(&alloc->lock, flags);
- clear_bit(obj, alloc->table);
+ __clear_bit(obj, alloc->table);
spin_unlock_irqrestore(&alloc->lock, flags);
}
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
u32 reserved, u32 flags)
{
- int i;
-
alloc->start = start;
alloc->flags = flags;
if (flags & C4IW_ID_TABLE_F_RANDOM)
alloc->last = prandom_u32() % RANDOM_SKIP;
else
alloc->last = 0;
- alloc->max = num;
+ alloc->max = num;
spin_lock_init(&alloc->lock);
- alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long),
- GFP_KERNEL);
+ alloc->table = bitmap_zalloc(num, GFP_KERNEL);
if (!alloc->table)
return -ENOMEM;
- bitmap_zero(alloc->table, num);
if (!(alloc->flags & C4IW_ID_TABLE_F_EMPTY))
- for (i = 0; i < reserved; ++i)
- set_bit(i, alloc->table);
+ bitmap_set(alloc->table, 0, reserved);
return 0;
}
void c4iw_id_table_free(struct c4iw_id_table *alloc)
{
- kfree(alloc->table);
+ bitmap_free(alloc->table);
}
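__set_bit() and __clear_bit() are the non-atomic bit helpers; they are sufficient here because every table update already runs under alloc->lock, and bitmap_set() marks the reserved range in one call instead of a set_bit() loop. A hedged sketch of the same locking pattern (structure and names are illustrative):

#include <linux/bitmap.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_id_table {
	spinlock_t	lock;
	unsigned long	*table;		/* one bit per allocatable id */
	u32		max;
};

static void example_id_free(struct example_id_table *t, u32 id)
{
	unsigned long flags;

	spin_lock_irqsave(&t->lock, flags);
	__clear_bit(id, t->table);	/* non-atomic is fine: the lock serializes all updates */
	spin_unlock_irqrestore(&t->lock, flags);
}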
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 0c8fd5a85fcb..89f36a3a9af0 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -41,6 +41,7 @@
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
+#include <net/addrconf.h>
#include <linux/io.h>
#include <asm/irq.h>
@@ -264,7 +265,8 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
return -EINVAL;
dev = to_c4iw_dev(ibdev);
- memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
+ addrconf_addr_eui48((u8 *)&props->sys_image_guid,
+ dev->rdev.lldi.ports[0]->dev_addr);
props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
props->fw_ver = dev->rdev.lldi.fw_vers;
props->device_cap_flags = dev->device_cap_flags;
@@ -525,8 +527,8 @@ void c4iw_register_device(struct work_struct *work)
struct c4iw_dev *dev = ctx->dev;
pr_debug("c4iw_dev %p\n", dev);
- memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
- memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
+ addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid,
+ dev->rdev.lldi.ports[0]->dev_addr);
dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
if (fastreg_support)
dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
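addrconf_addr_eui48() (from <net/addrconf.h>) expands a 6-byte Ethernet MAC into the modified EUI-64 form (inserting FF:FE in the middle and flipping the universal/local bit), so the 8-byte node and system-image GUIDs above are fully initialized rather than receiving a raw 6-byte copy. A short sketch with illustrative names:

#include <linux/types.h>
#include <net/addrconf.h>

/* Sketch: derive an RDMA node GUID from a netdev MAC address. */
static void example_mac_to_guid(const char *mac, __be64 *guid)
{
	/* fills all 8 bytes: mac[0..2], 0xFF, 0xFE, mac[3..5], with the U/L bit inverted */
	addrconf_addr_eui48((u8 *)guid, mac);
}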
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index d20b4ef2c853..ffbd9a89981e 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2460,6 +2460,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
memset(attr, 0, sizeof(*attr));
memset(init_attr, 0, sizeof(*init_attr));
attr->qp_state = to_ib_qp_state(qhp->attr.state);
+ attr->cur_qp_state = to_ib_qp_state(qhp->attr.state);
init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 5b11c8282744..a71c5a36ceba 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -161,9 +161,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
if (!pq->reqs)
goto pq_reqs_nomem;
- pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size),
- sizeof(*pq->req_in_use),
- GFP_KERNEL);
+ pq->req_in_use = bitmap_zalloc(hfi1_sdma_comp_ring_size, GFP_KERNEL);
if (!pq->req_in_use)
goto pq_reqs_no_in_use;
@@ -210,7 +208,7 @@ cq_comps_nomem:
cq_nomem:
kmem_cache_destroy(pq->txreq_cache);
pq_txreq_nomem:
- kfree(pq->req_in_use);
+ bitmap_free(pq->req_in_use);
pq_reqs_no_in_use:
kfree(pq->reqs);
pq_reqs_nomem:
@@ -257,7 +255,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
pq->wait,
!atomic_read(&pq->n_reqs));
kfree(pq->reqs);
- kfree(pq->req_in_use);
+ bitmap_free(pq->req_in_use);
kmem_cache_destroy(pq->txreq_cache);
flush_pq_iowait(pq);
kfree(pq);
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
index 18d10ebf900b..ab3fbba70789 100644
--- a/drivers/infiniband/hw/hns/Kconfig
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -5,22 +5,9 @@ config INFINIBAND_HNS
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on (HNS_DSAF && HNS_ENET) || HNS3
help
- This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
- is used in Hisilicon Hip06 and more further ICT SoC based on
- platform device.
+ This is a RoCE/RDMA driver for the Hisilicon RoCE engine.
- To compile HIP06 or HIP08 driver as module, choose M here.
-
-config INFINIBAND_HNS_HIP06
- bool "Hisilicon Hip06 Family RoCE support"
- depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
- depends on INFINIBAND_HNS=m || (HNS_DSAF=y && HNS_ENET=y)
- help
- RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
- Hip07 SoC. These RoCE engines are platform devices.
-
- To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
- module will be called hns-roce-hw-v1
+ To compile HIP08 driver as module, choose M here.
config INFINIBAND_HNS_HIP08
bool "Hisilicon Hip08 Family RoCE support"
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index e105945b94a1..9f04f25d9631 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -9,11 +9,6 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
-ifdef CONFIG_INFINIBAND_HNS_HIP06
-hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o
-endif
-
ifdef CONFIG_INFINIBAND_HNS_HIP08
hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs)
obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index cc258edec331..492b122d0521 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/platform_device.h>
#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
@@ -61,7 +60,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct hns_roce_ah *ah = to_hr_ah(ibah);
int ret = 0;
- if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 && udata)
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata)
return -EOPNOTSUPP;
ah->av.port = rdma_ah_get_port_num(ah_attr);
@@ -80,7 +79,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
/* HIP08 needs to record vlan info in Address Vector */
- if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08) {
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
ret = rdma_read_gid_l2_fields(ah_attr->grh.sgid_attr,
&ah->av.vlan_id, NULL);
if (ret)
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index d4fa0fd52294..11a78ceae568 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -31,10 +31,9 @@
* SOFTWARE.
*/
-#include <linux/platform_device.h>
#include <linux/vmalloc.h>
-#include "hns_roce_device.h"
#include <rdma/ib_umem.h>
+#include "hns_roce_device.h"
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf)
{
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index 84f3f2b5f097..4b693d542ace 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -31,7 +31,6 @@
*/
#include <linux/dmapool.h>
-#include <linux/platform_device.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
@@ -61,7 +60,7 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
CMD_POLL_TOKEN, 0);
if (ret) {
dev_err_ratelimited(hr_dev->dev,
- "failed to post mailbox %x in poll mode, ret = %d.\n",
+ "failed to post mailbox 0x%x in poll mode, ret = %d.\n",
op, ret);
return ret;
}
@@ -91,7 +90,7 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
if (unlikely(token != context->token)) {
dev_err_ratelimited(hr_dev->dev,
- "[cmd] invalid ae token %x,context token is %x!\n",
+ "[cmd] invalid ae token 0x%x, context token is 0x%x.\n",
token, context->token);
return;
}
@@ -130,14 +129,14 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
context->token, 1);
if (ret) {
dev_err_ratelimited(dev,
- "failed to post mailbox %x in event mode, ret = %d.\n",
+ "failed to post mailbox 0x%x in event mode, ret = %d.\n",
op, ret);
goto out;
}
if (!wait_for_completion_timeout(&context->done,
msecs_to_jiffies(timeout))) {
- dev_err_ratelimited(dev, "[cmd] token %x mailbox %x timeout.\n",
+ dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x timeout.\n",
context->token, op);
ret = -EBUSY;
goto out;
@@ -145,7 +144,7 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
ret = context->result;
if (ret)
- dev_err_ratelimited(dev, "[cmd] token %x mailbox %x error %d\n",
+ dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x error %d.\n",
context->token, op, ret);
out:
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index b73e55de83ac..465d1f914b6c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -104,208 +104,6 @@
#define hr_reg_read(ptr, field) _hr_reg_read(ptr, field)
-#define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3
-#define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4
-
-#define ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S 5
-
-#define ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S 6
-
-#define ROCEE_GLB_CFG_ROCEE_PORT_ST_S 10
-#define ROCEE_GLB_CFG_ROCEE_PORT_ST_M \
- (((1UL << 6) - 1) << ROCEE_GLB_CFG_ROCEE_PORT_ST_S)
-
-#define ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S 16
-
-#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S 0
-#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M \
- (((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S)
-
-#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S 24
-#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M \
- (((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S)
-
-#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S 0
-#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M \
- (((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S)
-
-#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S 24
-#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M \
- (((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S)
-
-#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S 0
-#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M \
- (((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S)
-
-#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S 16
-#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M \
- (((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S)
-
-#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S 0
-#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M \
- (((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S)
-
-#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S 16
-#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M \
- (((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S)
-
-#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_S 0
-#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_M \
- (((1UL << 8) - 1) << ROCEE_RAQ_WL_ROCEE_RAQ_WL_S)
-
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S 0
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M \
- (((1UL << 15) - 1) << \
- ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S)
-
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S 16
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M \
- (((1UL << 4) - 1) << \
- ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S)
-
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S 20
-
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE 21
-
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S 0
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M \
- (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S)
-
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S 5
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M \
- (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S)
-
-#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S 0
-#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M \
- (((1UL << 5) - 1) << ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S)
-
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S 5
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M \
- (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S)
-
-#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S 0
-#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M \
- (((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S)
-
-#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S 8
-#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M \
- (((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S)
-
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S 0
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M \
- (((1UL << 19) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S)
-
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_S 19
-
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S 20
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M \
- (((1UL << 2) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S)
-
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S 22
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M \
- (((1UL << 5) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S)
-
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S 31
-
-#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S 0
-#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M \
- (((1UL << 3) - 1) << ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S)
-
-#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S 0
-#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M \
- (((1UL << 15) - 1) << ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S)
-
-#define ROCEE_MB6_ROCEE_MB_CMD_S 0
-#define ROCEE_MB6_ROCEE_MB_CMD_M \
- (((1UL << 8) - 1) << ROCEE_MB6_ROCEE_MB_CMD_S)
-
-#define ROCEE_MB6_ROCEE_MB_CMD_MDF_S 8
-#define ROCEE_MB6_ROCEE_MB_CMD_MDF_M \
- (((1UL << 4) - 1) << ROCEE_MB6_ROCEE_MB_CMD_MDF_S)
-
-#define ROCEE_MB6_ROCEE_MB_EVENT_S 14
-
-#define ROCEE_MB6_ROCEE_MB_HW_RUN_S 15
-
-#define ROCEE_MB6_ROCEE_MB_TOKEN_S 16
-#define ROCEE_MB6_ROCEE_MB_TOKEN_M \
- (((1UL << 16) - 1) << ROCEE_MB6_ROCEE_MB_TOKEN_S)
-
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S 0
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M \
- (((1UL << 24) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S)
-
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S 24
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M \
- (((1UL << 4) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S)
-
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S 28
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M \
- (((1UL << 3) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S)
-
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S 31
-
-#define ROCEE_SMAC_H_ROCEE_SMAC_H_S 0
-#define ROCEE_SMAC_H_ROCEE_SMAC_H_M \
- (((1UL << 16) - 1) << ROCEE_SMAC_H_ROCEE_SMAC_H_S)
-
-#define ROCEE_SMAC_H_ROCEE_PORT_MTU_S 16
-#define ROCEE_SMAC_H_ROCEE_PORT_MTU_M \
- (((1UL << 4) - 1) << ROCEE_SMAC_H_ROCEE_PORT_MTU_S)
-
-#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S 0
-#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M \
- (((1UL << 2) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S)
-
-#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S 8
-#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M \
- (((1UL << 4) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S)
-
-#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S 17
-
-#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S 0
-#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M \
- (((1UL << 5) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S)
-
-#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S 16
-#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M \
- (((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S)
-
-#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S 0
-#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M \
- (((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S)
-
-#define ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S 16
-#define ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S 1
-#define ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S 0
-
-#define ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S 0
-#define ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S 1
-
-#define ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S 0
-
-#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S 0
-#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M \
- (((1UL << 28) - 1) << ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S)
-
-#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S 0
-#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M \
- (((1UL << 28) - 1) << ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)
-
-#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_S 0
-#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_M \
- (((1UL << 16) - 1) << ROCEE_SDB_INV_CNT_SDB_INV_CNT_S)
-
-#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S 0
-#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M \
- (((1UL << 16) - 1) << ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S)
-
-#define ROCEE_SDB_CNT_CMP_BITS 16
-
-#define ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S 20
-
-#define ROCEE_CNT_CLR_CE_CNT_CLR_CE_S 0
-
/*************ROCEE_REG DEFINITION****************/
#define ROCEE_VENDOR_ID_REG 0x0
#define ROCEE_VENDOR_PART_ID_REG 0x4
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index d763f097599f..55057dcbb2dc 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
@@ -406,15 +405,6 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
goto err_cqn;
}
- /*
- * For the QP created by kernel space, tptr value should be initialized
- * to zero; For the QP created by user space, it will cause synchronous
- * problems if tptr is set to zero here, so we initialize it in user
- * space.
- */
- if (!udata && hr_cq->tptr_addr)
- *hr_cq->tptr_addr = 0;
-
if (udata) {
resp.cqn = hr_cq->cqn;
ret = ib_copy_to_udata(udata, &resp,
@@ -441,9 +431,6 @@ int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
- if (hr_dev->hw->destroy_cq)
- hr_dev->hw->destroy_cq(ib_cq, udata);
-
free_cqc(hr_dev, hr_cq);
free_cqn(hr_dev, hr_cq->cqn);
free_cq_db(hr_dev, hr_cq, udata);
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index 751470c7a2ce..5c4c0480832b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -4,7 +4,6 @@
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*/
-#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 43e17d61cb63..1e0bae136997 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -36,36 +36,18 @@
#include <rdma/ib_verbs.h>
#include <rdma/hns-abi.h>
-#define DRV_NAME "hns_roce"
-
#define PCI_REVISION_ID_HIP08 0x21
#define PCI_REVISION_ID_HIP09 0x30
-#define HNS_ROCE_HW_VER1 ('h' << 24 | 'i' << 16 | '0' << 8 | '6')
-
#define HNS_ROCE_MAX_MSG_LEN 0x80000000
#define HNS_ROCE_IB_MIN_SQ_STRIDE 6
#define BA_BYTE_LEN 8
-/* Hardware specification only for v1 engine */
#define HNS_ROCE_MIN_CQE_NUM 0x40
-#define HNS_ROCE_MIN_WQE_NUM 0x20
#define HNS_ROCE_MIN_SRQ_WQE_NUM 1
-/* Hardware specification only for v1 engine */
-#define HNS_ROCE_MAX_INNER_MTPT_NUM 0x7
-#define HNS_ROCE_MAX_MTPT_PBL_NUM 0x100000
-
-#define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS 20
-#define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT \
- (5000 / HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS)
-#define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2
-#define HNS_ROCE_MIN_CQE_CNT 16
-
-#define HNS_ROCE_RESERVED_SGE 1
-
#define HNS_ROCE_MAX_IRQ_NUM 128
#define HNS_ROCE_SGE_IN_WQE 2
@@ -102,18 +84,12 @@
#define HNS_ROCE_FRMR_MAX_PA 512
#define PKEY_ID 0xffff
-#define GUID_LEN 8
#define NODE_DESC_SIZE 64
#define DB_REG_OFFSET 0x1000
/* Configure to HW for PAGE_SIZE larger than 4KB */
#define PG_SHIFT_OFFSET (PAGE_SHIFT - 12)
-#define PAGES_SHIFT_8 8
-#define PAGES_SHIFT_16 16
-#define PAGES_SHIFT_24 24
-#define PAGES_SHIFT_32 32
-
#define HNS_ROCE_IDX_QUE_ENTRY_SZ 4
#define SRQ_DB_REG 0x230
@@ -122,11 +98,6 @@
#define CQ_BANKID_SHIFT 2
-/* The chip implementation of the consumer index is calculated
- * according to twice the actual EQ depth
- */
-#define EQ_DEPTH_COEFF 2
-
enum {
SERV_TYPE_RC,
SERV_TYPE_UC,
@@ -182,6 +153,7 @@ enum {
HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9),
HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
+ HNS_ROCE_CAP_FLAG_DIRECT_WQE = BIT(12),
HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14),
HNS_ROCE_CAP_FLAG_STASH = BIT(17),
};
@@ -227,7 +199,7 @@ struct hns_roce_uar {
enum hns_roce_mmap_type {
HNS_ROCE_MMAP_TYPE_DB = 1,
- HNS_ROCE_MMAP_TYPE_TPTR,
+ HNS_ROCE_MMAP_TYPE_DWQE,
};
struct hns_user_mmap_entry {
@@ -242,7 +214,6 @@ struct hns_roce_ucontext {
struct list_head page_list;
struct mutex page_mutex;
struct hns_user_mmap_entry *db_mmap_entry;
- struct hns_user_mmap_entry *tptr_mmap_entry;
};
struct hns_roce_pd {
@@ -345,19 +316,16 @@ struct hns_roce_mw {
u32 pbl_buf_pg_sz;
};
-/* Only support 4K page size for mr register */
-#define MR_SIZE_4K 0
-
struct hns_roce_mr {
struct ib_mr ibmr;
u64 iova; /* MR's virtual original addr */
u64 size; /* Address range of MR */
u32 key; /* Key of MR */
u32 pd; /* PD num of MR */
- u32 access; /* Access permission of MR */
+ u32 access; /* Access permission of MR */
int enabled; /* MR's active status */
- int type; /* MR's register type */
- u32 pbl_hop_num; /* multi-hop number */
+ int type; /* MR's register type */
+ u32 pbl_hop_num; /* multi-hop number */
struct hns_roce_mtr pbl_mtr;
u32 npages;
dma_addr_t *page_list;
@@ -374,17 +342,17 @@ struct hns_roce_wq {
u32 wqe_cnt; /* WQE num */
u32 max_gs;
u32 rsv_sge;
- int offset;
- int wqe_shift; /* WQE size */
+ u32 offset;
+ u32 wqe_shift; /* WQE size */
u32 head;
u32 tail;
void __iomem *db_reg;
};
struct hns_roce_sge {
- unsigned int sge_cnt; /* SGE num */
- int offset;
- int sge_shift; /* SGE size */
+ unsigned int sge_cnt; /* SGE num */
+ u32 offset;
+ u32 sge_shift; /* SGE size */
};
struct hns_roce_buf_list {
@@ -453,7 +421,6 @@ struct hns_roce_cq {
u32 cons_index;
u32 *set_ci_db;
void __iomem *db_reg;
- u16 *tptr_addr;
int arm_sn;
int cqe_size;
unsigned long cqn;
@@ -468,7 +435,7 @@ struct hns_roce_cq {
struct hns_roce_idx_que {
struct hns_roce_mtr mtr;
- int entry_shift;
+ u32 entry_shift;
unsigned long *bitmap;
u32 head;
u32 tail;
@@ -480,7 +447,7 @@ struct hns_roce_srq {
u32 wqe_cnt;
int max_gs;
u32 rsv_sge;
- int wqe_shift;
+ u32 wqe_shift;
u32 cqn;
u32 xrcdn;
void __iomem *db_reg;
@@ -539,10 +506,6 @@ struct hns_roce_srq_table {
struct hns_roce_hem_table table;
};
-struct hns_roce_raq_table {
- struct hns_roce_buf_list *e_raq_buf;
-};
-
struct hns_roce_av {
u8 port;
u8 gid_index;
@@ -627,10 +590,6 @@ struct hns_roce_work {
u32 queue_num;
};
-enum {
- HNS_ROCE_QP_CAP_DIRECT_WQE = BIT(5),
-};
-
struct hns_roce_qp {
struct ib_qp ibqp;
struct hns_roce_wq rq;
@@ -650,9 +609,7 @@ struct hns_roce_qp {
u8 sl;
u8 resp_depth;
u8 state;
- u32 access_flags;
u32 atomic_rd_en;
- u32 pkey_index;
u32 qkey;
void (*event)(struct hns_roce_qp *qp,
enum hns_roce_event event_type);
@@ -672,9 +629,10 @@ struct hns_roce_qp {
unsigned long flush_flag;
struct hns_roce_work flush_work;
struct hns_roce_rinl_buf rq_inl_buf;
- struct list_head node; /* all qps are on a list */
- struct list_head rq_node; /* all recv qps are on a list */
- struct list_head sq_node; /* all send qps are on a list */
+ struct list_head node; /* all qps are on a list */
+ struct list_head rq_node; /* all recv qps are on a list */
+ struct list_head sq_node; /* all send qps are on a list */
+ struct hns_user_mmap_entry *dwqe_mmap_entry;
};
struct hns_roce_ib_iboe {
@@ -684,11 +642,6 @@ struct hns_roce_ib_iboe {
u8 phy_port[HNS_ROCE_MAX_PORTS];
};
-enum {
- HNS_ROCE_EQ_STAT_INVALID = 0,
- HNS_ROCE_EQ_STAT_VALID = 2,
-};
-
struct hns_roce_ceqe {
__le32 comp;
__le32 rsv[15];
@@ -720,12 +673,9 @@ struct hns_roce_eq {
int type_flag; /* Aeq:1 ceq:0 */
int eqn;
u32 entries;
- u32 log_entries;
int eqe_size;
int irq;
- int log_page_size;
u32 cons_index;
- struct hns_roce_buf_list *buf_list;
int over_ignore;
int coalesce;
int arm_st;
@@ -740,7 +690,6 @@ struct hns_roce_eq {
struct hns_roce_eq_table {
struct hns_roce_eq *eq;
- void __iomem **eqc_base; /* only for hw v1 */
};
enum cong_type {
@@ -767,7 +716,7 @@ struct hns_roce_caps {
u32 reserved_qps;
int num_qpc_timer;
int num_cqc_timer;
- int num_srqs;
+ u32 num_srqs;
u32 max_wqes;
u32 max_srq_wrs;
u32 max_srq_sges;
@@ -781,7 +730,7 @@ struct hns_roce_caps {
u32 min_cqes;
u32 min_wqes;
u32 reserved_cqs;
- int reserved_srqs;
+ u32 reserved_srqs;
int num_aeq_vectors;
int num_comp_vectors;
int num_other_vectors;
@@ -855,7 +804,7 @@ struct hns_roce_caps {
u32 cqc_timer_ba_pg_sz;
u32 cqc_timer_buf_pg_sz;
u32 cqc_timer_hop_num;
- u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */
+ u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */
u32 cqe_buf_pg_sz;
u32 cqe_hop_num;
u32 srqwqe_ba_pg_sz;
@@ -874,7 +823,7 @@ struct hns_roce_caps {
u32 gmv_hop_num;
u32 sl_num;
u32 llm_buf_pg_sz;
- u32 chunk_sz; /* chunk size in non multihop mode */
+ u32 chunk_sz; /* chunk size in non multihop mode */
u64 flags;
u16 default_ceq_max_cnt;
u16 default_ceq_period;
@@ -897,7 +846,6 @@ enum hns_roce_device_state {
};
struct hns_roce_hw {
- int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
int (*cmq_init)(struct hns_roce_dev *hr_dev);
void (*cmq_exit)(struct hns_roce_dev *hr_dev);
int (*hw_profile)(struct hns_roce_dev *hr_dev);
@@ -909,14 +857,12 @@ struct hns_roce_hw {
int (*poll_mbox_done)(struct hns_roce_dev *hr_dev,
unsigned int timeout);
bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy);
- int (*set_gid)(struct hns_roce_dev *hr_dev, u32 port, int gid_index,
+ int (*set_gid)(struct hns_roce_dev *hr_dev, int gid_index,
const union ib_gid *gid, const struct ib_gid_attr *attr);
int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port,
const u8 *addr);
- void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
- enum ib_mtu mtu);
int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
- struct hns_roce_mr *mr, unsigned long mtpt_idx);
+ struct hns_roce_mr *mr);
int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr, int flags,
void *mb_buf);
@@ -936,9 +882,6 @@ struct hns_roce_hw {
enum ib_qp_state new_state);
int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp);
- int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
- struct ib_udata *udata);
- int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
int (*init_eq)(struct hns_roce_dev *hr_dev);
void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
@@ -948,13 +891,11 @@ struct hns_roce_hw {
struct hns_roce_dev {
struct ib_device ib_dev;
- struct platform_device *pdev;
struct pci_dev *pci_dev;
struct device *dev;
struct hns_roce_uar priv_uar;
const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
spinlock_t sm_lock;
- spinlock_t bt_cmd_lock;
bool active;
bool is_reset;
bool dis_db;
@@ -1001,8 +942,6 @@ struct hns_roce_dev {
int loop_idc;
u32 sdb_offset;
u32 odb_offset;
- dma_addr_t tptr_dma_addr; /* only for hw v1 */
- u32 tptr_size; /* only for hw v1 */
const struct hns_roce_hw *hw;
void *priv;
struct workqueue_struct *irq_workq;
@@ -1010,6 +949,7 @@ struct hns_roce_dev {
u32 func_num;
u32 is_vf;
u32 cong_algo_tmpl_id;
+ u64 dwqe_page;
};
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
@@ -1158,7 +1098,7 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);
/* hns roce hw need current block and next block addr from mtt */
#define MTT_MIN_COUNT 2
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
+ u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
struct hns_roce_buf_attr *buf_attr,
unsigned int page_shift, struct ib_udata *udata,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index fa15d79eabb3..8917365cc6b8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/platform_device.h>
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_common.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
deleted file mode 100644
index f4af3992ba95..000000000000
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ /dev/null
@@ -1,4675 +0,0 @@
-/*
- * Copyright (c) 2016 Hisilicon Limited.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/platform_device.h>
-#include <linux/acpi.h>
-#include <linux/etherdevice.h>
-#include <linux/interrupt.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <rdma/ib_umem.h>
-#include "hns_roce_common.h"
-#include "hns_roce_device.h"
-#include "hns_roce_cmd.h"
-#include "hns_roce_hem.h"
-#include "hns_roce_hw_v1.h"
-
-/**
- * hns_get_gid_index - Get gid index.
- * @hr_dev: pointer to structure hns_roce_dev.
- * @port: port, value range: 0 ~ MAX
- * @gid_index: gid_index, value range: 0 ~ MAX
- * Description:
- * N ports share the gids, allocated as follows:
- * GID[0][0], GID[1][0], ..., GID[N - 1][0],
- * GID[0][1], GID[1][1], ..., GID[N - 1][1],
- * And so on
- */
-u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index)
-{
- return gid_index * hr_dev->caps.num_ports + port;
-}
-
-static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
-{
- dseg->lkey = cpu_to_le32(sg->lkey);
- dseg->addr = cpu_to_le64(sg->addr);
- dseg->len = cpu_to_le32(sg->length);
-}
-
-static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
- u32 rkey)
-{
- rseg->raddr = cpu_to_le64(remote_addr);
- rseg->rkey = cpu_to_le32(rkey);
- rseg->len = 0;
-}
-
-static int hns_roce_v1_post_send(struct ib_qp *ibqp,
- const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
- struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL;
- struct hns_roce_wqe_ctrl_seg *ctrl = NULL;
- struct hns_roce_wqe_data_seg *dseg = NULL;
- struct hns_roce_qp *qp = to_hr_qp(ibqp);
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_sq_db sq_db = {};
- int ps_opcode, i;
- unsigned long flags = 0;
- void *wqe = NULL;
- __le32 doorbell[2];
- const u8 *smac;
- int ret = 0;
- int loopback;
- u32 wqe_idx;
- int nreq;
-
- if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
- ibqp->qp_type != IB_QPT_RC)) {
- dev_err(dev, "un-supported QP type\n");
- *bad_wr = NULL;
- return -EOPNOTSUPP;
- }
-
- spin_lock_irqsave(&qp->sq.lock, flags);
-
- for (nreq = 0; wr; ++nreq, wr = wr->next) {
- if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
- ret = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
-
- wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
-
- if (unlikely(wr->num_sge > qp->sq.max_gs)) {
- dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
- wr->num_sge, qp->sq.max_gs);
- ret = -EINVAL;
- *bad_wr = wr;
- goto out;
- }
-
- wqe = hns_roce_get_send_wqe(qp, wqe_idx);
- qp->sq.wrid[wqe_idx] = wr->wr_id;
-
- /* Process the GSI(UD) and RC type wqe separately */
- if (ibqp->qp_type == IB_QPT_GSI) {
- ud_sq_wqe = wqe;
- roce_set_field(ud_sq_wqe->dmac_h,
- UD_SEND_WQE_U32_4_DMAC_0_M,
- UD_SEND_WQE_U32_4_DMAC_0_S,
- ah->av.mac[0]);
- roce_set_field(ud_sq_wqe->dmac_h,
- UD_SEND_WQE_U32_4_DMAC_1_M,
- UD_SEND_WQE_U32_4_DMAC_1_S,
- ah->av.mac[1]);
- roce_set_field(ud_sq_wqe->dmac_h,
- UD_SEND_WQE_U32_4_DMAC_2_M,
- UD_SEND_WQE_U32_4_DMAC_2_S,
- ah->av.mac[2]);
- roce_set_field(ud_sq_wqe->dmac_h,
- UD_SEND_WQE_U32_4_DMAC_3_M,
- UD_SEND_WQE_U32_4_DMAC_3_S,
- ah->av.mac[3]);
-
- roce_set_field(ud_sq_wqe->u32_8,
- UD_SEND_WQE_U32_8_DMAC_4_M,
- UD_SEND_WQE_U32_8_DMAC_4_S,
- ah->av.mac[4]);
- roce_set_field(ud_sq_wqe->u32_8,
- UD_SEND_WQE_U32_8_DMAC_5_M,
- UD_SEND_WQE_U32_8_DMAC_5_S,
- ah->av.mac[5]);
-
- smac = (const u8 *)hr_dev->dev_addr[qp->port];
- loopback = ether_addr_equal_unaligned(ah->av.mac,
- smac) ? 1 : 0;
- roce_set_bit(ud_sq_wqe->u32_8,
- UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
- loopback);
-
- roce_set_field(ud_sq_wqe->u32_8,
- UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
- UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
- HNS_ROCE_WQE_OPCODE_SEND);
- roce_set_field(ud_sq_wqe->u32_8,
- UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M,
- UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S,
- 2);
- roce_set_bit(ud_sq_wqe->u32_8,
- UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S,
- 1);
-
- ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ?
- cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
- (wr->send_flags & IB_SEND_SOLICITED ?
- cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
- ((wr->opcode == IB_WR_SEND_WITH_IMM) ?
- cpu_to_le32(HNS_ROCE_WQE_IMM) : 0);
-
- roce_set_field(ud_sq_wqe->u32_16,
- UD_SEND_WQE_U32_16_DEST_QP_M,
- UD_SEND_WQE_U32_16_DEST_QP_S,
- ud_wr(wr)->remote_qpn);
- roce_set_field(ud_sq_wqe->u32_16,
- UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M,
- UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S,
- ah->av.stat_rate);
-
- roce_set_field(ud_sq_wqe->u32_36,
- UD_SEND_WQE_U32_36_FLOW_LABEL_M,
- UD_SEND_WQE_U32_36_FLOW_LABEL_S,
- ah->av.flowlabel);
- roce_set_field(ud_sq_wqe->u32_36,
- UD_SEND_WQE_U32_36_PRIORITY_M,
- UD_SEND_WQE_U32_36_PRIORITY_S,
- ah->av.sl);
- roce_set_field(ud_sq_wqe->u32_36,
- UD_SEND_WQE_U32_36_SGID_INDEX_M,
- UD_SEND_WQE_U32_36_SGID_INDEX_S,
- hns_get_gid_index(hr_dev, qp->phy_port,
- ah->av.gid_index));
-
- roce_set_field(ud_sq_wqe->u32_40,
- UD_SEND_WQE_U32_40_HOP_LIMIT_M,
- UD_SEND_WQE_U32_40_HOP_LIMIT_S,
- ah->av.hop_limit);
- roce_set_field(ud_sq_wqe->u32_40,
- UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
- UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S,
- ah->av.tclass);
-
- memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);
-
- ud_sq_wqe->va0_l =
- cpu_to_le32((u32)wr->sg_list[0].addr);
- ud_sq_wqe->va0_h =
- cpu_to_le32((wr->sg_list[0].addr) >> 32);
- ud_sq_wqe->l_key0 =
- cpu_to_le32(wr->sg_list[0].lkey);
-
- ud_sq_wqe->va1_l =
- cpu_to_le32((u32)wr->sg_list[1].addr);
- ud_sq_wqe->va1_h =
- cpu_to_le32((wr->sg_list[1].addr) >> 32);
- ud_sq_wqe->l_key1 =
- cpu_to_le32(wr->sg_list[1].lkey);
- } else if (ibqp->qp_type == IB_QPT_RC) {
- u32 tmp_len = 0;
-
- ctrl = wqe;
- memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));
- for (i = 0; i < wr->num_sge; i++)
- tmp_len += wr->sg_list[i].length;
-
- ctrl->msg_length =
- cpu_to_le32(le32_to_cpu(ctrl->msg_length) + tmp_len);
-
- ctrl->sgl_pa_h = 0;
- ctrl->flag = 0;
-
- switch (wr->opcode) {
- case IB_WR_SEND_WITH_IMM:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- ctrl->imm_data = wr->ex.imm_data;
- break;
- case IB_WR_SEND_WITH_INV:
- ctrl->inv_key =
- cpu_to_le32(wr->ex.invalidate_rkey);
- break;
- default:
- ctrl->imm_data = 0;
- break;
- }
-
- /* Ctrl field, ctrl set type: sig, solic, imm, fence */
- /* SO wait for conforming application scenarios */
- ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
- cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
- (wr->send_flags & IB_SEND_SOLICITED ?
- cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
- ((wr->opcode == IB_WR_SEND_WITH_IMM ||
- wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ?
- cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) |
- (wr->send_flags & IB_SEND_FENCE ?
- (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);
-
- wqe += sizeof(struct hns_roce_wqe_ctrl_seg);
-
- switch (wr->opcode) {
- case IB_WR_RDMA_READ:
- ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
- set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
- rdma_wr(wr)->rkey);
- break;
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
- set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
- rdma_wr(wr)->rkey);
- break;
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_INV:
- case IB_WR_SEND_WITH_IMM:
- ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
- break;
- case IB_WR_LOCAL_INV:
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- case IB_WR_LSO:
- default:
- ps_opcode = HNS_ROCE_WQE_OPCODE_MASK;
- break;
- }
- ctrl->flag |= cpu_to_le32(ps_opcode);
- wqe += sizeof(struct hns_roce_wqe_raddr_seg);
-
- dseg = wqe;
- if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
- if (le32_to_cpu(ctrl->msg_length) >
- hr_dev->caps.max_sq_inline) {
- ret = -EINVAL;
- *bad_wr = wr;
- dev_err(dev, "inline len(1-%d)=%d, illegal",
- le32_to_cpu(ctrl->msg_length),
- hr_dev->caps.max_sq_inline);
- goto out;
- }
- for (i = 0; i < wr->num_sge; i++) {
- memcpy(wqe, ((void *) (uintptr_t)
- wr->sg_list[i].addr),
- wr->sg_list[i].length);
- wqe += wr->sg_list[i].length;
- }
- ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE);
- } else {
- /* Set the data segments of the wqe */
- for (i = 0; i < wr->num_sge; i++)
- set_data_seg(dseg + i, wr->sg_list + i);
-
- ctrl->flag |= cpu_to_le32(wr->num_sge <<
- HNS_ROCE_WQE_SGE_NUM_BIT);
- }
- }
- }
-
-out:
- /* Set DB return */
- if (likely(nreq)) {
- qp->sq.head += nreq;
-
- roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
- SQ_DOORBELL_U32_4_SQ_HEAD_S,
- (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
- roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
- SQ_DOORBELL_U32_4_SL_S, qp->sl);
- roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
- SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
- roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
- SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
- roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);
-
- doorbell[0] = sq_db.u32_4;
- doorbell[1] = sq_db.u32_8;
-
- hns_roce_write64_k(doorbell, qp->sq.db_reg);
- }
-
- spin_unlock_irqrestore(&qp->sq.lock, flags);
-
- return ret;
-}
-
-static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
- const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr)
-{
- struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
- struct hns_roce_wqe_data_seg *scat = NULL;
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_rq_db rq_db = {};
- __le32 doorbell[2] = {0};
- unsigned long flags = 0;
- unsigned int wqe_idx;
- int ret = 0;
- int nreq;
- int i;
- u32 reg_val;
-
- spin_lock_irqsave(&hr_qp->rq.lock, flags);
-
- for (nreq = 0; wr; ++nreq, wr = wr->next) {
- if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
- hr_qp->ibqp.recv_cq)) {
- ret = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
-
- wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
-
- if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
- dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
- wr->num_sge, hr_qp->rq.max_gs);
- ret = -EINVAL;
- *bad_wr = wr;
- goto out;
- }
-
- ctrl = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
-
- roce_set_field(ctrl->rwqe_byte_12,
- RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
- RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S,
- wr->num_sge);
-
- scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1);
-
- for (i = 0; i < wr->num_sge; i++)
- set_data_seg(scat + i, wr->sg_list + i);
-
- hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
- }
-
-out:
- if (likely(nreq)) {
- hr_qp->rq.head += nreq;
-
- if (ibqp->qp_type == IB_QPT_GSI) {
- __le32 tmp;
-
- /* SW update GSI rq header */
- reg_val = roce_read(to_hr_dev(ibqp->device),
- ROCEE_QP1C_CFG3_0_REG +
- QP1C_CFGN_OFFSET * hr_qp->phy_port);
- tmp = cpu_to_le32(reg_val);
- roce_set_field(tmp,
- ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
- ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
- hr_qp->rq.head);
- reg_val = le32_to_cpu(tmp);
- roce_write(to_hr_dev(ibqp->device),
- ROCEE_QP1C_CFG3_0_REG +
- QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
- } else {
- roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
- RQ_DOORBELL_U32_4_RQ_HEAD_S,
- hr_qp->rq.head);
- roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
- RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
- roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
- RQ_DOORBELL_U32_8_CMD_S, 1);
- roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
- 1);
-
- doorbell[0] = rq_db.u32_4;
- doorbell[1] = rq_db.u32_8;
-
- hns_roce_write64_k(doorbell, hr_qp->rq.db_reg);
- }
- }
- spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
-
- return ret;
-}
-
-static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
- int sdb_mode, int odb_mode)
-{
- __le32 tmp;
- u32 val;
-
- val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
- tmp = cpu_to_le32(val);
- roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
- roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
-}
-
-static int hns_roce_v1_set_hem(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table, int obj,
- int step_idx)
-{
- spinlock_t *lock = &hr_dev->bt_cmd_lock;
- struct device *dev = hr_dev->dev;
- struct hns_roce_hem_iter iter;
- void __iomem *bt_cmd;
- __le32 bt_cmd_val[2];
- __le32 bt_cmd_h = 0;
- unsigned long flags;
- __le32 bt_cmd_l;
- int ret = 0;
- u64 bt_ba;
- long end;
-
- /* Find the HEM(Hardware Entry Memory) entry */
- unsigned long i = obj / (table->table_chunk_size / table->obj_size);
-
- switch (table->type) {
- case HEM_TYPE_QPC:
- case HEM_TYPE_MTPT:
- case HEM_TYPE_CQC:
- case HEM_TYPE_SRQC:
- roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type);
- break;
- default:
- return ret;
- }
-
- roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
- roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
- roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
-
- /* Currently iterate over only one chunk */
- for (hns_roce_hem_first(table->hem[i], &iter);
- !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
- bt_ba = hns_roce_hem_addr(&iter) >> HNS_HW_PAGE_SHIFT;
-
- spin_lock_irqsave(lock, flags);
-
- bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
-
- end = HW_SYNC_TIMEOUT_MSECS;
- while (end > 0) {
- if (!(readl(bt_cmd) >> BT_CMD_SYNC_SHIFT))
- break;
-
- mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
- end -= HW_SYNC_SLEEP_TIME_INTERVAL;
- }
-
- if (end <= 0) {
- dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
- spin_unlock_irqrestore(lock, flags);
- return -EBUSY;
- }
-
- bt_cmd_l = cpu_to_le32(bt_ba);
- roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S,
- upper_32_bits(bt_ba));
-
- bt_cmd_val[0] = bt_cmd_l;
- bt_cmd_val[1] = bt_cmd_h;
- hns_roce_write64_k(bt_cmd_val,
- hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
- spin_unlock_irqrestore(lock, flags);
- }
-
- return ret;
-}
-
-static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
- u32 odb_mode)
-{
- __le32 tmp;
- u32 val;
-
- /* Configure SDB/ODB extend mode */
- val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
- tmp = cpu_to_le32(val);
- roce_set_bit(tmp, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
- roce_set_bit(tmp, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
-}
-
-static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept,
- u32 sdb_alful)
-{
- __le32 tmp;
- u32 val;
-
- /* Configure SDB */
- val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
- ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful);
- roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
- ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val);
-}
-
-static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
- u32 odb_alful)
-{
- __le32 tmp;
- u32 val;
-
- /* Configure ODB */
- val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
- ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful);
- roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
- ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val);
-}
-
-static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
- u32 ext_sdb_alful)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_db_table *db = &priv->db_table;
- struct device *dev = &hr_dev->pdev->dev;
- dma_addr_t sdb_dma_addr;
- __le32 tmp;
- u32 val;
-
- /* Configure extend SDB threshold */
- roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
- roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);
-
- /* Configure extend SDB base addr */
- sdb_dma_addr = db->ext_db->sdb_buf_list->map;
- roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12));
-
- /* Configure extend SDB depth */
- val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
- ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
- db->ext_db->esdb_dep);
- /*
- * 44 = 32 + 12: when passing the addr to hardware, shift right by 12
- * because a 4K page is used, and by another 32 to obtain the high
- * 32 bit value written to the hardware register.
- */
- roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
- ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);
-
- dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
- dev_dbg(dev, "ext SDB threshold: empty: 0x%x, ful: 0x%x\n",
- ext_sdb_alept, ext_sdb_alful);
-}
-
-static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
- u32 ext_odb_alful)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_db_table *db = &priv->db_table;
- struct device *dev = &hr_dev->pdev->dev;
- dma_addr_t odb_dma_addr;
- __le32 tmp;
- u32 val;
-
- /* Configure extend ODB threshold */
- roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
- roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);
-
- /* Configure extend ODB base addr */
- odb_dma_addr = db->ext_db->odb_buf_list->map;
- roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12));
-
- /* Configure extend ODB depth */
- val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
- ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S,
- db->ext_db->eodb_dep);
- roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
- ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S,
- odb_dma_addr >> 44);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val);
-
- dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep);
- dev_dbg(dev, "ext ODB threshold: empty: 0x%x, ful: 0x%x\n",
- ext_odb_alept, ext_odb_alful);
-}
-
-static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
- u32 odb_ext_mod)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_db_table *db = &priv->db_table;
- struct device *dev = &hr_dev->pdev->dev;
- dma_addr_t sdb_dma_addr;
- dma_addr_t odb_dma_addr;
- int ret = 0;
-
- db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
- if (!db->ext_db)
- return -ENOMEM;
-
- if (sdb_ext_mod) {
- db->ext_db->sdb_buf_list = kmalloc(
- sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL);
- if (!db->ext_db->sdb_buf_list) {
- ret = -ENOMEM;
- goto ext_sdb_buf_fail_out;
- }
-
- db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev,
- HNS_ROCE_V1_EXT_SDB_SIZE,
- &sdb_dma_addr, GFP_KERNEL);
- if (!db->ext_db->sdb_buf_list->buf) {
- ret = -ENOMEM;
- goto alloc_sq_db_buf_fail;
- }
- db->ext_db->sdb_buf_list->map = sdb_dma_addr;
-
- db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH);
- hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT,
- HNS_ROCE_V1_EXT_SDB_ALFUL);
- } else
- hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT,
- HNS_ROCE_V1_SDB_ALFUL);
-
- if (odb_ext_mod) {
- db->ext_db->odb_buf_list = kmalloc(
- sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL);
- if (!db->ext_db->odb_buf_list) {
- ret = -ENOMEM;
- goto ext_odb_buf_fail_out;
- }
-
- db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev,
- HNS_ROCE_V1_EXT_ODB_SIZE,
- &odb_dma_addr, GFP_KERNEL);
- if (!db->ext_db->odb_buf_list->buf) {
- ret = -ENOMEM;
- goto alloc_otr_db_buf_fail;
- }
- db->ext_db->odb_buf_list->map = odb_dma_addr;
-
- db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH);
- hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT,
- HNS_ROCE_V1_EXT_ODB_ALFUL);
- } else
- hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT,
- HNS_ROCE_V1_ODB_ALFUL);
-
- hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod);
-
- return 0;
-
-alloc_otr_db_buf_fail:
- kfree(db->ext_db->odb_buf_list);
-
-ext_odb_buf_fail_out:
- if (sdb_ext_mod) {
- dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
- db->ext_db->sdb_buf_list->buf,
- db->ext_db->sdb_buf_list->map);
- }
-
-alloc_sq_db_buf_fail:
- if (sdb_ext_mod)
- kfree(db->ext_db->sdb_buf_list);
-
-ext_sdb_buf_fail_out:
- kfree(db->ext_db);
- return ret;
-}
-
-static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
- struct ib_pd *pd)
-{
- struct device *dev = &hr_dev->pdev->dev;
- struct ib_qp_init_attr init_attr;
- struct ib_qp *qp;
-
- memset(&init_attr, 0, sizeof(struct ib_qp_init_attr));
- init_attr.qp_type = IB_QPT_RC;
- init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
- init_attr.cap.max_recv_wr = HNS_ROCE_MIN_WQE_NUM;
- init_attr.cap.max_send_wr = HNS_ROCE_MIN_WQE_NUM;
-
- qp = ib_create_qp(pd, &init_attr);
- if (IS_ERR(qp)) {
- dev_err(dev, "Create loop qp for mr free failed!");
- return NULL;
- }
-
- return to_hr_qp(qp);
-}
-
-static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_free_mr *free_mr = &priv->free_mr;
- struct hns_roce_caps *caps = &hr_dev->caps;
- struct ib_device *ibdev = &hr_dev->ib_dev;
- struct device *dev = &hr_dev->pdev->dev;
- struct ib_cq_init_attr cq_init_attr;
- struct ib_qp_attr attr = { 0 };
- struct hns_roce_qp *hr_qp;
- struct ib_cq *cq;
- struct ib_pd *pd;
- union ib_gid dgid;
- __be64 subnet_prefix;
- int attr_mask = 0;
- int ret;
- int i, j;
- u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
- u8 phy_port;
- u32 port = 0;
- u8 sl;
-
- /* Reserved cq for loop qp */
- cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2;
- cq_init_attr.comp_vector = 0;
-
- cq = rdma_zalloc_drv_obj(ibdev, ib_cq);
- if (!cq)
- return -ENOMEM;
-
- ret = hns_roce_create_cq(cq, &cq_init_attr, NULL);
- if (ret) {
- dev_err(dev, "Create cq for reserved loop qp failed!");
- goto alloc_cq_failed;
- }
- free_mr->mr_free_cq = to_hr_cq(cq);
- free_mr->mr_free_cq->ib_cq.device = &hr_dev->ib_dev;
- free_mr->mr_free_cq->ib_cq.uobject = NULL;
- free_mr->mr_free_cq->ib_cq.comp_handler = NULL;
- free_mr->mr_free_cq->ib_cq.event_handler = NULL;
- free_mr->mr_free_cq->ib_cq.cq_context = NULL;
- atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);
-
- pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
- if (!pd) {
- ret = -ENOMEM;
- goto alloc_mem_failed;
- }
-
- pd->device = ibdev;
- ret = hns_roce_alloc_pd(pd, NULL);
- if (ret)
- goto alloc_pd_failed;
-
- free_mr->mr_free_pd = to_hr_pd(pd);
- free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
- free_mr->mr_free_pd->ibpd.uobject = NULL;
- free_mr->mr_free_pd->ibpd.__internal_mr = NULL;
- atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);
-
- attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
- attr.pkey_index = 0;
- attr.min_rnr_timer = 0;
- /* Disable read ability */
- attr.max_dest_rd_atomic = 0;
- attr.max_rd_atomic = 0;
- /* Use arbitrary values as rq_psn and sq_psn */
- attr.rq_psn = 0x0808;
- attr.sq_psn = 0x0808;
- attr.retry_cnt = 7;
- attr.rnr_retry = 7;
- attr.timeout = 0x12;
- attr.path_mtu = IB_MTU_256;
- attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
- rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
- rdma_ah_set_static_rate(&attr.ah_attr, 3);
-
- subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
- for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
- phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
- (i % HNS_ROCE_MAX_PORTS);
- sl = i / HNS_ROCE_MAX_PORTS;
-
- for (j = 0; j < caps->num_ports; j++) {
- if (hr_dev->iboe.phy_port[j] == phy_port) {
- queue_en[i] = 1;
- port = j;
- break;
- }
- }
-
- if (!queue_en[i])
- continue;
-
- free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
- if (!free_mr->mr_free_qp[i]) {
- dev_err(dev, "Create loop qp failed!\n");
- ret = -ENOMEM;
- goto create_lp_qp_failed;
- }
- hr_qp = free_mr->mr_free_qp[i];
-
- hr_qp->port = port;
- hr_qp->phy_port = phy_port;
- hr_qp->ibqp.qp_type = IB_QPT_RC;
- hr_qp->ibqp.device = &hr_dev->ib_dev;
- hr_qp->ibqp.uobject = NULL;
- atomic_set(&hr_qp->ibqp.usecnt, 0);
- hr_qp->ibqp.pd = pd;
- hr_qp->ibqp.recv_cq = cq;
- hr_qp->ibqp.send_cq = cq;
-
- rdma_ah_set_port_num(&attr.ah_attr, port + 1);
- rdma_ah_set_sl(&attr.ah_attr, sl);
- attr.port_num = port + 1;
-
- attr.dest_qp_num = hr_qp->qpn;
- memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
- hr_dev->dev_addr[port],
- ETH_ALEN);
-
- memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
- memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
- memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
- dgid.raw[11] = 0xff;
- dgid.raw[12] = 0xfe;
- dgid.raw[8] ^= 2;
- rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);
-
- ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
- IB_QPS_RESET, IB_QPS_INIT);
- if (ret) {
- dev_err(dev, "modify qp failed(%d)!\n", ret);
- goto create_lp_qp_failed;
- }
-
- ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN,
- IB_QPS_INIT, IB_QPS_RTR);
- if (ret) {
- dev_err(dev, "modify qp failed(%d)!\n", ret);
- goto create_lp_qp_failed;
- }
-
- ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
- IB_QPS_RTR, IB_QPS_RTS);
- if (ret) {
- dev_err(dev, "modify qp failed(%d)!\n", ret);
- goto create_lp_qp_failed;
- }
- }
-
- return 0;
-
-create_lp_qp_failed:
- for (i -= 1; i >= 0; i--) {
- hr_qp = free_mr->mr_free_qp[i];
- if (ib_destroy_qp(&hr_qp->ibqp))
- dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
- }
-
- hns_roce_dealloc_pd(pd, NULL);
-
-alloc_pd_failed:
- kfree(pd);
-
-alloc_mem_failed:
- hns_roce_destroy_cq(cq, NULL);
-alloc_cq_failed:
- kfree(cq);
- return ret;
-}
-
-static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_free_mr *free_mr = &priv->free_mr;
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_qp *hr_qp;
- int ret;
- int i;
-
- for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
- hr_qp = free_mr->mr_free_qp[i];
- if (!hr_qp)
- continue;
-
- ret = ib_destroy_qp(&hr_qp->ibqp);
- if (ret)
- dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
- i, ret);
- }
-
- hns_roce_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
- kfree(&free_mr->mr_free_cq->ib_cq);
- hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
- kfree(&free_mr->mr_free_pd->ibpd);
-}
-
-static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_db_table *db = &priv->db_table;
- struct device *dev = &hr_dev->pdev->dev;
- u32 sdb_ext_mod;
- u32 odb_ext_mod;
- u32 sdb_evt_mod;
- u32 odb_evt_mod;
- int ret;
-
- memset(db, 0, sizeof(*db));
-
- /* Default DB mode */
- sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE;
- odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE;
- sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE;
- odb_evt_mod = HNS_ROCE_ODB_POLL_MODE;
-
- db->sdb_ext_mod = sdb_ext_mod;
- db->odb_ext_mod = odb_ext_mod;
-
- /* Init extend DB */
- ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod);
- if (ret) {
- dev_err(dev, "Failed in extend DB configuration.\n");
- return ret;
- }
-
- hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod);
-
- return 0;
-}
-
-static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
-{
- struct hns_roce_recreate_lp_qp_work *lp_qp_work;
- struct hns_roce_dev *hr_dev;
-
- lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
- work);
- hr_dev = to_hr_dev(lp_qp_work->ib_dev);
-
- hns_roce_v1_release_lp_qp(hr_dev);
-
- if (hns_roce_v1_rsv_lp_qp(hr_dev))
- dev_err(&hr_dev->pdev->dev, "create reserver qp failed\n");
-
- if (lp_qp_work->comp_flag)
- complete(lp_qp_work->comp);
-
- kfree(lp_qp_work);
-}
-
-static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
-{
- long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_free_mr *free_mr = &priv->free_mr;
- struct hns_roce_recreate_lp_qp_work *lp_qp_work;
- struct device *dev = &hr_dev->pdev->dev;
- struct completion comp;
-
- lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
- GFP_KERNEL);
- if (!lp_qp_work)
- return -ENOMEM;
-
- INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);
-
- lp_qp_work->ib_dev = &(hr_dev->ib_dev);
- lp_qp_work->comp = &comp;
- lp_qp_work->comp_flag = 1;
-
- init_completion(lp_qp_work->comp);
-
- queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));
-
- while (end > 0) {
- if (try_wait_for_completion(&comp))
- return 0;
- msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
- end -= HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE;
- }
-
- lp_qp_work->comp_flag = 0;
- if (try_wait_for_completion(&comp))
- return 0;
-
- dev_warn(dev, "recreate lp qp failed 20s timeout and return failed!\n");
- return -ETIMEDOUT;
-}
-
-static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
- struct device *dev = &hr_dev->pdev->dev;
- struct ib_send_wr send_wr;
- const struct ib_send_wr *bad_wr;
- int ret;
-
- memset(&send_wr, 0, sizeof(send_wr));
- send_wr.next = NULL;
- send_wr.num_sge = 0;
- send_wr.send_flags = 0;
- send_wr.sg_list = NULL;
- send_wr.wr_id = (unsigned long long)&send_wr;
- send_wr.opcode = IB_WR_RDMA_WRITE;
-
- ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr);
- if (ret) {
- dev_err(dev, "Post write wqe for mr free failed(%d)!", ret);
- return ret;
- }
-
- return 0;
-}
-
-static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
-{
- unsigned long end =
- msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
- struct hns_roce_mr_free_work *mr_work =
- container_of(work, struct hns_roce_mr_free_work, work);
- struct hns_roce_dev *hr_dev = to_hr_dev(mr_work->ib_dev);
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_free_mr *free_mr = &priv->free_mr;
- struct hns_roce_cq *mr_free_cq = free_mr->mr_free_cq;
- struct hns_roce_mr *hr_mr = mr_work->mr;
- struct device *dev = &hr_dev->pdev->dev;
- struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
- struct hns_roce_qp *hr_qp;
- int ne = 0;
- int ret;
- int i;
-
- for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
- hr_qp = free_mr->mr_free_qp[i];
- if (!hr_qp)
- continue;
- ne++;
-
- ret = hns_roce_v1_send_lp_wqe(hr_qp);
- if (ret) {
- dev_err(dev,
- "Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
- hr_qp->qpn, ret);
- goto free_work;
- }
- }
-
- if (!ne) {
- dev_err(dev, "Reserved loop qp is absent!\n");
- goto free_work;
- }
-
- do {
- ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
- if (ret < 0 && hr_qp) {
- dev_err(dev,
- "(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
- hr_qp->qpn, ret, hr_mr->key, ne);
- goto free_work;
- }
- ne -= ret;
- usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
- (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
- } while (ne && time_before_eq(jiffies, end));
-
- if (ne != 0)
- dev_err(dev,
- "Poll cqe for mr 0x%x free timeout! Remain %d cqe\n",
- hr_mr->key, ne);
-
-free_work:
- if (mr_work->comp_flag)
- complete(mr_work->comp);
- kfree(mr_work);
-}
-
-static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
- struct hns_roce_mr *mr, struct ib_udata *udata)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_free_mr *free_mr = &priv->free_mr;
- long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_mr_free_work *mr_work;
- unsigned long start = jiffies;
- struct completion comp;
- int ret = 0;
-
- if (mr->enabled) {
- if (hns_roce_hw_destroy_mpt(hr_dev, NULL,
- key_to_hw_index(mr->key) &
- (hr_dev->caps.num_mtpts - 1)))
- dev_warn(dev, "DESTROY_MPT failed!\n");
- }
-
- mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
- if (!mr_work) {
- ret = -ENOMEM;
- goto free_mr;
- }
-
- INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn);
-
- mr_work->ib_dev = &(hr_dev->ib_dev);
- mr_work->comp = &comp;
- mr_work->comp_flag = 1;
- mr_work->mr = (void *)mr;
- init_completion(mr_work->comp);
-
- queue_work(free_mr->free_mr_wq, &(mr_work->work));
-
- while (end > 0) {
- if (try_wait_for_completion(&comp))
- goto free_mr;
- msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
- end -= HNS_ROCE_V1_FREE_MR_WAIT_VALUE;
- }
-
- mr_work->comp_flag = 0;
- if (try_wait_for_completion(&comp))
- goto free_mr;
-
- dev_warn(dev, "Free mr work 0x%x over 50s and failed!\n", mr->key);
- ret = -ETIMEDOUT;
-
-free_mr:
- dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
- mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));
-
- ida_free(&hr_dev->mr_table.mtpt_ida.ida, (int)key_to_hw_index(mr->key));
- hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
- kfree(mr);
-
- return ret;
-}
-
-static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_db_table *db = &priv->db_table;
- struct device *dev = &hr_dev->pdev->dev;
-
- if (db->sdb_ext_mod) {
- dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
- db->ext_db->sdb_buf_list->buf,
- db->ext_db->sdb_buf_list->map);
- kfree(db->ext_db->sdb_buf_list);
- }
-
- if (db->odb_ext_mod) {
- dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE,
- db->ext_db->odb_buf_list->buf,
- db->ext_db->odb_buf_list->map);
- kfree(db->ext_db->odb_buf_list);
- }
-
- kfree(db->ext_db);
-}
-
-static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_raq_table *raq = &priv->raq_table;
- struct device *dev = &hr_dev->pdev->dev;
- dma_addr_t addr;
- int raq_shift;
- __le32 tmp;
- u32 val;
- int ret;
-
- raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
- if (!raq->e_raq_buf)
- return -ENOMEM;
-
- raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE,
- &addr, GFP_KERNEL);
- if (!raq->e_raq_buf->buf) {
- ret = -ENOMEM;
- goto err_dma_alloc_raq;
- }
- raq->e_raq_buf->map = addr;
-
- /* Configure raq extended address. 48bit 4K align */
- roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12);
-
- /* Configure raq_shift */
- raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY);
- val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
- ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift);
- /*
- * 44 = 32 + 12: when passing the addr to hardware, shift right by 12
- * because a 4K page is used, and by another 32 to obtain the high
- * 32 bit value written to the hardware register.
- */
- roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
- ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
- raq->e_raq_buf->map >> 44);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val);
- dev_dbg(dev, "Configure raq_shift 0x%x.\n", val);
-
- /* Configure raq threshold */
- val = roce_read(hr_dev, ROCEE_RAQ_WL_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
- ROCEE_RAQ_WL_ROCEE_RAQ_WL_S,
- HNS_ROCE_V1_EXT_RAQ_WF);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_RAQ_WL_REG, val);
- dev_dbg(dev, "Configure raq_wl 0x%x.\n", val);
-
- /* Enable extend raq */
- val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp,
- ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M,
- ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S,
- POL_TIME_INTERVAL_VAL);
- roce_set_bit(tmp, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
- roce_set_field(tmp,
- ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M,
- ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S,
- 2);
- roce_set_bit(tmp,
- ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val);
- dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val);
-
- /* Enable raq drop */
- val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
- tmp = cpu_to_le32(val);
- roce_set_bit(tmp, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
- dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val);
-
- return 0;
-
-err_dma_alloc_raq:
- kfree(raq->e_raq_buf);
- return ret;
-}
-
-static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_raq_table *raq = &priv->raq_table;
- struct device *dev = &hr_dev->pdev->dev;
-
- dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
- raq->e_raq_buf->map);
- kfree(raq->e_raq_buf);
-}
-
-static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
-{
- __le32 tmp;
- u32 val;
-
- if (enable_flag) {
- val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
- /* Open all ports */
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
- ROCEE_GLB_CFG_ROCEE_PORT_ST_S,
- ALL_PORT_VAL_OPEN);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
- } else {
- val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
- /* Close all ports */
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
- ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
- }
-}
-
-static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct device *dev = &hr_dev->pdev->dev;
- int ret;
-
- priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
- HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
- GFP_KERNEL);
- if (!priv->bt_table.qpc_buf.buf)
- return -ENOMEM;
-
- priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
- HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
- GFP_KERNEL);
- if (!priv->bt_table.mtpt_buf.buf) {
- ret = -ENOMEM;
- goto err_failed_alloc_mtpt_buf;
- }
-
- priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
- HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
- GFP_KERNEL);
- if (!priv->bt_table.cqc_buf.buf) {
- ret = -ENOMEM;
- goto err_failed_alloc_cqc_buf;
- }
-
- return 0;
-
-err_failed_alloc_cqc_buf:
- dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
- priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
-
-err_failed_alloc_mtpt_buf:
- dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
- priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
-
- return ret;
-}
-
-static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct device *dev = &hr_dev->pdev->dev;
-
- dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
- priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);
-
- dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
- priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
-
- dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
- priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
-}
-
-static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
- struct device *dev = &hr_dev->pdev->dev;
-
- /*
- * This buffer will be used for the CQ's tptr (tail pointer), also
- * named ci (consumer index). Every CQ will use 2 bytes to save
- * cqe ci in hip06. Hardware will read this area to get new ci
- * when the queue is almost full.
- */
- tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
- &tptr_buf->map, GFP_KERNEL);
- if (!tptr_buf->buf)
- return -ENOMEM;
-
- hr_dev->tptr_dma_addr = tptr_buf->map;
- hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;
-
- return 0;
-}
-
-static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
- struct device *dev = &hr_dev->pdev->dev;
-
- dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
- tptr_buf->buf, tptr_buf->map);
-}
-
-static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_free_mr *free_mr = &priv->free_mr;
- struct device *dev = &hr_dev->pdev->dev;
- int ret;
-
- free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
- if (!free_mr->free_mr_wq) {
- dev_err(dev, "Create free mr workqueue failed!\n");
- return -ENOMEM;
- }
-
- ret = hns_roce_v1_rsv_lp_qp(hr_dev);
- if (ret) {
- dev_err(dev, "Reserved loop qp failed(%d)!\n", ret);
- destroy_workqueue(free_mr->free_mr_wq);
- }
-
- return ret;
-}
-
-static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_free_mr *free_mr = &priv->free_mr;
-
- destroy_workqueue(free_mr->free_mr_wq);
-
- hns_roce_v1_release_lp_qp(hr_dev);
-}
-
-/**
- * hns_roce_v1_reset - reset RoCE
- * @hr_dev: RoCE device struct pointer
- * @dereset: true -- de-assert reset, false -- assert reset
- * Return: 0 on success, negative errno on failure
- */
-static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
-{
- struct device_node *dsaf_node;
- struct device *dev = &hr_dev->pdev->dev;
- struct device_node *np = dev->of_node;
- struct fwnode_handle *fwnode;
- int ret;
-
- /* check if this is DT/ACPI case */
- if (dev_of_node(dev)) {
- dsaf_node = of_parse_phandle(np, "dsaf-handle", 0);
- if (!dsaf_node) {
- dev_err(dev, "could not find dsaf-handle\n");
- return -EINVAL;
- }
- fwnode = &dsaf_node->fwnode;
- } else if (is_acpi_device_node(dev->fwnode)) {
- struct fwnode_reference_args args;
-
- ret = acpi_node_get_property_reference(dev->fwnode,
- "dsaf-handle", 0, &args);
- if (ret) {
- dev_err(dev, "could not find dsaf-handle\n");
- return ret;
- }
- fwnode = args.fwnode;
- } else {
- dev_err(dev, "cannot read data from DT or ACPI\n");
- return -ENXIO;
- }
-
- ret = hns_dsaf_roce_reset(fwnode, false);
- if (ret)
- return ret;
-
- if (dereset) {
- msleep(SLEEP_TIME_INTERVAL);
- ret = hns_dsaf_roce_reset(fwnode, true);
- }
-
- return ret;
-}
-
-static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_caps *caps = &hr_dev->caps;
- int i;
-
- hr_dev->vendor_id = roce_read(hr_dev, ROCEE_VENDOR_ID_REG);
- hr_dev->vendor_part_id = roce_read(hr_dev, ROCEE_VENDOR_PART_ID_REG);
- hr_dev->sys_image_guid = roce_read(hr_dev, ROCEE_SYS_IMAGE_GUID_L_REG) |
- ((u64)roce_read(hr_dev,
- ROCEE_SYS_IMAGE_GUID_H_REG) << 32);
- hr_dev->hw_rev = HNS_ROCE_HW_VER1;
-
- caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
- caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
- caps->min_wqes = HNS_ROCE_MIN_WQE_NUM;
- caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM;
- caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
- caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM;
- caps->max_sq_sg = HNS_ROCE_V1_SG_NUM;
- caps->max_rq_sg = HNS_ROCE_V1_SG_NUM;
- caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE;
- caps->num_uars = HNS_ROCE_V1_UAR_NUM;
- caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM;
- caps->num_aeq_vectors = HNS_ROCE_V1_AEQE_VEC_NUM;
- caps->num_comp_vectors = HNS_ROCE_V1_COMP_VEC_NUM;
- caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM;
- caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM;
- caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS;
- caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM;
- caps->max_qp_init_rdma = HNS_ROCE_V1_MAX_QP_INIT_RDMA;
- caps->max_qp_dest_rdma = HNS_ROCE_V1_MAX_QP_DEST_RDMA;
- caps->max_sq_desc_sz = HNS_ROCE_V1_MAX_SQ_DESC_SZ;
- caps->max_rq_desc_sz = HNS_ROCE_V1_MAX_RQ_DESC_SZ;
- caps->qpc_sz = HNS_ROCE_V1_QPC_SIZE;
- caps->irrl_entry_sz = HNS_ROCE_V1_IRRL_ENTRY_SIZE;
- caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE;
- caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE;
- caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE;
- caps->cqe_sz = HNS_ROCE_V1_CQE_SIZE;
- caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
- caps->reserved_lkey = 0;
- caps->reserved_pds = 0;
- caps->reserved_mrws = 1;
- caps->reserved_uars = 0;
- caps->reserved_cqs = 0;
- caps->reserved_qps = 12; /* 2 SQP per port, six ports total 12 */
- caps->chunk_sz = HNS_ROCE_V1_TABLE_CHUNK_SIZE;
-
- for (i = 0; i < caps->num_ports; i++)
- caps->pkey_table_len[i] = 1;
-
- for (i = 0; i < caps->num_ports; i++) {
- /* Six ports share 16 GIDs in the v1 engine */
- if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports))
- caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
- caps->num_ports;
- else
- caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
- caps->num_ports + 1;
- }
-
- caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM;
- caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM;
- caps->local_ca_ack_delay = roce_read(hr_dev, ROCEE_ACK_DELAY_REG);
- caps->max_mtu = IB_MTU_2048;
-
- return 0;
-}
-
-static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
-{
- int ret;
- u32 val;
- __le32 tmp;
- struct device *dev = &hr_dev->pdev->dev;
-
- /* DMAE user config */
- val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
- ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf);
- roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
- ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S,
- 1 << PAGES_SHIFT_16);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val);
-
- val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
- ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf);
- roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
- ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S,
- 1 << PAGES_SHIFT_16);
-
- ret = hns_roce_db_init(hr_dev);
- if (ret) {
- dev_err(dev, "doorbell init failed!\n");
- return ret;
- }
-
- ret = hns_roce_raq_init(hr_dev);
- if (ret) {
- dev_err(dev, "raq init failed!\n");
- goto error_failed_raq_init;
- }
-
- ret = hns_roce_bt_init(hr_dev);
- if (ret) {
- dev_err(dev, "bt init failed!\n");
- goto error_failed_bt_init;
- }
-
- ret = hns_roce_tptr_init(hr_dev);
- if (ret) {
- dev_err(dev, "tptr init failed!\n");
- goto error_failed_tptr_init;
- }
-
- ret = hns_roce_free_mr_init(hr_dev);
- if (ret) {
- dev_err(dev, "free mr init failed!\n");
- goto error_failed_free_mr_init;
- }
-
- hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
-
- return 0;
-
-error_failed_free_mr_init:
- hns_roce_tptr_free(hr_dev);
-
-error_failed_tptr_init:
- hns_roce_bt_free(hr_dev);
-
-error_failed_bt_init:
- hns_roce_raq_free(hr_dev);
-
-error_failed_raq_init:
- hns_roce_db_free(hr_dev);
- return ret;
-}
-
-static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
-{
- hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
- hns_roce_free_mr_free(hr_dev);
- hns_roce_tptr_free(hr_dev);
- hns_roce_bt_free(hr_dev);
- hns_roce_raq_free(hr_dev);
- hns_roce_db_free(hr_dev);
-}
-
-static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev)
-{
- u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG);
-
- return (!!(status & (1 << HCR_GO_BIT)));
-}
-
-static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
- u64 out_param, u32 in_modifier, u8 op_modifier,
- u16 op, u16 token, int event)
-{
- u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG);
- unsigned long end;
- u32 val = 0;
- __le32 tmp;
-
- end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
- while (hns_roce_v1_cmd_pending(hr_dev)) {
- if (time_after(jiffies, end)) {
- dev_err(hr_dev->dev, "jiffies=%d end=%d\n",
- (int)jiffies, (int)end);
- return -EAGAIN;
- }
- cond_resched();
- }
-
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
- op);
- roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
- ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
- roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
- roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
- roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_TOKEN_M,
- ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
-
- val = le32_to_cpu(tmp);
- writeq(in_param, hcr + 0);
- writeq(out_param, hcr + 2);
- writel(in_modifier, hcr + 4);
- /* Memory barrier */
- wmb();
-
- writel(val, hcr + 5);
-
- return 0;
-}
-
-static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
- unsigned int timeout)
-{
- u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG;
- unsigned long end;
- u32 status = 0;
-
- end = msecs_to_jiffies(timeout) + jiffies;
- while (hns_roce_v1_cmd_pending(hr_dev) && time_before(jiffies, end))
- cond_resched();
-
- if (hns_roce_v1_cmd_pending(hr_dev)) {
- dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
- return -ETIMEDOUT;
- }
-
- status = le32_to_cpu((__force __le32)
- __raw_readl(hcr + HCR_STATUS_OFFSET));
- if ((status & STATUS_MASK) != 0x1) {
- dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status);
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u32 port,
- int gid_index, const union ib_gid *gid,
- const struct ib_gid_attr *attr)
-{
- unsigned long flags;
- u32 *p = NULL;
- u8 gid_idx;
-
- gid_idx = hns_get_gid_index(hr_dev, port, gid_index);
-
- spin_lock_irqsave(&hr_dev->iboe.lock, flags);
-
- p = (u32 *)&gid->raw[0];
- roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
- (HNS_ROCE_V1_GID_NUM * gid_idx));
-
- p = (u32 *)&gid->raw[4];
- roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG +
- (HNS_ROCE_V1_GID_NUM * gid_idx));
-
- p = (u32 *)&gid->raw[8];
- roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG +
- (HNS_ROCE_V1_GID_NUM * gid_idx));
-
- p = (u32 *)&gid->raw[0xc];
- roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
- (HNS_ROCE_V1_GID_NUM * gid_idx));
-
- spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
-
- return 0;
-}
-
-static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
- const u8 *addr)
-{
- u32 reg_smac_l;
- u16 reg_smac_h;
- __le32 tmp;
- u16 *p_h;
- u32 *p;
- u32 val;
-
- /*
- * When the mac address changes, loopback may fail because the
- * smac no longer equals the dmac, so the reserved qps need to
- * be released and recreated.
- */
- if (hr_dev->hw->dereg_mr) {
- int ret;
-
- ret = hns_roce_v1_recreate_lp_qp(hr_dev);
- if (ret && ret != -ETIMEDOUT)
- return ret;
- }
-
- p = (u32 *)(&addr[0]);
- reg_smac_l = *p;
- roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
- PHY_PORT_OFFSET * phy_port);
-
- val = roce_read(hr_dev,
- ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
- tmp = cpu_to_le32(val);
- p_h = (u16 *)(&addr[4]);
- reg_smac_h = *p_h;
- roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
- ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
- val);
-
- return 0;
-}
-
-static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
- enum ib_mtu mtu)
-{
- __le32 tmp;
- u32 val;
-
- val = roce_read(hr_dev,
- ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
- ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
- val);
-}
-
-static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf,
- struct hns_roce_mr *mr,
- unsigned long mtpt_idx)
-{
- u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 };
- struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_v1_mpt_entry *mpt_entry;
- dma_addr_t pbl_ba;
- int count;
- int i;
-
- /* MPT filled into mailbox buf */
- mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf;
- memset(mpt_entry, 0, sizeof(*mpt_entry));
-
- roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M,
- MPT_BYTE_4_KEY_STATE_S, KEY_VALID);
- roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M,
- MPT_BYTE_4_KEY_S, mr->key);
- roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M,
- MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K);
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0);
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S,
- (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0);
- roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M,
- MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type);
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0);
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S,
- (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S,
- (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S,
- (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S,
- 0);
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0);
-
- roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
- MPT_BYTE_12_PBL_ADDR_H_S, 0);
- roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M,
- MPT_BYTE_12_MW_BIND_COUNTER_S, 0);
-
- mpt_entry->virt_addr_l = cpu_to_le32((u32)mr->iova);
- mpt_entry->virt_addr_h = cpu_to_le32((u32)(mr->iova >> 32));
- mpt_entry->length = cpu_to_le32((u32)mr->size);
-
- roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M,
- MPT_BYTE_28_PD_S, mr->pd);
- roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M,
- MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx);
- roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M,
- MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT);
-
- /* DMA memory register */
- if (mr->type == MR_TYPE_DMA)
- return 0;
-
- count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
- ARRAY_SIZE(pages), &pbl_ba);
- if (count < 1) {
- ibdev_err(ibdev, "failed to find PBL mtr, count = %d.", count);
- return -ENOBUFS;
- }
-
- /* Register user mr */
- for (i = 0; i < count; i++) {
- switch (i) {
- case 0:
- mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
- roce_set_field(mpt_entry->mpt_byte_36,
- MPT_BYTE_36_PA0_H_M,
- MPT_BYTE_36_PA0_H_S,
- (u32)(pages[i] >> PAGES_SHIFT_32));
- break;
- case 1:
- roce_set_field(mpt_entry->mpt_byte_36,
- MPT_BYTE_36_PA1_L_M,
- MPT_BYTE_36_PA1_L_S, (u32)(pages[i]));
- roce_set_field(mpt_entry->mpt_byte_40,
- MPT_BYTE_40_PA1_H_M,
- MPT_BYTE_40_PA1_H_S,
- (u32)(pages[i] >> PAGES_SHIFT_24));
- break;
- case 2:
- roce_set_field(mpt_entry->mpt_byte_40,
- MPT_BYTE_40_PA2_L_M,
- MPT_BYTE_40_PA2_L_S, (u32)(pages[i]));
- roce_set_field(mpt_entry->mpt_byte_44,
- MPT_BYTE_44_PA2_H_M,
- MPT_BYTE_44_PA2_H_S,
- (u32)(pages[i] >> PAGES_SHIFT_16));
- break;
- case 3:
- roce_set_field(mpt_entry->mpt_byte_44,
- MPT_BYTE_44_PA3_L_M,
- MPT_BYTE_44_PA3_L_S, (u32)(pages[i]));
- roce_set_field(mpt_entry->mpt_byte_48,
- MPT_BYTE_48_PA3_H_M,
- MPT_BYTE_48_PA3_H_S,
- (u32)(pages[i] >> PAGES_SHIFT_8));
- break;
- case 4:
- mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i]));
- roce_set_field(mpt_entry->mpt_byte_56,
- MPT_BYTE_56_PA4_H_M,
- MPT_BYTE_56_PA4_H_S,
- (u32)(pages[i] >> PAGES_SHIFT_32));
- break;
- case 5:
- roce_set_field(mpt_entry->mpt_byte_56,
- MPT_BYTE_56_PA5_L_M,
- MPT_BYTE_56_PA5_L_S, (u32)(pages[i]));
- roce_set_field(mpt_entry->mpt_byte_60,
- MPT_BYTE_60_PA5_H_M,
- MPT_BYTE_60_PA5_H_S,
- (u32)(pages[i] >> PAGES_SHIFT_24));
- break;
- case 6:
- roce_set_field(mpt_entry->mpt_byte_60,
- MPT_BYTE_60_PA6_L_M,
- MPT_BYTE_60_PA6_L_S, (u32)(pages[i]));
- roce_set_field(mpt_entry->mpt_byte_64,
- MPT_BYTE_64_PA6_H_M,
- MPT_BYTE_64_PA6_H_S,
- (u32)(pages[i] >> PAGES_SHIFT_16));
- break;
- default:
- break;
- }
- }
-
- mpt_entry->pbl_addr_l = cpu_to_le32(pbl_ba);
- roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
- MPT_BYTE_12_PBL_ADDR_H_S, upper_32_bits(pbl_ba));
-
- return 0;
-}
-
-static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
-{
- return hns_roce_buf_offset(hr_cq->mtr.kmem, n * HNS_ROCE_V1_CQE_SIZE);
-}
-
-static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
-{
- struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe);
-
-	/* Return the CQE only when its owner bit differs from the wrap bit of cons_idx */
- return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
- !!(n & hr_cq->cq_depth)) ? hr_cqe : NULL;
-}
-
-static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
-{
- return get_sw_cqe(hr_cq, hr_cq->cons_index);
-}
-
-static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
-{
- __le32 doorbell[2];
-
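-	/* doorbell[0] holds the new consumer index; doorbell[1] carries the
-	 * hardware sync bit, the doorbell command and the CQN.
-	 */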
- doorbell[0] = cpu_to_le32(cons_index & ((hr_cq->cq_depth << 1) - 1));
- doorbell[1] = 0;
- roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
- roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
- ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
- roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
- ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0);
- roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
- ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);
-
- hns_roce_write64_k(doorbell, hr_cq->db_reg);
-}
-
-static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
- struct hns_roce_srq *srq)
-{
- struct hns_roce_cqe *cqe, *dest;
- u32 prod_index;
- int nfreed = 0;
- u8 owner_bit;
-
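-	/* Walk forward from the consumer index to find the end of the
-	 * software-owned CQEs.
-	 */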
- for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index);
- ++prod_index) {
- if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
- break;
- }
-
- /*
- * Now backwards through the CQ, removing CQ entries
- * that match our QP by overwriting them with next entries.
- */
- while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
- cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
- if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
- CQE_BYTE_16_LOCAL_QPN_S) &
- HNS_ROCE_CQE_QPN_MASK) == qpn) {
-			/* The v1 engine does not support SRQ */
- ++nfreed;
- } else if (nfreed) {
- dest = get_cqe(hr_cq, (prod_index + nfreed) &
- hr_cq->ib_cq.cqe);
- owner_bit = roce_get_bit(dest->cqe_byte_4,
- CQE_BYTE_4_OWNER_S);
- memcpy(dest, cqe, sizeof(*cqe));
- roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S,
- owner_bit);
- }
- }
-
- if (nfreed) {
- hr_cq->cons_index += nfreed;
- hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
- }
-}
-
-static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
- struct hns_roce_srq *srq)
-{
- spin_lock_irq(&hr_cq->lock);
- __hns_roce_v1_cq_clean(hr_cq, qpn, srq);
- spin_unlock_irq(&hr_cq->lock);
-}
-
-static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq, void *mb_buf,
- u64 *mtts, dma_addr_t dma_handle)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
- struct hns_roce_cq_context *cq_context = mb_buf;
- dma_addr_t tptr_dma_addr;
- int offset;
-
- memset(cq_context, 0, sizeof(*cq_context));
-
- /* Get the tptr for this CQ. */
- offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
- tptr_dma_addr = tptr_buf->map + offset;
- hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);
-
- /* Register cq_context members */
- roce_set_field(cq_context->cqc_byte_4,
- CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M,
- CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
- roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
- CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);
-
- cq_context->cq_bt_l = cpu_to_le32((u32)dma_handle);
-
- roce_set_field(cq_context->cqc_byte_12,
- CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
- CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S,
- ((u64)dma_handle >> 32));
- roce_set_field(cq_context->cqc_byte_12,
- CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
- CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
- ilog2(hr_cq->cq_depth));
- roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
- CQ_CONTEXT_CQC_BYTE_12_CEQN_S, hr_cq->vector);
-
- cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0]));
-
- roce_set_field(cq_context->cqc_byte_20,
- CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
- CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S, (mtts[0]) >> 32);
-	/* This index is maintained by hardware, so just initialize it to 0 */
- roce_set_field(cq_context->cqc_byte_20,
- CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
- CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0);
-	/*
-	 * 44 = 32 + 12: the address passed to hardware is in units of 4K
-	 * pages (shift by 12), and this field holds the upper 32 bits of
-	 * that value (shift by a further 32).
-	 */
- roce_set_field(cq_context->cqc_byte_20,
- CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
- CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
- tptr_dma_addr >> 44);
-
- cq_context->cqe_tptr_addr_l = cpu_to_le32((u32)(tptr_dma_addr >> 12));
-
- roce_set_field(cq_context->cqc_byte_32,
- CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
- CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0);
- roce_set_bit(cq_context->cqc_byte_32,
- CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0);
- roce_set_bit(cq_context->cqc_byte_32,
- CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0);
- roce_set_bit(cq_context->cqc_byte_32,
- CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0);
- roce_set_bit(cq_context->cqc_byte_32,
- CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
- 0);
- /* The initial value of cq's ci is 0 */
- roce_set_field(cq_context->cqc_byte_32,
- CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
- CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
-}
-
-static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
- enum ib_cq_notify_flags flags)
-{
- struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
- u32 notification_flag;
- __le32 doorbell[2] = {};
-
- notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
- IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
- /*
-	 * flags = 0: Notification Flag = 1, next completion
-	 * flags = 1: Notification Flag = 0, solicited completion
- */
- doorbell[0] =
- cpu_to_le32(hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
- roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
- roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
- ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
- roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
- ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1);
- roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
- ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
- hr_cq->cqn | notification_flag);
-
- hns_roce_write64_k(doorbell, hr_cq->db_reg);
-
- return 0;
-}
-
-static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
- struct hns_roce_qp **cur_qp, struct ib_wc *wc)
-{
- int qpn;
- int is_send;
- u16 wqe_ctr;
- u32 status;
- u32 opcode;
- struct hns_roce_cqe *cqe;
- struct hns_roce_qp *hr_qp;
- struct hns_roce_wq *wq;
- struct hns_roce_wqe_ctrl_seg *sq_wqe;
- struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
- struct device *dev = &hr_dev->pdev->dev;
-
-	/* Find the CQE according to the consumer index */
- cqe = next_cqe_sw(hr_cq);
- if (!cqe)
- return -EAGAIN;
-
- ++hr_cq->cons_index;
- /* Memory barrier */
- rmb();
- /* 0->SQ, 1->RQ */
- is_send = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S));
-
-	/* Local_qpn in a UD CQE is always <= 1, so the real QPN has to be computed */
- if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
- CQE_BYTE_16_LOCAL_QPN_S) <= 1) {
- qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M,
- CQE_BYTE_20_PORT_NUM_S) +
- roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
- CQE_BYTE_16_LOCAL_QPN_S) *
- HNS_ROCE_MAX_PORTS;
- } else {
- qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
- CQE_BYTE_16_LOCAL_QPN_S);
- }
-
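-	/* Only look the QP up again if this CQE belongs to a different QP
-	 * than the previous one.
-	 */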
- if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) {
- hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
- if (unlikely(!hr_qp)) {
- dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n",
- hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK));
- return -EINVAL;
- }
-
- *cur_qp = hr_qp;
- }
-
- wc->qp = &(*cur_qp)->ibqp;
- wc->vendor_err = 0;
-
- status = roce_get_field(cqe->cqe_byte_4,
- CQE_BYTE_4_STATUS_OF_THE_OPERATION_M,
- CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) &
- HNS_ROCE_CQE_STATUS_MASK;
- switch (status) {
- case HNS_ROCE_CQE_SUCCESS:
- wc->status = IB_WC_SUCCESS;
- break;
- case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR:
- wc->status = IB_WC_LOC_LEN_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR:
- wc->status = IB_WC_LOC_QP_OP_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR:
- wc->status = IB_WC_LOC_PROT_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR:
- wc->status = IB_WC_WR_FLUSH_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR:
- wc->status = IB_WC_MW_BIND_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR:
- wc->status = IB_WC_BAD_RESP_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR:
- wc->status = IB_WC_LOC_ACCESS_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
- wc->status = IB_WC_REM_INV_REQ_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR:
- wc->status = IB_WC_REM_ACCESS_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR:
- wc->status = IB_WC_REM_OP_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
- wc->status = IB_WC_RETRY_EXC_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
- wc->status = IB_WC_RNR_RETRY_EXC_ERR;
- break;
- default:
- wc->status = IB_WC_GENERAL_ERR;
- break;
- }
-
-	/* The CQE reported an error status, return directly */
- if (wc->status != IB_WC_SUCCESS)
- return 0;
-
- if (is_send) {
-		/* The CQE corresponds to the SQ */
- sq_wqe = hns_roce_get_send_wqe(*cur_qp,
- roce_get_field(cqe->cqe_byte_4,
- CQE_BYTE_4_WQE_INDEX_M,
- CQE_BYTE_4_WQE_INDEX_S) &
- ((*cur_qp)->sq.wqe_cnt-1));
- switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) {
- case HNS_ROCE_WQE_OPCODE_SEND:
- wc->opcode = IB_WC_SEND;
- break;
- case HNS_ROCE_WQE_OPCODE_RDMA_READ:
- wc->opcode = IB_WC_RDMA_READ;
- wc->byte_len = le32_to_cpu(cqe->byte_cnt);
- break;
- case HNS_ROCE_WQE_OPCODE_RDMA_WRITE:
- wc->opcode = IB_WC_RDMA_WRITE;
- break;
- case HNS_ROCE_WQE_OPCODE_LOCAL_INV:
- wc->opcode = IB_WC_LOCAL_INV;
- break;
- case HNS_ROCE_WQE_OPCODE_UD_SEND:
- wc->opcode = IB_WC_SEND;
- break;
- default:
- wc->status = IB_WC_GENERAL_ERR;
- break;
- }
- wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ?
- IB_WC_WITH_IMM : 0);
-
- wq = &(*cur_qp)->sq;
- if ((*cur_qp)->sq_signal_bits) {
- /*
-			 * If sq_signal_bits is set, first update the tail
-			 * pointer to the WQE that the current CQE corresponds to.
- */
- wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
- CQE_BYTE_4_WQE_INDEX_M,
- CQE_BYTE_4_WQE_INDEX_S);
- wq->tail += (wqe_ctr - (u16)wq->tail) &
- (wq->wqe_cnt - 1);
- }
- wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
- ++wq->tail;
- } else {
-		/* The CQE corresponds to the RQ */
- wc->byte_len = le32_to_cpu(cqe->byte_cnt);
- opcode = roce_get_field(cqe->cqe_byte_4,
- CQE_BYTE_4_OPERATION_TYPE_M,
- CQE_BYTE_4_OPERATION_TYPE_S) &
- HNS_ROCE_CQE_OPCODE_MASK;
- switch (opcode) {
- case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
- wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
- wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data =
- cpu_to_be32(le32_to_cpu(cqe->immediate_data));
- break;
- case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
- if (roce_get_bit(cqe->cqe_byte_4,
- CQE_BYTE_4_IMM_INDICATOR_S)) {
- wc->opcode = IB_WC_RECV;
- wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = cpu_to_be32(
- le32_to_cpu(cqe->immediate_data));
- } else {
- wc->opcode = IB_WC_RECV;
- wc->wc_flags = 0;
- }
- break;
- default:
- wc->status = IB_WC_GENERAL_ERR;
- break;
- }
-
- /* Update tail pointer, record wr_id */
- wq = &(*cur_qp)->rq;
- wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
- ++wq->tail;
- wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M,
- CQE_BYTE_20_SL_S);
- wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20,
- CQE_BYTE_20_REMOTE_QPN_M,
- CQE_BYTE_20_REMOTE_QPN_S);
- wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20,
- CQE_BYTE_20_GRH_PRESENT_S) ?
- IB_WC_GRH : 0);
- wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28,
- CQE_BYTE_28_P_KEY_IDX_M,
- CQE_BYTE_28_P_KEY_IDX_S);
- }
-
- return 0;
-}
-
-int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
-{
- struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
- struct hns_roce_qp *cur_qp = NULL;
- unsigned long flags;
- int npolled;
-	int ret = 0;
-
- spin_lock_irqsave(&hr_cq->lock, flags);
-
- for (npolled = 0; npolled < num_entries; ++npolled) {
- ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled);
- if (ret)
- break;
- }
-
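-	/* Publish the new consumer index to hardware through the tptr entry
-	 * and the CQ doorbell.
-	 */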
- if (npolled) {
- *hr_cq->tptr_addr = hr_cq->cons_index &
- ((hr_cq->cq_depth << 1) - 1);
-
- hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
- }
-
- spin_unlock_irqrestore(&hr_cq->lock, flags);
-
- if (ret == 0 || ret == -EAGAIN)
- return npolled;
- else
- return ret;
-}
-
-static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table, int obj,
- int step_idx)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct device *dev = &hr_dev->pdev->dev;
- long end = HW_SYNC_TIMEOUT_MSECS;
- __le32 bt_cmd_val[2] = {0};
- unsigned long flags = 0;
- void __iomem *bt_cmd;
- u64 bt_ba = 0;
-
- switch (table->type) {
- case HEM_TYPE_QPC:
- bt_ba = priv->bt_table.qpc_buf.map >> 12;
- break;
- case HEM_TYPE_MTPT:
- bt_ba = priv->bt_table.mtpt_buf.map >> 12;
- break;
- case HEM_TYPE_CQC:
- bt_ba = priv->bt_table.cqc_buf.map >> 12;
- break;
- case HEM_TYPE_SRQC:
-		dev_dbg(dev, "HEM_TYPE_SRQC not supported.\n");
- return -EINVAL;
- default:
- return 0;
- }
- roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type);
- roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
- roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
- roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
-
- spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
-
- bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
-
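-	/* Wait for the hardware sync bit of the previous BT command to clear
-	 * before issuing a new one.
-	 */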
- while (1) {
- if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
- if (!end) {
- dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
- spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
- flags);
- return -EBUSY;
- }
- } else {
- break;
- }
- mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
- end -= HW_SYNC_SLEEP_TIME_INTERVAL;
- }
-
- bt_cmd_val[0] = cpu_to_le32(bt_ba);
- roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
- hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
-
- spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
-
- return 0;
-}
-
-static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
- enum hns_roce_qp_state cur_state,
- enum hns_roce_qp_state new_state,
- struct hns_roce_qp_context *context,
- struct hns_roce_qp *hr_qp)
-{
- static const u16
- op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = {
- [HNS_ROCE_QP_STATE_RST] = {
- [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
- [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
- [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
- },
- [HNS_ROCE_QP_STATE_INIT] = {
- [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
- [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
-		/* Note: In the v1 engine, HW doesn't support INIT2INIT.
-		 * The RST2INIT cmd is used instead of INIT2INIT.
- */
- [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
- [HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP,
- },
- [HNS_ROCE_QP_STATE_RTR] = {
- [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
- [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
- [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP,
- },
- [HNS_ROCE_QP_STATE_RTS] = {
- [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
- [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
- [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP,
- [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP,
- },
- [HNS_ROCE_QP_STATE_SQD] = {
- [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
- [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
- [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP,
- [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP,
- },
- [HNS_ROCE_QP_STATE_ERR] = {
- [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
- [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
- }
- };
-
- struct hns_roce_cmd_mailbox *mailbox;
- struct device *dev = &hr_dev->pdev->dev;
- int ret;
-
- if (cur_state >= HNS_ROCE_QP_NUM_STATE ||
- new_state >= HNS_ROCE_QP_NUM_STATE ||
- !op[cur_state][new_state]) {
-		dev_err(dev, "[modify_qp] state %d to %d is not supported\n",
- cur_state, new_state);
- return -EINVAL;
- }
-
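-	/* The 2RST and 2ERR commands take no QP context, so no mailbox
-	 * buffer is needed for them.
-	 */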
- if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
- return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
- HNS_ROCE_CMD_2RST_QP,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
-
- if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
- return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
- HNS_ROCE_CMD_2ERR_QP,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
-
- mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
-
- memcpy(mailbox->buf, context, sizeof(*context));
-
- ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
- op[cur_state][new_state],
- HNS_ROCE_CMD_TIMEOUT_MSECS);
-
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
- return ret;
-}
-
-static int find_wqe_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
- u64 *sq_ba, u64 *rq_ba, dma_addr_t *bt_ba)
-{
- struct ib_device *ibdev = &hr_dev->ib_dev;
- int count;
-
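-	/* The SQ base address sits at offset 0 of the WQE mtr and the RQ
-	 * base address at hr_qp->rq.offset.
-	 */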
- count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, sq_ba, 1, bt_ba);
- if (count < 1) {
- ibdev_err(ibdev, "Failed to find SQ ba\n");
- return -ENOBUFS;
- }
-
- count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, rq_ba,
- 1, NULL);
- if (!count) {
- ibdev_err(ibdev, "Failed to find RQ ba\n");
- return -ENOBUFS;
- }
-
- return 0;
-}
-
-static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
- int attr_mask, enum ib_qp_state cur_state,
- enum ib_qp_state new_state)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct hns_roce_sqp_context *context;
- dma_addr_t dma_handle = 0;
- u32 __iomem *addr;
- u64 sq_ba = 0;
- u64 rq_ba = 0;
- __le32 tmp;
- u32 reg_val;
-
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return -ENOMEM;
-
- /* Search QP buf's MTTs */
- if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle))
- goto out;
-
- if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
- roce_set_field(context->qp1c_bytes_4,
- QP1C_BYTES_4_SQ_WQE_SHIFT_M,
- QP1C_BYTES_4_SQ_WQE_SHIFT_S,
- ilog2((unsigned int)hr_qp->sq.wqe_cnt));
- roce_set_field(context->qp1c_bytes_4,
- QP1C_BYTES_4_RQ_WQE_SHIFT_M,
- QP1C_BYTES_4_RQ_WQE_SHIFT_S,
- ilog2((unsigned int)hr_qp->rq.wqe_cnt));
- roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
- QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);
-
- context->sq_rq_bt_l = cpu_to_le32(dma_handle);
- roce_set_field(context->qp1c_bytes_12,
- QP1C_BYTES_12_SQ_RQ_BT_H_M,
- QP1C_BYTES_12_SQ_RQ_BT_H_S,
- upper_32_bits(dma_handle));
-
- roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
- QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
- roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
- QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
- roce_set_bit(context->qp1c_bytes_16,
- QP1C_BYTES_16_SIGNALING_TYPE_S,
- hr_qp->sq_signal_bits);
- roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
- 1);
- roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
- 1);
- roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S,
- 0);
-
- roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M,
- QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head);
- roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
- QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);
-
- context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba);
-
- roce_set_field(context->qp1c_bytes_28,
- QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
- QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
- upper_32_bits(rq_ba));
- roce_set_field(context->qp1c_bytes_28,
- QP1C_BYTES_28_RQ_CUR_IDX_M,
- QP1C_BYTES_28_RQ_CUR_IDX_S, 0);
-
- roce_set_field(context->qp1c_bytes_32,
- QP1C_BYTES_32_RX_CQ_NUM_M,
- QP1C_BYTES_32_RX_CQ_NUM_S,
- to_hr_cq(ibqp->recv_cq)->cqn);
- roce_set_field(context->qp1c_bytes_32,
- QP1C_BYTES_32_TX_CQ_NUM_M,
- QP1C_BYTES_32_TX_CQ_NUM_S,
- to_hr_cq(ibqp->send_cq)->cqn);
-
- context->cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
-
- roce_set_field(context->qp1c_bytes_40,
- QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
- QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
- upper_32_bits(sq_ba));
- roce_set_field(context->qp1c_bytes_40,
- QP1C_BYTES_40_SQ_CUR_IDX_M,
- QP1C_BYTES_40_SQ_CUR_IDX_S, 0);
-
- /* Copy context to QP1C register */
- addr = (u32 __iomem *)(hr_dev->reg_base +
- ROCEE_QP1C_CFG0_0_REG +
- hr_qp->phy_port * sizeof(*context));
-
- writel(le32_to_cpu(context->qp1c_bytes_4), addr);
- writel(le32_to_cpu(context->sq_rq_bt_l), addr + 1);
- writel(le32_to_cpu(context->qp1c_bytes_12), addr + 2);
- writel(le32_to_cpu(context->qp1c_bytes_16), addr + 3);
- writel(le32_to_cpu(context->qp1c_bytes_20), addr + 4);
- writel(le32_to_cpu(context->cur_rq_wqe_ba_l), addr + 5);
- writel(le32_to_cpu(context->qp1c_bytes_28), addr + 6);
- writel(le32_to_cpu(context->qp1c_bytes_32), addr + 7);
- writel(le32_to_cpu(context->cur_sq_wqe_ba_l), addr + 8);
- writel(le32_to_cpu(context->qp1c_bytes_40), addr + 9);
- }
-
- /* Modify QP1C status */
- reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
- hr_qp->phy_port * sizeof(*context));
- tmp = cpu_to_le32(reg_val);
- roce_set_field(tmp, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
- ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
- reg_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
- hr_qp->phy_port * sizeof(*context), reg_val);
-
- hr_qp->state = new_state;
- if (new_state == IB_QPS_RESET) {
- hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
- ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
- if (ibqp->send_cq != ibqp->recv_cq)
- hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
- hr_qp->qpn, NULL);
-
- hr_qp->rq.head = 0;
- hr_qp->rq.tail = 0;
- hr_qp->sq.head = 0;
- hr_qp->sq.tail = 0;
- }
-
- kfree(context);
- return 0;
-
-out:
- kfree(context);
- return -EINVAL;
-}
-
-static bool check_qp_state(enum ib_qp_state cur_state,
- enum ib_qp_state new_state)
-{
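-	/* Transitions not listed here are rejected for the v1 engine. */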
- static const bool sm[][IB_QPS_ERR + 1] = {
- [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
- [IB_QPS_INIT] = true },
- [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
- [IB_QPS_INIT] = true,
- [IB_QPS_RTR] = true,
- [IB_QPS_ERR] = true },
- [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
- [IB_QPS_RTS] = true,
- [IB_QPS_ERR] = true },
- [IB_QPS_RTS] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true },
- [IB_QPS_SQD] = {},
- [IB_QPS_SQE] = {},
- [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
- };
-
- return sm[cur_state][new_state];
-}
-
-static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
- int attr_mask, enum ib_qp_state cur_state,
- enum ib_qp_state new_state)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_qp_context *context;
- const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
- dma_addr_t dma_handle_2 = 0;
- dma_addr_t dma_handle = 0;
- __le32 doorbell[2] = {0};
- u64 *mtts_2 = NULL;
- int ret = -EINVAL;
- const u8 *smac;
- u64 sq_ba = 0;
- u64 rq_ba = 0;
- u32 port;
- u32 port_num;
- u8 *dmac;
-
- if (!check_qp_state(cur_state, new_state)) {
- ibdev_err(ibqp->device,
- "not support QP(%u) status from %d to %d\n",
- ibqp->qp_num, cur_state, new_state);
- return -EINVAL;
- }
-
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return -ENOMEM;
-
- /* Search qp buf's mtts */
- if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle))
- goto out;
-
- /* Search IRRL's mtts */
- mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
- hr_qp->qpn, &dma_handle_2);
- if (mtts_2 == NULL) {
- dev_err(dev, "qp irrl_table find failed\n");
- goto out;
- }
-
- /*
- * Reset to init
- * Mandatory param:
- * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
- * Optional param: NA
- */
- if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
- QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
- to_hr_qp_type(hr_qp->ibqp.qp_type));
-
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
- );
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)
- );
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
- QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
- ilog2((unsigned int)hr_qp->sq.wqe_cnt));
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
- QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
- ilog2((unsigned int)hr_qp->rq.wqe_cnt));
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_PD_M,
- QP_CONTEXT_QPC_BYTES_4_PD_S,
- to_hr_pd(ibqp->pd)->pdn);
- hr_qp->access_flags = attr->qp_access_flags;
- roce_set_field(context->qpc_bytes_8,
- QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
- QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
- to_hr_cq(ibqp->send_cq)->cqn);
- roce_set_field(context->qpc_bytes_8,
- QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
- QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
- to_hr_cq(ibqp->recv_cq)->cqn);
-
- if (ibqp->srq)
- roce_set_field(context->qpc_bytes_12,
- QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
- QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
- to_hr_srq(ibqp->srq)->srqn);
-
- roce_set_field(context->qpc_bytes_12,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
- attr->pkey_index);
- hr_qp->pkey_index = attr->pkey_index;
- roce_set_field(context->qpc_bytes_16,
- QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
- QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
- } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
- QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
- to_hr_qp_type(hr_qp->ibqp.qp_type));
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
- if (attr_mask & IB_QP_ACCESS_FLAGS) {
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
- !!(attr->qp_access_flags &
- IB_ACCESS_REMOTE_READ));
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
- !!(attr->qp_access_flags &
- IB_ACCESS_REMOTE_WRITE));
- } else {
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
- !!(hr_qp->access_flags &
- IB_ACCESS_REMOTE_READ));
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
- !!(hr_qp->access_flags &
- IB_ACCESS_REMOTE_WRITE));
- }
-
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
- QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
- ilog2((unsigned int)hr_qp->sq.wqe_cnt));
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
- QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
- ilog2((unsigned int)hr_qp->rq.wqe_cnt));
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_PD_M,
- QP_CONTEXT_QPC_BYTES_4_PD_S,
- to_hr_pd(ibqp->pd)->pdn);
-
- roce_set_field(context->qpc_bytes_8,
- QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
- QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
- to_hr_cq(ibqp->send_cq)->cqn);
- roce_set_field(context->qpc_bytes_8,
- QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
- QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
- to_hr_cq(ibqp->recv_cq)->cqn);
-
- if (ibqp->srq)
- roce_set_field(context->qpc_bytes_12,
- QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
- QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
- to_hr_srq(ibqp->srq)->srqn);
- if (attr_mask & IB_QP_PKEY_INDEX)
- roce_set_field(context->qpc_bytes_12,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
- attr->pkey_index);
- else
- roce_set_field(context->qpc_bytes_12,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
- hr_qp->pkey_index);
-
- roce_set_field(context->qpc_bytes_16,
- QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
- QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
- } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
- if ((attr_mask & IB_QP_ALT_PATH) ||
- (attr_mask & IB_QP_ACCESS_FLAGS) ||
- (attr_mask & IB_QP_PKEY_INDEX) ||
- (attr_mask & IB_QP_QKEY)) {
- dev_err(dev, "INIT2RTR attr_mask error\n");
- goto out;
- }
-
- dmac = (u8 *)attr->ah_attr.roce.dmac;
-
- context->sq_rq_bt_l = cpu_to_le32(dma_handle);
- roce_set_field(context->qpc_bytes_24,
- QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
- QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
- upper_32_bits(dma_handle));
- roce_set_bit(context->qpc_bytes_24,
- QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S,
- 1);
- roce_set_field(context->qpc_bytes_24,
- QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
- QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S,
- attr->min_rnr_timer);
- context->irrl_ba_l = cpu_to_le32((u32)(dma_handle_2));
- roce_set_field(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M,
- QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S,
- ((u32)(dma_handle_2 >> 32)) &
- QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M);
- roce_set_field(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M,
- QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0);
- roce_set_bit(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S,
- 1);
- roce_set_bit(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
- hr_qp->sq_signal_bits);
-
- port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
- hr_qp->port;
- smac = (const u8 *)hr_dev->dev_addr[port];
-		/* When dmac equals smac, or loop_idc is 1, loopback should be used */
- if (ether_addr_equal_unaligned(dmac, smac) ||
- hr_dev->loop_idc == 0x1)
- roce_set_bit(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
-
- roce_set_bit(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
- rdma_ah_get_ah_flags(&attr->ah_attr));
- roce_set_field(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
- QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
- ilog2((unsigned int)attr->max_dest_rd_atomic));
-
- if (attr_mask & IB_QP_DEST_QPN)
- roce_set_field(context->qpc_bytes_36,
- QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
- QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
- attr->dest_qp_num);
-
- /* Configure GID index */
- port_num = rdma_ah_get_port_num(&attr->ah_attr);
- roce_set_field(context->qpc_bytes_36,
- QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
- QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S,
- hns_get_gid_index(hr_dev,
- port_num - 1,
- grh->sgid_index));
-
- memcpy(&(context->dmac_l), dmac, 4);
-
- roce_set_field(context->qpc_bytes_44,
- QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
- QP_CONTEXT_QPC_BYTES_44_DMAC_H_S,
- *((u16 *)(&dmac[4])));
- roce_set_field(context->qpc_bytes_44,
- QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M,
- QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S,
- rdma_ah_get_static_rate(&attr->ah_attr));
- roce_set_field(context->qpc_bytes_44,
- QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
- QP_CONTEXT_QPC_BYTES_44_HOPLMT_S,
- grh->hop_limit);
-
- roce_set_field(context->qpc_bytes_48,
- QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
- QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S,
- grh->flow_label);
- roce_set_field(context->qpc_bytes_48,
- QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
- QP_CONTEXT_QPC_BYTES_48_TCLASS_S,
- grh->traffic_class);
- roce_set_field(context->qpc_bytes_48,
- QP_CONTEXT_QPC_BYTES_48_MTU_M,
- QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu);
-
- memcpy(context->dgid, grh->dgid.raw,
- sizeof(grh->dgid.raw));
-
- dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l,
- roce_get_field(context->qpc_bytes_44,
- QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
- QP_CONTEXT_QPC_BYTES_44_DMAC_H_S));
-
- roce_set_field(context->qpc_bytes_68,
- QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
- QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
- hr_qp->rq.head);
- roce_set_field(context->qpc_bytes_68,
- QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
- QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
-
- context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba);
-
- roce_set_field(context->qpc_bytes_76,
- QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
- QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S,
- upper_32_bits(rq_ba));
- roce_set_field(context->qpc_bytes_76,
- QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M,
- QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0);
-
- context->rx_rnr_time = 0;
-
- roce_set_field(context->qpc_bytes_84,
- QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M,
- QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S,
- attr->rq_psn - 1);
- roce_set_field(context->qpc_bytes_84,
- QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M,
- QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0);
-
- roce_set_field(context->qpc_bytes_88,
- QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
- QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S,
- attr->rq_psn);
- roce_set_bit(context->qpc_bytes_88,
- QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0);
- roce_set_bit(context->qpc_bytes_88,
- QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0);
- roce_set_field(context->qpc_bytes_88,
- QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M,
- QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S,
- 0);
- roce_set_field(context->qpc_bytes_88,
- QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M,
- QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S,
- 0);
-
- context->dma_length = 0;
- context->r_key = 0;
- context->va_l = 0;
- context->va_h = 0;
-
- roce_set_field(context->qpc_bytes_108,
- QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M,
- QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0);
- roce_set_bit(context->qpc_bytes_108,
- QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0);
- roce_set_bit(context->qpc_bytes_108,
- QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0);
-
- roce_set_field(context->qpc_bytes_112,
- QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M,
- QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0);
- roce_set_field(context->qpc_bytes_112,
- QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M,
- QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0);
-
-		/* Port and SL used by the chip when sending response acks */
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
- QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
- hr_qp->phy_port);
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_SL_M,
- QP_CONTEXT_QPC_BYTES_156_SL_S,
- rdma_ah_get_sl(&attr->ah_attr));
- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
- } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
-		/* If any optional parameter is present, return an error */
- if ((attr_mask & IB_QP_ALT_PATH) ||
- (attr_mask & IB_QP_ACCESS_FLAGS) ||
- (attr_mask & IB_QP_QKEY) ||
- (attr_mask & IB_QP_PATH_MIG_STATE) ||
- (attr_mask & IB_QP_CUR_STATE) ||
- (attr_mask & IB_QP_MIN_RNR_TIMER)) {
- dev_err(dev, "RTR2RTS attr_mask error\n");
- goto out;
- }
-
- context->rx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
-
- roce_set_field(context->qpc_bytes_120,
- QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
- QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S,
- upper_32_bits(sq_ba));
-
- roce_set_field(context->qpc_bytes_124,
- QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M,
- QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0);
- roce_set_field(context->qpc_bytes_124,
- QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M,
- QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0);
-
- roce_set_field(context->qpc_bytes_128,
- QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M,
- QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S,
- attr->sq_psn);
- roce_set_bit(context->qpc_bytes_128,
- QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0);
- roce_set_field(context->qpc_bytes_128,
- QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M,
- QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S,
- 0);
- roce_set_bit(context->qpc_bytes_128,
- QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0);
-
- roce_set_field(context->qpc_bytes_132,
- QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M,
- QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0);
- roce_set_field(context->qpc_bytes_132,
- QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M,
- QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0);
-
- roce_set_field(context->qpc_bytes_136,
- QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M,
- QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S,
- attr->sq_psn);
- roce_set_field(context->qpc_bytes_136,
- QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M,
- QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S,
- attr->sq_psn);
-
- roce_set_field(context->qpc_bytes_140,
- QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M,
- QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S,
- (attr->sq_psn >> SQ_PSN_SHIFT));
- roce_set_field(context->qpc_bytes_140,
- QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M,
- QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0);
- roce_set_bit(context->qpc_bytes_140,
- QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);
-
- roce_set_field(context->qpc_bytes_148,
- QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
- QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
- roce_set_field(context->qpc_bytes_148,
- QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
- QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
- attr->retry_cnt);
- roce_set_field(context->qpc_bytes_148,
- QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
- QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
- attr->rnr_retry);
- roce_set_field(context->qpc_bytes_148,
- QP_CONTEXT_QPC_BYTES_148_LSN_M,
- QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);
-
- context->rnr_retry = 0;
-
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
- QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
- attr->retry_cnt);
- if (attr->timeout < 0x12) {
-			dev_info(dev, "ack timeout value (0x%x) must be at least 0x12.\n",
- attr->timeout);
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
- 0x12);
- } else {
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
- attr->timeout);
- }
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
- QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
- attr->rnr_retry);
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
- QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
- hr_qp->phy_port);
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_SL_M,
- QP_CONTEXT_QPC_BYTES_156_SL_S,
- rdma_ah_get_sl(&attr->ah_attr));
- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
- QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S,
- ilog2((unsigned int)attr->max_rd_atomic));
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M,
- QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0);
- context->pkt_use_len = 0;
-
- roce_set_field(context->qpc_bytes_164,
- QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
- QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn);
- roce_set_field(context->qpc_bytes_164,
- QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M,
- QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0);
-
- roce_set_field(context->qpc_bytes_168,
- QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M,
- QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S,
- attr->sq_psn);
- roce_set_field(context->qpc_bytes_168,
- QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M,
- QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0);
- roce_set_field(context->qpc_bytes_168,
- QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M,
- QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0);
- roce_set_bit(context->qpc_bytes_168,
- QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0);
- roce_set_bit(context->qpc_bytes_168,
- QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0);
- roce_set_bit(context->qpc_bytes_168,
- QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0);
- context->sge_use_len = 0;
-
- roce_set_field(context->qpc_bytes_176,
- QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M,
- QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0);
- roce_set_field(context->qpc_bytes_176,
- QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M,
- QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S,
- 0);
- roce_set_field(context->qpc_bytes_180,
- QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M,
- QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0);
- roce_set_field(context->qpc_bytes_180,
- QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
- QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);
-
- context->tx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
-
- roce_set_field(context->qpc_bytes_188,
- QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
- QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
- upper_32_bits(sq_ba));
- roce_set_bit(context->qpc_bytes_188,
- QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
- roce_set_field(context->qpc_bytes_188,
- QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
- QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
- 0);
- }
-
-	/* Every state migration must update the QP state field */
- roce_set_field(context->qpc_bytes_144,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);
-
- /* SW pass context to HW */
- ret = hns_roce_v1_qp_modify(hr_dev, to_hns_roce_state(cur_state),
- to_hns_roce_state(new_state), context,
- hr_qp);
- if (ret) {
- dev_err(dev, "hns_roce_qp_modify failed\n");
- goto out;
- }
-
- /*
-	 * The driver uses rst2init instead of init2init, so the hardware
-	 * needs to refresh the RQ head via the doorbell again.
- */
- if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
- roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
- RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
- roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
- RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
- roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
- RQ_DOORBELL_U32_8_CMD_S, 1);
- roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
-
- if (ibqp->uobject) {
- hr_qp->rq.db_reg = hr_dev->reg_base +
- hr_dev->odb_offset +
- DB_REG_OFFSET * hr_dev->priv_uar.index;
- }
-
- hns_roce_write64_k(doorbell, hr_qp->rq.db_reg);
- }
-
- hr_qp->state = new_state;
-
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
- hr_qp->resp_depth = attr->max_dest_rd_atomic;
- if (attr_mask & IB_QP_PORT) {
- hr_qp->port = attr->port_num - 1;
- hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
- }
-
- if (new_state == IB_QPS_RESET && !ibqp->uobject) {
- hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
- ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
- if (ibqp->send_cq != ibqp->recv_cq)
- hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
- hr_qp->qpn, NULL);
-
- hr_qp->rq.head = 0;
- hr_qp->rq.tail = 0;
- hr_qp->sq.head = 0;
- hr_qp->sq.tail = 0;
- }
-out:
- kfree(context);
- return ret;
-}
-
-static int hns_roce_v1_modify_qp(struct ib_qp *ibqp,
- const struct ib_qp_attr *attr, int attr_mask,
- enum ib_qp_state cur_state,
- enum ib_qp_state new_state)
-{
- if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
- return -EOPNOTSUPP;
-
- if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
- return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state,
- new_state);
- else
- return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state,
- new_state);
-}
-
-static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state)
-{
- switch (state) {
- case HNS_ROCE_QP_STATE_RST:
- return IB_QPS_RESET;
- case HNS_ROCE_QP_STATE_INIT:
- return IB_QPS_INIT;
- case HNS_ROCE_QP_STATE_RTR:
- return IB_QPS_RTR;
- case HNS_ROCE_QP_STATE_RTS:
- return IB_QPS_RTS;
- case HNS_ROCE_QP_STATE_SQD:
- return IB_QPS_SQD;
- case HNS_ROCE_QP_STATE_ERR:
- return IB_QPS_ERR;
- default:
- return IB_QPS_ERR;
- }
-}
-
-static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp,
- struct hns_roce_qp_context *hr_context)
-{
- struct hns_roce_cmd_mailbox *mailbox;
- int ret;
-
- mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
-
- ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
- HNS_ROCE_CMD_QUERY_QP,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
- if (!ret)
- memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
- else
- dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n");
-
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-
- return ret;
-}
-
-static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
- int qp_attr_mask,
- struct ib_qp_init_attr *qp_init_attr)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct hns_roce_sqp_context context;
- u32 addr;
-
- mutex_lock(&hr_qp->mutex);
-
- if (hr_qp->state == IB_QPS_RESET) {
- qp_attr->qp_state = IB_QPS_RESET;
- goto done;
- }
-
- addr = ROCEE_QP1C_CFG0_0_REG +
- hr_qp->port * sizeof(struct hns_roce_sqp_context);
- context.qp1c_bytes_4 = cpu_to_le32(roce_read(hr_dev, addr));
- context.sq_rq_bt_l = cpu_to_le32(roce_read(hr_dev, addr + 1));
- context.qp1c_bytes_12 = cpu_to_le32(roce_read(hr_dev, addr + 2));
- context.qp1c_bytes_16 = cpu_to_le32(roce_read(hr_dev, addr + 3));
- context.qp1c_bytes_20 = cpu_to_le32(roce_read(hr_dev, addr + 4));
- context.cur_rq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 5));
- context.qp1c_bytes_28 = cpu_to_le32(roce_read(hr_dev, addr + 6));
- context.qp1c_bytes_32 = cpu_to_le32(roce_read(hr_dev, addr + 7));
- context.cur_sq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 8));
- context.qp1c_bytes_40 = cpu_to_le32(roce_read(hr_dev, addr + 9));
-
- hr_qp->state = roce_get_field(context.qp1c_bytes_4,
- QP1C_BYTES_4_QP_STATE_M,
- QP1C_BYTES_4_QP_STATE_S);
- qp_attr->qp_state = hr_qp->state;
- qp_attr->path_mtu = IB_MTU_256;
- qp_attr->path_mig_state = IB_MIG_ARMED;
- qp_attr->qkey = QKEY_VAL;
- qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
- qp_attr->rq_psn = 0;
- qp_attr->sq_psn = 0;
- qp_attr->dest_qp_num = 1;
- qp_attr->qp_access_flags = 6;
-
- qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20,
- QP1C_BYTES_20_PKEY_IDX_M,
- QP1C_BYTES_20_PKEY_IDX_S);
- qp_attr->port_num = hr_qp->port + 1;
- qp_attr->sq_draining = 0;
- qp_attr->max_rd_atomic = 0;
- qp_attr->max_dest_rd_atomic = 0;
- qp_attr->min_rnr_timer = 0;
- qp_attr->timeout = 0;
- qp_attr->retry_cnt = 0;
- qp_attr->rnr_retry = 0;
- qp_attr->alt_timeout = 0;
-
-done:
- qp_attr->cur_qp_state = qp_attr->qp_state;
- qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
- qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
- qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
- qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
- qp_attr->cap.max_inline_data = 0;
- qp_init_attr->cap = qp_attr->cap;
- qp_init_attr->create_flags = 0;
-
- mutex_unlock(&hr_qp->mutex);
-
- return 0;
-}
-
-static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
- int qp_attr_mask,
- struct ib_qp_init_attr *qp_init_attr)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_qp_context *context;
- int tmp_qp_state;
- int ret = 0;
- int state;
-
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return -ENOMEM;
-
- memset(qp_attr, 0, sizeof(*qp_attr));
- memset(qp_init_attr, 0, sizeof(*qp_init_attr));
-
- mutex_lock(&hr_qp->mutex);
-
- if (hr_qp->state == IB_QPS_RESET) {
- qp_attr->qp_state = IB_QPS_RESET;
- goto done;
- }
-
- ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context);
- if (ret) {
- dev_err(dev, "query qpc error\n");
- ret = -EINVAL;
- goto out;
- }
-
- state = roce_get_field(context->qpc_bytes_144,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_S);
- tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state);
- if (tmp_qp_state == -1) {
- dev_err(dev, "to_ib_qp_state error\n");
- ret = -EINVAL;
- goto out;
- }
- hr_qp->state = (u8)tmp_qp_state;
- qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
- qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48,
- QP_CONTEXT_QPC_BYTES_48_MTU_M,
- QP_CONTEXT_QPC_BYTES_48_MTU_S);
- qp_attr->path_mig_state = IB_MIG_ARMED;
- qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
- if (hr_qp->ibqp.qp_type == IB_QPT_UD)
- qp_attr->qkey = QKEY_VAL;
-
- qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88,
- QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
- QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S);
- qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164,
- QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
- QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S);
- qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36,
- QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
- QP_CONTEXT_QPC_BYTES_36_DEST_QP_S);
- qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) |
- ((roce_get_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) |
- ((roce_get_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3);
-
- if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
- struct ib_global_route *grh =
- rdma_ah_retrieve_grh(&qp_attr->ah_attr);
-
- rdma_ah_set_sl(&qp_attr->ah_attr,
- roce_get_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_SL_M,
- QP_CONTEXT_QPC_BYTES_156_SL_S));
- rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
- grh->flow_label =
- roce_get_field(context->qpc_bytes_48,
- QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
- QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S);
- grh->sgid_index =
- roce_get_field(context->qpc_bytes_36,
- QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
- QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S);
- grh->hop_limit =
- roce_get_field(context->qpc_bytes_44,
- QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
- QP_CONTEXT_QPC_BYTES_44_HOPLMT_S);
- grh->traffic_class =
- roce_get_field(context->qpc_bytes_48,
- QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
- QP_CONTEXT_QPC_BYTES_48_TCLASS_S);
-
- memcpy(grh->dgid.raw, context->dgid,
- sizeof(grh->dgid.raw));
- }
-
- qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
- qp_attr->port_num = hr_qp->port + 1;
- qp_attr->sq_draining = 0;
- qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
- QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S);
- qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
- QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S);
- qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24,
- QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
- QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S));
- qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S));
- qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
- QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
- QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
- qp_attr->rnr_retry = (u8)le32_to_cpu(context->rnr_retry);
-
-done:
- qp_attr->cur_qp_state = qp_attr->qp_state;
- qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
- qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
-
- if (!ibqp->uobject) {
- qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
- qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
- } else {
- qp_attr->cap.max_send_wr = 0;
- qp_attr->cap.max_send_sge = 0;
- }
-
- qp_init_attr->cap = qp_attr->cap;
-
-out:
- mutex_unlock(&hr_qp->mutex);
- kfree(context);
- return ret;
-}
-
-static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
- int qp_attr_mask,
- struct ib_qp_init_attr *qp_init_attr)
-{
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
-
- return hr_qp->doorbell_qpn <= 1 ?
- hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) :
- hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
-}
-
-int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct hns_roce_cq *send_cq, *recv_cq;
- int ret;
-
- ret = hns_roce_v1_modify_qp(ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET);
- if (ret)
- return ret;
-
- send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
- recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
-
- hns_roce_lock_cqs(send_cq, recv_cq);
- if (!udata) {
- if (recv_cq)
- __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn,
- (hr_qp->ibqp.srq ?
- to_hr_srq(hr_qp->ibqp.srq) :
- NULL));
-
- if (send_cq && send_cq != recv_cq)
- __hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
- }
- hns_roce_qp_remove(hr_dev, hr_qp);
- hns_roce_unlock_cqs(send_cq, recv_cq);
-
- hns_roce_qp_destroy(hr_dev, hr_qp, udata);
-
- return 0;
-}
-
-static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
- struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
- struct device *dev = &hr_dev->pdev->dev;
- u32 cqe_cnt_ori;
- u32 cqe_cnt_cur;
- int wait_time = 0;
-
- /*
-	 * Before freeing the CQ buffer, we need to ensure that the outstanding
-	 * CQEs have been written, by checking the CQE counter.
- */
- cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
- while (1) {
- if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) &
- HNS_ROCE_CQE_WCMD_EMPTY_BIT)
- break;
-
- cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
- if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT)
- break;
-
- msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS);
- if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
- dev_warn(dev, "Destroy cq 0x%lx timeout!\n",
- hr_cq->cqn);
- break;
- }
- wait_time++;
- }
- return 0;
-}
-
-static void set_eq_cons_index_v1(struct hns_roce_eq *eq, u32 req_not)
-{
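-	/* The EQ doorbell encodes the consumer index together with the rearm
-	 * request bit at position log_entries.
-	 */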
- roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
- (req_not << eq->log_entries), eq->db_reg);
-}
-
-static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe, int qpn)
-{
- struct device *dev = &hr_dev->pdev->dev;
-
- dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
- switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
- case HNS_ROCE_LWQCE_QPC_ERROR:
- dev_warn(dev, "QP %d, QPC error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_MTU_ERROR:
- dev_warn(dev, "QP %d, MTU error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
- dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
- dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
- dev_warn(dev, "QP %d, WQE shift error\n", qpn);
- break;
- case HNS_ROCE_LWQCE_SL_ERROR:
- dev_warn(dev, "QP %d, SL error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_PORT_ERROR:
- dev_warn(dev, "QP %d, port error.\n", qpn);
- break;
- default:
- break;
- }
-}
-
-static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int qpn)
-{
- struct device *dev = &hr_dev->pdev->dev;
-
- dev_warn(dev, "Local Access Violation Work Queue Error.\n");
- switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
- case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
- dev_warn(dev, "QP %d, R_key violation.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_LENGTH_ERROR:
- dev_warn(dev, "QP %d, length error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_VA_ERROR:
- dev_warn(dev, "QP %d, VA error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_PD_ERROR:
- dev_err(dev, "QP %d, PD error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
- dev_warn(dev, "QP %d, rw acc error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
- dev_warn(dev, "QP %d, key state error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
- dev_warn(dev, "QP %d, MR operation error.\n", qpn);
- break;
- default:
- break;
- }
-}
-
-static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int event_type)
-{
- struct device *dev = &hr_dev->pdev->dev;
- int phy_port;
- int qpn;
-
- qpn = roce_get_field(aeqe->event.queue_event.num,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
- phy_port = roce_get_field(aeqe->event.queue_event.num,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
- if (qpn <= 1)
- qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
-
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
- "QP %d, phy_port %d.\n", qpn, phy_port);
- break;
- case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- hns_roce_v1_wq_catas_err_handle(hr_dev, aeqe, qpn);
- break;
- case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn);
- break;
- default:
- break;
- }
-
- hns_roce_qp_event(hr_dev, qpn, event_type);
-}
-
-static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int event_type)
-{
- struct device *dev = &hr_dev->pdev->dev;
- u32 cqn;
-
- cqn = roce_get_field(aeqe->event.queue_event.num,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S);
-
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- dev_warn(dev, "CQ 0x%x access err.\n", cqn);
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- dev_warn(dev, "CQ 0x%x overflow\n", cqn);
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
- dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
- break;
- default:
- break;
- }
-
- hns_roce_cq_event(hr_dev, cqn, event_type);
-}
-
-static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe)
-{
- struct device *dev = &hr_dev->pdev->dev;
-
- switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
- case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
- dev_warn(dev, "SDB overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
- dev_warn(dev, "SDB almost overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
- dev_warn(dev, "SDB almost empty.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
- dev_warn(dev, "ODB overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
- dev_warn(dev, "ODB almost overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
-		dev_warn(dev, "ODB almost empty.\n");
- break;
- default:
- break;
- }
-}
-
-static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry)
-{
- unsigned long off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQE_SIZE;
-
- return (struct hns_roce_aeqe *)((u8 *)
- (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
- off % HNS_ROCE_BA_SIZE);
-}
-
-static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq)
-{
- struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index);
-
- return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
- !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
-}
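/*
 * A minimal standalone sketch (not from the removed driver; the helper
 * names are illustrative) of the scheme used by get_aeqe_v1() and
 * next_aeqe_sw_v1() above, assuming a power-of-two ring size: entries live
 * in fixed-size buffer chunks, and an entry is "new" when its owner bit
 * differs from the phase implied by the consumer index.
 */
#include <stdbool.h>
#include <stdint.h>

static bool eqe_is_new(uint32_t owner_bit, uint32_t cons_index,
		       uint32_t entries)
{
	/* (cons_index & entries) flips every time the ring wraps */
	return (owner_bit ^ !!(cons_index & entries)) != 0;
}

static void eqe_locate(uint32_t idx, uint32_t entries, uint32_t eqe_size,
		       uint32_t chunk_size, uint32_t *chunk, uint32_t *offset)
{
	uint32_t byte_off = (idx & (entries - 1)) * eqe_size;

	*chunk = byte_off / chunk_size;		/* which buffer chunk */
	*offset = byte_off % chunk_size;	/* offset inside that chunk */
}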
-
-static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_aeqe *aeqe;
- int aeqes_found = 0;
- int event_type;
-
- while ((aeqe = next_aeqe_sw_v1(eq))) {
- /* Make sure we read the AEQ entry after we have checked the
- * ownership bit
- */
- dma_rmb();
-
- dev_dbg(dev, "aeqe = %pK, aeqe->asyn.event_type = 0x%lx\n",
- aeqe,
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
- event_type = roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_PATH_MIG:
- dev_warn(dev, "PATH MIG not supported\n");
- break;
- case HNS_ROCE_EVENT_TYPE_COMM_EST:
- dev_warn(dev, "COMMUNICATION established\n");
- break;
- case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
- dev_warn(dev, "SQ DRAINED not supported\n");
- break;
- case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
- dev_warn(dev, "PATH MIG failed\n");
- break;
- case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type);
- break;
- case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
- case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
- case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
-			dev_warn(dev, "SRQ not supported!\n");
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
- hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type);
- break;
- case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
- dev_warn(dev, "port change.\n");
- break;
- case HNS_ROCE_EVENT_TYPE_MB:
- hns_roce_cmd_event(hr_dev,
- le16_to_cpu(aeqe->event.cmd.token),
- aeqe->event.cmd.status,
- le64_to_cpu(aeqe->event.cmd.out_param
- ));
- break;
- case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
- hns_roce_v1_db_overflow_handle(hr_dev, aeqe);
- break;
- default:
- dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n",
- event_type, eq->eqn, eq->cons_index);
- break;
- }
-
- eq->cons_index++;
- aeqes_found = 1;
-
- if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1)
- eq->cons_index = 0;
- }
-
- set_eq_cons_index_v1(eq, 0);
-
- return aeqes_found;
-}
-
-static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry)
-{
- unsigned long off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQE_SIZE;
-
- return (struct hns_roce_ceqe *)((u8 *)
- (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
- off % HNS_ROCE_BA_SIZE);
-}
-
-static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq)
-{
- struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index);
-
- return (!!(roce_get_bit(ceqe->comp,
- HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
- (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
-}
-
-static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- struct hns_roce_ceqe *ceqe;
- int ceqes_found = 0;
- u32 cqn;
-
- while ((ceqe = next_ceqe_sw_v1(eq))) {
- /* Make sure we read CEQ entry after we have checked the
- * ownership bit
- */
- dma_rmb();
-
- cqn = roce_get_field(ceqe->comp,
- HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
- HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
- hns_roce_cq_completion(hr_dev, cqn);
-
- ++eq->cons_index;
- ceqes_found = 1;
-
- if (eq->cons_index >
- EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1)
- eq->cons_index = 0;
- }
-
- set_eq_cons_index_v1(eq, 0);
-
- return ceqes_found;
-}
-
-static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr)
-{
- struct hns_roce_eq *eq = eq_ptr;
- struct hns_roce_dev *hr_dev = eq->hr_dev;
- int int_work;
-
- if (eq->type_flag == HNS_ROCE_CEQ)
-		/* CEQ irq routine; CEQ is a pulse irq, no need to clear it */
- int_work = hns_roce_v1_ceq_int(hr_dev, eq);
- else
-		/* AEQ irq routine; AEQ is a pulse irq, no need to clear it */
- int_work = hns_roce_v1_aeq_int(hr_dev, eq);
-
- return IRQ_RETVAL(int_work);
-}
-
-static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
-{
- struct hns_roce_dev *hr_dev = dev_id;
- struct device *dev = &hr_dev->pdev->dev;
- int int_work = 0;
- u32 caepaemask_val;
- u32 cealmovf_val;
- u32 caepaest_val;
- u32 aeshift_val;
- u32 ceshift_val;
- u32 cemask_val;
- __le32 tmp;
- int i;
-
- /*
- * Abnormal interrupt:
-	 * AEQ overflow, ECC multi-bit error and CEQ overflow must be cleared
-	 * explicitly: mask the irq, clear its state, then cancel the mask
- */
- aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
- tmp = cpu_to_le32(aeshift_val);
-
- /* AEQE overflow */
- if (roce_get_bit(tmp,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
- dev_warn(dev, "AEQ overflow!\n");
-
- /* Set mask */
- caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- tmp = cpu_to_le32(caepaemask_val);
- roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_ENABLE);
- caepaemask_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
-
- /* Clear int state(INT_WC : write 1 clear) */
- caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
- tmp = cpu_to_le32(caepaest_val);
- roce_set_bit(tmp, ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
- caepaest_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
-
- /* Clear mask */
- caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- tmp = cpu_to_le32(caepaemask_val);
- roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_DISABLE);
- caepaemask_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
- }
-
- /* CEQ almost overflow */
- for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
- ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
- i * CEQ_REG_OFFSET);
- tmp = cpu_to_le32(ceshift_val);
-
- if (roce_get_bit(tmp,
- ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
- dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
- int_work++;
-
- /* Set mask */
- cemask_val = roce_read(hr_dev,
- ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET);
- tmp = cpu_to_le32(cemask_val);
- roce_set_bit(tmp,
- ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_ENABLE);
- cemask_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET, cemask_val);
-
- /* Clear int state(INT_WC : write 1 clear) */
- cealmovf_val = roce_read(hr_dev,
- ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
- i * CEQ_REG_OFFSET);
- tmp = cpu_to_le32(cealmovf_val);
- roce_set_bit(tmp,
- ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
- 1);
- cealmovf_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
- i * CEQ_REG_OFFSET, cealmovf_val);
-
- /* Clear mask */
- cemask_val = roce_read(hr_dev,
- ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET);
- tmp = cpu_to_le32(cemask_val);
- roce_set_bit(tmp,
- ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_DISABLE);
- cemask_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET, cemask_val);
- }
- }
-
- /* ECC multi-bit error alarm */
- dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
- roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
- roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
- roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));
-
- dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
- roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
- roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
- roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));
-
- return IRQ_RETVAL(int_work);
-}
-
-static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev)
-{
- u32 aemask_val;
- int masken = 0;
- __le32 tmp;
- int i;
-
- /* AEQ INT */
- aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- tmp = cpu_to_le32(aemask_val);
- roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
- masken);
- roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
- aemask_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);
-
- /* CEQ INT */
- for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
- /* IRQ mask */
- roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET, masken);
- }
-}
-
-static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
- HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
- int i;
-
- if (!eq->buf_list)
- return;
-
- for (i = 0; i < npages; ++i)
- dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
- eq->buf_list[i].buf, eq->buf_list[i].map);
-
- kfree(eq->buf_list);
-}
-
-static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
- int enable_flag)
-{
- void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
- __le32 tmp;
- u32 val;
-
- val = readl(eqc);
- tmp = cpu_to_le32(val);
-
- if (enable_flag)
- roce_set_field(tmp,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
- HNS_ROCE_EQ_STAT_VALID);
- else
- roce_set_field(tmp,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
- HNS_ROCE_EQ_STAT_INVALID);
-
- val = le32_to_cpu(tmp);
- writel(val, eqc);
-}
-
-static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
- struct device *dev = &hr_dev->pdev->dev;
- dma_addr_t tmp_dma_addr;
- u32 eqcuridx_val;
- u32 eqconsindx_val;
- u32 eqshift_val;
- __le32 tmp2 = 0;
- __le32 tmp1 = 0;
- __le32 tmp = 0;
- int num_bas;
- int ret;
- int i;
-
- num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
- HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
-
- if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
-		dev_err(dev, "eq buf size %d exceeds ba size (%d), need %d bas\n",
- (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
- num_bas);
- return -EINVAL;
- }
-
- eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
- if (!eq->buf_list)
- return -ENOMEM;
-
- for (i = 0; i < num_bas; ++i) {
- eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
- &tmp_dma_addr,
- GFP_KERNEL);
- if (!eq->buf_list[i].buf) {
- ret = -ENOMEM;
- goto err_out_free_pages;
- }
-
- eq->buf_list[i].map = tmp_dma_addr;
- }
- eq->cons_index = 0;
- roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
- HNS_ROCE_EQ_STAT_INVALID);
- roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
- eq->log_entries);
- eqshift_val = le32_to_cpu(tmp);
- writel(eqshift_val, eqc);
-
- /* Configure eq extended address 12~44bit */
- writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);
-
- /*
- * Configure eq extended address 45~49 bit.
-	 * 44 = 32 + 12: the address is shifted right by 12 because the
-	 * hardware works on 4K pages, and by a further 32 to extract the
-	 * high 32-bit part written to the hardware.
- */
- roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
- ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
- eq->buf_list[0].map >> 44);
- roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
- ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
- eqcuridx_val = le32_to_cpu(tmp1);
- writel(eqcuridx_val, eqc + 8);
-
- /* Configure eq consumer index */
- roce_set_field(tmp2, ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
- ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
- eqconsindx_val = le32_to_cpu(tmp2);
- writel(eqconsindx_val, eqc + 0xc);
-
- return 0;
-
-err_out_free_pages:
- for (i -= 1; i >= 0; i--)
- dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
- eq->buf_list[i].map);
-
- kfree(eq->buf_list);
- return ret;
-}
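/*
 * A minimal standalone sketch (not from the removed driver; the function
 * name is illustrative) of the base-address split performed when
 * programming the EQC above: with a 4K-aligned buffer, bits 43:12 of the
 * DMA address are written to one register and the bits above 43 to the
 * next one.
 */
#include <stdint.h>

static void eq_base_split(uint64_t dma_addr, uint32_t *ba_l, uint32_t *ba_h)
{
	*ba_l = (uint32_t)(dma_addr >> 12);	/* 4K-page granule, bits 43:12 */
	*ba_h = (uint32_t)(dma_addr >> 44);	/* remaining high bits */
}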
-
-static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_eq *eq;
- int irq_num;
- int eq_num;
- int ret;
- int i, j;
-
- eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
- irq_num = eq_num + hr_dev->caps.num_other_vectors;
-
- eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
- if (!eq_table->eq)
- return -ENOMEM;
-
- eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
- GFP_KERNEL);
- if (!eq_table->eqc_base) {
- ret = -ENOMEM;
- goto err_eqc_base_alloc_fail;
- }
-
- for (i = 0; i < eq_num; i++) {
- eq = &eq_table->eq[i];
- eq->hr_dev = hr_dev;
- eq->eqn = i;
- eq->irq = hr_dev->irq[i];
- eq->log_page_size = PAGE_SHIFT;
-
- if (i < hr_dev->caps.num_comp_vectors) {
- /* CEQ */
- eq_table->eqc_base[i] = hr_dev->reg_base +
- ROCEE_CAEP_CEQC_SHIFT_0_REG +
- CEQ_REG_OFFSET * i;
- eq->type_flag = HNS_ROCE_CEQ;
- eq->db_reg = hr_dev->reg_base +
- ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
- CEQ_REG_OFFSET * i;
- eq->entries = hr_dev->caps.ceqe_depth;
- eq->log_entries = ilog2(eq->entries);
- eq->eqe_size = HNS_ROCE_CEQE_SIZE;
- } else {
- /* AEQ */
- eq_table->eqc_base[i] = hr_dev->reg_base +
- ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
- eq->type_flag = HNS_ROCE_AEQ;
- eq->db_reg = hr_dev->reg_base +
- ROCEE_CAEP_AEQE_CONS_IDX_REG;
- eq->entries = hr_dev->caps.aeqe_depth;
- eq->log_entries = ilog2(eq->entries);
- eq->eqe_size = HNS_ROCE_AEQE_SIZE;
- }
- }
-
- /* Disable irq */
- hns_roce_v1_int_mask_enable(hr_dev);
-
- /* Configure ce int interval */
- roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
- HNS_ROCE_CEQ_DEFAULT_INTERVAL);
-
- /* Configure ce int burst num */
- roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
- HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
-
- for (i = 0; i < eq_num; i++) {
- ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]);
- if (ret) {
- dev_err(dev, "eq create failed\n");
- goto err_create_eq_fail;
- }
- }
-
- for (j = 0; j < irq_num; j++) {
- if (j < eq_num)
- ret = request_irq(hr_dev->irq[j],
- hns_roce_v1_msix_interrupt_eq, 0,
- hr_dev->irq_names[j],
- &eq_table->eq[j]);
- else
- ret = request_irq(hr_dev->irq[j],
- hns_roce_v1_msix_interrupt_abn, 0,
- hr_dev->irq_names[j], hr_dev);
-
- if (ret) {
- dev_err(dev, "request irq error!\n");
- goto err_request_irq_fail;
- }
- }
-
- for (i = 0; i < eq_num; i++)
- hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE);
-
- return 0;
-
-err_request_irq_fail:
- for (j -= 1; j >= 0; j--)
- free_irq(hr_dev->irq[j], &eq_table->eq[j]);
-
-err_create_eq_fail:
- for (i -= 1; i >= 0; i--)
- hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
-
- kfree(eq_table->eqc_base);
-
-err_eqc_base_alloc_fail:
- kfree(eq_table->eq);
-
- return ret;
-}
-
-static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
- int irq_num;
- int eq_num;
- int i;
-
- eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
- irq_num = eq_num + hr_dev->caps.num_other_vectors;
- for (i = 0; i < eq_num; i++) {
- /* Disable EQ */
- hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE);
-
- free_irq(hr_dev->irq[i], &eq_table->eq[i]);
-
- hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
- }
- for (i = eq_num; i < irq_num; i++)
- free_irq(hr_dev->irq[i], hr_dev);
-
- kfree(eq_table->eqc_base);
- kfree(eq_table->eq);
-}
-
-static const struct ib_device_ops hns_roce_v1_dev_ops = {
- .destroy_qp = hns_roce_v1_destroy_qp,
- .poll_cq = hns_roce_v1_poll_cq,
- .post_recv = hns_roce_v1_post_recv,
- .post_send = hns_roce_v1_post_send,
- .query_qp = hns_roce_v1_query_qp,
- .req_notify_cq = hns_roce_v1_req_notify_cq,
-};
-
-static const struct hns_roce_hw hns_roce_hw_v1 = {
- .reset = hns_roce_v1_reset,
- .hw_profile = hns_roce_v1_profile,
- .hw_init = hns_roce_v1_init,
- .hw_exit = hns_roce_v1_exit,
- .post_mbox = hns_roce_v1_post_mbox,
- .poll_mbox_done = hns_roce_v1_chk_mbox,
- .set_gid = hns_roce_v1_set_gid,
- .set_mac = hns_roce_v1_set_mac,
- .set_mtu = hns_roce_v1_set_mtu,
- .write_mtpt = hns_roce_v1_write_mtpt,
- .write_cqc = hns_roce_v1_write_cqc,
- .set_hem = hns_roce_v1_set_hem,
- .clear_hem = hns_roce_v1_clear_hem,
- .modify_qp = hns_roce_v1_modify_qp,
- .dereg_mr = hns_roce_v1_dereg_mr,
- .destroy_cq = hns_roce_v1_destroy_cq,
- .init_eq = hns_roce_v1_init_eq_table,
- .cleanup_eq = hns_roce_v1_cleanup_eq_table,
- .hns_roce_dev_ops = &hns_roce_v1_dev_ops,
-};
-
-static const struct of_device_id hns_roce_of_match[] = {
- { .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
- {},
-};
-MODULE_DEVICE_TABLE(of, hns_roce_of_match);
-
-static const struct acpi_device_id hns_roce_acpi_match[] = {
- { "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
- {},
-};
-MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
-
-static struct
-platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
-{
- struct device *dev;
-
- /* get the 'device' corresponding to the matching 'fwnode' */
- dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);
- /* get the platform device */
- return dev ? to_platform_device(dev) : NULL;
-}
-
-static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
-{
- struct device *dev = &hr_dev->pdev->dev;
- struct platform_device *pdev = NULL;
- struct net_device *netdev = NULL;
- struct device_node *net_node;
- int port_cnt = 0;
- u8 phy_port;
- int ret;
- int i;
-
- /* check if we are compatible with the underlying SoC */
- if (dev_of_node(dev)) {
- const struct of_device_id *of_id;
-
- of_id = of_match_node(hns_roce_of_match, dev->of_node);
- if (!of_id) {
- dev_err(dev, "device is not compatible!\n");
- return -ENXIO;
- }
- hr_dev->hw = (const struct hns_roce_hw *)of_id->data;
- if (!hr_dev->hw) {
- dev_err(dev, "couldn't get H/W specific DT data!\n");
- return -ENXIO;
- }
- } else if (is_acpi_device_node(dev->fwnode)) {
- const struct acpi_device_id *acpi_id;
-
- acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
- if (!acpi_id) {
- dev_err(dev, "device is not compatible!\n");
- return -ENXIO;
- }
- hr_dev->hw = (const struct hns_roce_hw *) acpi_id->driver_data;
- if (!hr_dev->hw) {
- dev_err(dev, "couldn't get H/W specific ACPI data!\n");
- return -ENXIO;
- }
- } else {
- dev_err(dev, "can't read compatibility data from DT or ACPI\n");
- return -ENXIO;
- }
-
- /* get the mapped register base address */
- hr_dev->reg_base = devm_platform_ioremap_resource(hr_dev->pdev, 0);
- if (IS_ERR(hr_dev->reg_base))
- return PTR_ERR(hr_dev->reg_base);
-
- /* read the node_guid of IB device from the DT or ACPI */
- ret = device_property_read_u8_array(dev, "node-guid",
- (u8 *)&hr_dev->ib_dev.node_guid,
- GUID_LEN);
- if (ret) {
- dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
- return ret;
- }
-
- /* get the RoCE associated ethernet ports or netdevices */
- for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
- if (dev_of_node(dev)) {
- net_node = of_parse_phandle(dev->of_node, "eth-handle",
- i);
- if (!net_node)
- continue;
- pdev = of_find_device_by_node(net_node);
- } else if (is_acpi_device_node(dev->fwnode)) {
- struct fwnode_reference_args args;
-
- ret = acpi_node_get_property_reference(dev->fwnode,
- "eth-handle",
- i, &args);
- if (ret)
- continue;
- pdev = hns_roce_find_pdev(args.fwnode);
- } else {
- dev_err(dev, "cannot read data from DT or ACPI\n");
- return -ENXIO;
- }
-
- if (pdev) {
- netdev = platform_get_drvdata(pdev);
- phy_port = (u8)i;
- if (netdev) {
- hr_dev->iboe.netdevs[port_cnt] = netdev;
- hr_dev->iboe.phy_port[port_cnt] = phy_port;
- } else {
- dev_err(dev, "no netdev found with pdev %s\n",
- pdev->name);
- return -ENODEV;
- }
- port_cnt++;
- }
- }
-
- if (port_cnt == 0) {
- dev_err(dev, "unable to get eth-handle for available ports!\n");
- return -EINVAL;
- }
-
- hr_dev->caps.num_ports = port_cnt;
-
- /* cmd issue mode: 0 is poll, 1 is event */
- hr_dev->cmd_mod = 1;
- hr_dev->loop_idc = 0;
- hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
- hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG;
-
- /* read the interrupt names from the DT or ACPI */
- ret = device_property_read_string_array(dev, "interrupt-names",
- hr_dev->irq_names,
- HNS_ROCE_V1_MAX_IRQ_NUM);
- if (ret < 0) {
- dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
- return ret;
- }
-
- /* fetch the interrupt numbers */
- for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
- hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
- if (hr_dev->irq[i] <= 0)
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * hns_roce_probe - RoCE driver entry point
- * @pdev: pointer to platform device
- *
- * Return: 0 on success, otherwise a negative error code
- */
-static int hns_roce_probe(struct platform_device *pdev)
-{
- int ret;
- struct hns_roce_dev *hr_dev;
- struct device *dev = &pdev->dev;
-
- hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
- if (!hr_dev)
- return -ENOMEM;
-
- hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL);
- if (!hr_dev->priv) {
- ret = -ENOMEM;
- goto error_failed_kzalloc;
- }
-
- hr_dev->pdev = pdev;
- hr_dev->dev = dev;
- platform_set_drvdata(pdev, hr_dev);
-
- if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
-		dev_err(dev, "No usable DMA addressing mode\n");
- ret = -EIO;
- goto error_failed_get_cfg;
- }
-
- ret = hns_roce_get_cfg(hr_dev);
- if (ret) {
- dev_err(dev, "Get Configuration failed!\n");
- goto error_failed_get_cfg;
- }
-
- ret = hns_roce_init(hr_dev);
- if (ret) {
- dev_err(dev, "RoCE engine init failed!\n");
- goto error_failed_get_cfg;
- }
-
- return 0;
-
-error_failed_get_cfg:
- kfree(hr_dev->priv);
-
-error_failed_kzalloc:
- ib_dealloc_device(&hr_dev->ib_dev);
-
- return ret;
-}
-
-/**
- * hns_roce_remove - remove RoCE device
- * @pdev: pointer to platform device
- */
-static int hns_roce_remove(struct platform_device *pdev)
-{
- struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
-
- hns_roce_exit(hr_dev);
- kfree(hr_dev->priv);
- ib_dealloc_device(&hr_dev->ib_dev);
-
- return 0;
-}
-
-static struct platform_driver hns_roce_driver = {
- .probe = hns_roce_probe,
- .remove = hns_roce_remove,
- .driver = {
- .name = DRV_NAME,
- .of_match_table = hns_roce_of_match,
- .acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
- },
-};
-
-module_platform_driver(hns_roce_driver);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
-MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
-MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
-MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
deleted file mode 100644
index 60fdcbae6729..000000000000
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ /dev/null
@@ -1,1147 +0,0 @@
-/*
- * Copyright (c) 2016 Hisilicon Limited.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _HNS_ROCE_HW_V1_H
-#define _HNS_ROCE_HW_V1_H
-
-#define CQ_STATE_VALID 2
-
-#define HNS_ROCE_V1_MAX_PD_NUM 0x8000
-#define HNS_ROCE_V1_MAX_CQ_NUM 0x10000
-#define HNS_ROCE_V1_MAX_CQE_NUM 0x8000
-
-#define HNS_ROCE_V1_MAX_QP_NUM 0x40000
-#define HNS_ROCE_V1_MAX_WQE_NUM 0x4000
-
-#define HNS_ROCE_V1_MAX_MTPT_NUM 0x80000
-
-#define HNS_ROCE_V1_MAX_MTT_SEGS 0x100000
-
-#define HNS_ROCE_V1_MAX_QP_INIT_RDMA 128
-#define HNS_ROCE_V1_MAX_QP_DEST_RDMA 128
-
-#define HNS_ROCE_V1_MAX_SQ_DESC_SZ 64
-#define HNS_ROCE_V1_MAX_RQ_DESC_SZ 64
-#define HNS_ROCE_V1_SG_NUM 2
-#define HNS_ROCE_V1_INLINE_SIZE 32
-
-#define HNS_ROCE_V1_UAR_NUM 256
-#define HNS_ROCE_V1_PHY_UAR_NUM 8
-
-#define HNS_ROCE_V1_GID_NUM 16
-#define HNS_ROCE_V1_RESV_QP 8
-
-#define HNS_ROCE_V1_MAX_IRQ_NUM 34
-#define HNS_ROCE_V1_COMP_VEC_NUM 32
-#define HNS_ROCE_V1_AEQE_VEC_NUM 1
-#define HNS_ROCE_V1_ABNORMAL_VEC_NUM 1
-
-#define HNS_ROCE_V1_COMP_EQE_NUM 0x8000
-#define HNS_ROCE_V1_ASYNC_EQE_NUM 0x400
-
-#define HNS_ROCE_V1_QPC_SIZE 256
-#define HNS_ROCE_V1_IRRL_ENTRY_SIZE 8
-#define HNS_ROCE_V1_CQC_ENTRY_SIZE 64
-#define HNS_ROCE_V1_MTPT_ENTRY_SIZE 64
-#define HNS_ROCE_V1_MTT_ENTRY_SIZE 64
-
-#define HNS_ROCE_V1_CQE_SIZE 32
-#define HNS_ROCE_V1_PAGE_SIZE_SUPPORT 0xFFFFF000
-
-#define HNS_ROCE_V1_TABLE_CHUNK_SIZE (1 << 17)
-
-#define HNS_ROCE_V1_EXT_RAQ_WF 8
-#define HNS_ROCE_V1_RAQ_ENTRY 64
-#define HNS_ROCE_V1_RAQ_DEPTH 32768
-#define HNS_ROCE_V1_RAQ_SIZE (HNS_ROCE_V1_RAQ_ENTRY * HNS_ROCE_V1_RAQ_DEPTH)
-
-#define HNS_ROCE_V1_SDB_DEPTH 0x400
-#define HNS_ROCE_V1_ODB_DEPTH 0x400
-
-#define HNS_ROCE_V1_DB_RSVD 0x80
-
-#define HNS_ROCE_V1_SDB_ALEPT HNS_ROCE_V1_DB_RSVD
-#define HNS_ROCE_V1_SDB_ALFUL (HNS_ROCE_V1_SDB_DEPTH - HNS_ROCE_V1_DB_RSVD)
-#define HNS_ROCE_V1_ODB_ALEPT HNS_ROCE_V1_DB_RSVD
-#define HNS_ROCE_V1_ODB_ALFUL (HNS_ROCE_V1_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)
-
-#define HNS_ROCE_V1_EXT_SDB_DEPTH 0x4000
-#define HNS_ROCE_V1_EXT_ODB_DEPTH 0x4000
-#define HNS_ROCE_V1_EXT_SDB_ENTRY 16
-#define HNS_ROCE_V1_EXT_ODB_ENTRY 16
-#define HNS_ROCE_V1_EXT_SDB_SIZE \
- (HNS_ROCE_V1_EXT_SDB_DEPTH * HNS_ROCE_V1_EXT_SDB_ENTRY)
-#define HNS_ROCE_V1_EXT_ODB_SIZE \
- (HNS_ROCE_V1_EXT_ODB_DEPTH * HNS_ROCE_V1_EXT_ODB_ENTRY)
-
-#define HNS_ROCE_V1_EXT_SDB_ALEPT HNS_ROCE_V1_DB_RSVD
-#define HNS_ROCE_V1_EXT_SDB_ALFUL \
- (HNS_ROCE_V1_EXT_SDB_DEPTH - HNS_ROCE_V1_DB_RSVD)
-#define HNS_ROCE_V1_EXT_ODB_ALEPT HNS_ROCE_V1_DB_RSVD
-#define HNS_ROCE_V1_EXT_ODB_ALFUL \
- (HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)
-
-#define HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS 50000
-#define HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS 10000
-#define HNS_ROCE_V1_FREE_MR_WAIT_VALUE 5
-#define HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE 20
-
-#define HNS_ROCE_BT_RSV_BUF_SIZE (1 << 17)
-
-#define HNS_ROCE_V1_TPTR_ENTRY_SIZE 2
-#define HNS_ROCE_V1_TPTR_BUF_SIZE \
- (HNS_ROCE_V1_TPTR_ENTRY_SIZE * HNS_ROCE_V1_MAX_CQ_NUM)
-
-#define HNS_ROCE_ODB_POLL_MODE 0
-
-#define HNS_ROCE_SDB_NORMAL_MODE 0
-#define HNS_ROCE_SDB_EXTEND_MODE 1
-
-#define HNS_ROCE_ODB_EXTEND_MODE 1
-
-#define KEY_VALID 0x02
-
-#define HNS_ROCE_CQE_QPN_MASK 0x3ffff
-#define HNS_ROCE_CQE_STATUS_MASK 0x1f
-#define HNS_ROCE_CQE_OPCODE_MASK 0xf
-
-#define HNS_ROCE_CQE_SUCCESS 0x00
-#define HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR 0x01
-#define HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR 0x02
-#define HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR 0x03
-#define HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR 0x04
-#define HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR 0x05
-#define HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR 0x06
-#define HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR 0x07
-#define HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR 0x08
-#define HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR 0x09
-#define HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR 0x0a
-#define HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR 0x0b
-#define HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR 0x0c
-
-#define QP1C_CFGN_OFFSET 0x28
-#define PHY_PORT_OFFSET 0x8
-#define MTPT_IDX_SHIFT 16
-#define ALL_PORT_VAL_OPEN 0x3f
-#define POL_TIME_INTERVAL_VAL 0x80
-#define SLEEP_TIME_INTERVAL 20
-#define SQ_PSN_SHIFT 8
-#define QKEY_VAL 0x80010000
-#define SDB_INV_CNT_OFFSET 8
-
-#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x10
-#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x10
-
-#define HNS_ROCE_INT_MASK_DISABLE 0
-#define HNS_ROCE_INT_MASK_ENABLE 1
-
-#define CEQ_REG_OFFSET 0x18
-
-#define HNS_ROCE_CEQE_CEQE_COMP_OWNER_S 0
-
-#define HNS_ROCE_V1_CONS_IDX_M GENMASK(15, 0)
-
-#define HNS_ROCE_CEQE_CEQE_COMP_CQN_S 16
-#define HNS_ROCE_CEQE_CEQE_COMP_CQN_M GENMASK(31, 16)
-
-#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S 16
-#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M GENMASK(23, 16)
-
-#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S 24
-#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M GENMASK(30, 24)
-
-#define HNS_ROCE_AEQE_U32_4_OWNER_S 31
-
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S 0
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M GENMASK(23, 0)
-
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M GENMASK(27, 25)
-
-#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0
-#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M GENMASK(15, 0)
-
-#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S 0
-#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M GENMASK(4, 0)
-
-/* Local Work Queue Catastrophic Error, SUBTYPE 0x5 */
-enum {
- HNS_ROCE_LWQCE_QPC_ERROR = 1,
- HNS_ROCE_LWQCE_MTU_ERROR,
- HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR,
- HNS_ROCE_LWQCE_WQE_ADDR_ERROR,
- HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR,
- HNS_ROCE_LWQCE_SL_ERROR,
- HNS_ROCE_LWQCE_PORT_ERROR,
-};
-
-/* Local Access Violation Work Queue Error, SUBTYPE 0x7 */
-enum {
- HNS_ROCE_LAVWQE_R_KEY_VIOLATION = 1,
- HNS_ROCE_LAVWQE_LENGTH_ERROR,
- HNS_ROCE_LAVWQE_VA_ERROR,
- HNS_ROCE_LAVWQE_PD_ERROR,
- HNS_ROCE_LAVWQE_RW_ACC_ERROR,
- HNS_ROCE_LAVWQE_KEY_STATE_ERROR,
- HNS_ROCE_LAVWQE_MR_OPERATION_ERROR,
-};
-
-/* DOORBELL overflow subtype */
-enum {
- HNS_ROCE_DB_SUBTYPE_SDB_OVF = 1,
- HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF,
- HNS_ROCE_DB_SUBTYPE_ODB_OVF,
- HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF,
- HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP,
- HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP,
-};
-
-enum {
- /* RQ&SRQ related operations */
- HNS_ROCE_OPCODE_SEND_DATA_RECEIVE = 0x06,
- HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE,
-};
-
-enum {
- HNS_ROCE_PORT_DOWN = 0,
- HNS_ROCE_PORT_UP,
-};
-
-struct hns_roce_cq_context {
- __le32 cqc_byte_4;
- __le32 cq_bt_l;
- __le32 cqc_byte_12;
- __le32 cur_cqe_ba0_l;
- __le32 cqc_byte_20;
- __le32 cqe_tptr_addr_l;
- __le32 cur_cqe_ba1_l;
- __le32 cqc_byte_32;
-};
-
-#define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S 0
-#define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M \
- (((1UL << 2) - 1) << CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S)
-
-#define CQ_CONTEXT_CQC_BYTE_4_CQN_S 16
-#define CQ_CONTEXT_CQC_BYTE_4_CQN_M \
- (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_4_CQN_S)
-
-#define CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S 0
-#define CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M \
- (((1UL << 17) - 1) << CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S)
-
-#define CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S 20
-#define CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M \
- (((1UL << 4) - 1) << CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S)
-
-#define CQ_CONTEXT_CQC_BYTE_12_CEQN_S 24
-#define CQ_CONTEXT_CQC_BYTE_12_CEQN_M \
- (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_12_CEQN_S)
-
-#define CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S 0
-#define CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M \
- (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S)
-
-#define CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S 16
-#define CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M \
- (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S)
-
-#define CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S 8
-#define CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M \
- (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S)
-
-#define CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S 0
-#define CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M \
- (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S)
-
-#define CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S 9
-
-#define CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S 8
-#define CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S 14
-#define CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S 15
-
-#define CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S 16
-#define CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M \
- (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S)
-
-struct hns_roce_cqe {
- __le32 cqe_byte_4;
- union {
- __le32 r_key;
- __le32 immediate_data;
- };
- __le32 byte_cnt;
- __le32 cqe_byte_16;
- __le32 cqe_byte_20;
- __le32 s_mac_l;
- __le32 cqe_byte_28;
- __le32 reserved;
-};
-
-#define CQE_BYTE_4_OWNER_S 7
-#define CQE_BYTE_4_SQ_RQ_FLAG_S 14
-
-#define CQE_BYTE_4_STATUS_OF_THE_OPERATION_S 8
-#define CQE_BYTE_4_STATUS_OF_THE_OPERATION_M \
- (((1UL << 5) - 1) << CQE_BYTE_4_STATUS_OF_THE_OPERATION_S)
-
-#define CQE_BYTE_4_WQE_INDEX_S 16
-#define CQE_BYTE_4_WQE_INDEX_M (((1UL << 14) - 1) << CQE_BYTE_4_WQE_INDEX_S)
-
-#define CQE_BYTE_4_OPERATION_TYPE_S 0
-#define CQE_BYTE_4_OPERATION_TYPE_M \
- (((1UL << 4) - 1) << CQE_BYTE_4_OPERATION_TYPE_S)
-
-#define CQE_BYTE_4_IMM_INDICATOR_S 15
-
-#define CQE_BYTE_16_LOCAL_QPN_S 0
-#define CQE_BYTE_16_LOCAL_QPN_M (((1UL << 24) - 1) << CQE_BYTE_16_LOCAL_QPN_S)
-
-#define CQE_BYTE_20_PORT_NUM_S 26
-#define CQE_BYTE_20_PORT_NUM_M (((1UL << 3) - 1) << CQE_BYTE_20_PORT_NUM_S)
-
-#define CQE_BYTE_20_SL_S 24
-#define CQE_BYTE_20_SL_M (((1UL << 2) - 1) << CQE_BYTE_20_SL_S)
-
-#define CQE_BYTE_20_REMOTE_QPN_S 0
-#define CQE_BYTE_20_REMOTE_QPN_M \
- (((1UL << 24) - 1) << CQE_BYTE_20_REMOTE_QPN_S)
-
-#define CQE_BYTE_20_GRH_PRESENT_S 29
-
-#define CQE_BYTE_28_P_KEY_IDX_S 16
-#define CQE_BYTE_28_P_KEY_IDX_M (((1UL << 16) - 1) << CQE_BYTE_28_P_KEY_IDX_S)
-
-#define CQ_DB_REQ_NOT_SOL 0
-#define CQ_DB_REQ_NOT (1 << 16)
-
-struct hns_roce_v1_mpt_entry {
- __le32 mpt_byte_4;
- __le32 pbl_addr_l;
- __le32 mpt_byte_12;
- __le32 virt_addr_l;
- __le32 virt_addr_h;
- __le32 length;
- __le32 mpt_byte_28;
- __le32 pa0_l;
- __le32 mpt_byte_36;
- __le32 mpt_byte_40;
- __le32 mpt_byte_44;
- __le32 mpt_byte_48;
- __le32 pa4_l;
- __le32 mpt_byte_56;
- __le32 mpt_byte_60;
- __le32 mpt_byte_64;
-};
-
-#define MPT_BYTE_4_KEY_STATE_S 0
-#define MPT_BYTE_4_KEY_STATE_M (((1UL << 2) - 1) << MPT_BYTE_4_KEY_STATE_S)
-
-#define MPT_BYTE_4_KEY_S 8
-#define MPT_BYTE_4_KEY_M (((1UL << 8) - 1) << MPT_BYTE_4_KEY_S)
-
-#define MPT_BYTE_4_PAGE_SIZE_S 16
-#define MPT_BYTE_4_PAGE_SIZE_M (((1UL << 2) - 1) << MPT_BYTE_4_PAGE_SIZE_S)
-
-#define MPT_BYTE_4_MW_TYPE_S 20
-
-#define MPT_BYTE_4_MW_BIND_ENABLE_S 21
-
-#define MPT_BYTE_4_OWN_S 22
-
-#define MPT_BYTE_4_MEMORY_LOCATION_TYPE_S 24
-#define MPT_BYTE_4_MEMORY_LOCATION_TYPE_M \
- (((1UL << 2) - 1) << MPT_BYTE_4_MEMORY_LOCATION_TYPE_S)
-
-#define MPT_BYTE_4_REMOTE_ATOMIC_S 26
-#define MPT_BYTE_4_LOCAL_WRITE_S 27
-#define MPT_BYTE_4_REMOTE_WRITE_S 28
-#define MPT_BYTE_4_REMOTE_READ_S 29
-#define MPT_BYTE_4_REMOTE_INVAL_ENABLE_S 30
-#define MPT_BYTE_4_ADDRESS_TYPE_S 31
-
-#define MPT_BYTE_12_PBL_ADDR_H_S 0
-#define MPT_BYTE_12_PBL_ADDR_H_M \
- (((1UL << 17) - 1) << MPT_BYTE_12_PBL_ADDR_H_S)
-
-#define MPT_BYTE_12_MW_BIND_COUNTER_S 17
-#define MPT_BYTE_12_MW_BIND_COUNTER_M \
- (((1UL << 15) - 1) << MPT_BYTE_12_MW_BIND_COUNTER_S)
-
-#define MPT_BYTE_28_PD_S 0
-#define MPT_BYTE_28_PD_M (((1UL << 16) - 1) << MPT_BYTE_28_PD_S)
-
-#define MPT_BYTE_28_L_KEY_IDX_L_S 16
-#define MPT_BYTE_28_L_KEY_IDX_L_M \
- (((1UL << 16) - 1) << MPT_BYTE_28_L_KEY_IDX_L_S)
-
-#define MPT_BYTE_36_PA0_H_S 0
-#define MPT_BYTE_36_PA0_H_M (((1UL << 5) - 1) << MPT_BYTE_36_PA0_H_S)
-
-#define MPT_BYTE_36_PA1_L_S 8
-#define MPT_BYTE_36_PA1_L_M (((1UL << 24) - 1) << MPT_BYTE_36_PA1_L_S)
-
-#define MPT_BYTE_40_PA1_H_S 0
-#define MPT_BYTE_40_PA1_H_M (((1UL << 13) - 1) << MPT_BYTE_40_PA1_H_S)
-
-#define MPT_BYTE_40_PA2_L_S 16
-#define MPT_BYTE_40_PA2_L_M (((1UL << 16) - 1) << MPT_BYTE_40_PA2_L_S)
-
-#define MPT_BYTE_44_PA2_H_S 0
-#define MPT_BYTE_44_PA2_H_M (((1UL << 21) - 1) << MPT_BYTE_44_PA2_H_S)
-
-#define MPT_BYTE_44_PA3_L_S 24
-#define MPT_BYTE_44_PA3_L_M (((1UL << 8) - 1) << MPT_BYTE_44_PA3_L_S)
-
-#define MPT_BYTE_48_PA3_H_S 0
-#define MPT_BYTE_48_PA3_H_M (((1UL << 29) - 1) << MPT_BYTE_48_PA3_H_S)
-
-#define MPT_BYTE_56_PA4_H_S 0
-#define MPT_BYTE_56_PA4_H_M (((1UL << 5) - 1) << MPT_BYTE_56_PA4_H_S)
-
-#define MPT_BYTE_56_PA5_L_S 8
-#define MPT_BYTE_56_PA5_L_M (((1UL << 24) - 1) << MPT_BYTE_56_PA5_L_S)
-
-#define MPT_BYTE_60_PA5_H_S 0
-#define MPT_BYTE_60_PA5_H_M (((1UL << 13) - 1) << MPT_BYTE_60_PA5_H_S)
-
-#define MPT_BYTE_60_PA6_L_S 16
-#define MPT_BYTE_60_PA6_L_M (((1UL << 16) - 1) << MPT_BYTE_60_PA6_L_S)
-
-#define MPT_BYTE_64_PA6_H_S 0
-#define MPT_BYTE_64_PA6_H_M (((1UL << 21) - 1) << MPT_BYTE_64_PA6_H_S)
-
-#define MPT_BYTE_64_L_KEY_IDX_H_S 24
-#define MPT_BYTE_64_L_KEY_IDX_H_M \
- (((1UL << 8) - 1) << MPT_BYTE_64_L_KEY_IDX_H_S)
-
-struct hns_roce_wqe_ctrl_seg {
- __le32 sgl_pa_h;
- __le32 flag;
- union {
- __be32 imm_data;
- __le32 inv_key;
- };
- __le32 msg_length;
-};
-
-struct hns_roce_wqe_data_seg {
- __le64 addr;
- __le32 lkey;
- __le32 len;
-};
-
-struct hns_roce_wqe_raddr_seg {
- __le32 rkey;
- __le32 len; /* reserved */
- __le64 raddr;
-};
-
-struct hns_roce_rq_wqe_ctrl {
- __le32 rwqe_byte_4;
- __le32 rocee_sgl_ba_l;
- __le32 rwqe_byte_12;
- __le32 reserved[5];
-};
-
-#define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S 16
-#define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M \
- (((1UL << 6) - 1) << RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S)
-
-#define HNS_ROCE_QP_DESTROY_TIMEOUT_MSECS 10000
-
-#define GID_LEN 16
-
-struct hns_roce_ud_send_wqe {
- __le32 dmac_h;
- __le32 u32_8;
- __le32 immediate_data;
-
- __le32 u32_16;
- union {
- unsigned char dgid[GID_LEN];
- struct {
- __le32 u32_20;
- __le32 u32_24;
- __le32 u32_28;
- __le32 u32_32;
- };
- };
-
- __le32 u32_36;
- __le32 u32_40;
-
- __le32 va0_l;
- __le32 va0_h;
- __le32 l_key0;
-
- __le32 va1_l;
- __le32 va1_h;
- __le32 l_key1;
-};
-
-#define UD_SEND_WQE_U32_4_DMAC_0_S 0
-#define UD_SEND_WQE_U32_4_DMAC_0_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_0_S)
-
-#define UD_SEND_WQE_U32_4_DMAC_1_S 8
-#define UD_SEND_WQE_U32_4_DMAC_1_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_1_S)
-
-#define UD_SEND_WQE_U32_4_DMAC_2_S 16
-#define UD_SEND_WQE_U32_4_DMAC_2_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_2_S)
-
-#define UD_SEND_WQE_U32_4_DMAC_3_S 24
-#define UD_SEND_WQE_U32_4_DMAC_3_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_3_S)
-
-#define UD_SEND_WQE_U32_8_DMAC_4_S 0
-#define UD_SEND_WQE_U32_8_DMAC_4_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_4_S)
-
-#define UD_SEND_WQE_U32_8_DMAC_5_S 8
-#define UD_SEND_WQE_U32_8_DMAC_5_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_5_S)
-
-#define UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S 22
-
-#define UD_SEND_WQE_U32_8_OPERATION_TYPE_S 16
-#define UD_SEND_WQE_U32_8_OPERATION_TYPE_M \
- (((1UL << 4) - 1) << UD_SEND_WQE_U32_8_OPERATION_TYPE_S)
-
-#define UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S 24
-#define UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M \
- (((1UL << 6) - 1) << UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S)
-
-#define UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S 31
-
-#define UD_SEND_WQE_U32_16_DEST_QP_S 0
-#define UD_SEND_WQE_U32_16_DEST_QP_M \
- (((1UL << 24) - 1) << UD_SEND_WQE_U32_16_DEST_QP_S)
-
-#define UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S 24
-#define UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S)
-
-#define UD_SEND_WQE_U32_36_FLOW_LABEL_S 0
-#define UD_SEND_WQE_U32_36_FLOW_LABEL_M \
- (((1UL << 20) - 1) << UD_SEND_WQE_U32_36_FLOW_LABEL_S)
-
-#define UD_SEND_WQE_U32_36_PRIORITY_S 20
-#define UD_SEND_WQE_U32_36_PRIORITY_M \
- (((1UL << 4) - 1) << UD_SEND_WQE_U32_36_PRIORITY_S)
-
-#define UD_SEND_WQE_U32_36_SGID_INDEX_S 24
-#define UD_SEND_WQE_U32_36_SGID_INDEX_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_36_SGID_INDEX_S)
-
-#define UD_SEND_WQE_U32_40_HOP_LIMIT_S 0
-#define UD_SEND_WQE_U32_40_HOP_LIMIT_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_40_HOP_LIMIT_S)
-
-#define UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S 8
-#define UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S)
-
-struct hns_roce_sqp_context {
- __le32 qp1c_bytes_4;
- __le32 sq_rq_bt_l;
- __le32 qp1c_bytes_12;
- __le32 qp1c_bytes_16;
- __le32 qp1c_bytes_20;
- __le32 cur_rq_wqe_ba_l;
- __le32 qp1c_bytes_28;
- __le32 qp1c_bytes_32;
- __le32 cur_sq_wqe_ba_l;
- __le32 qp1c_bytes_40;
-};
-
-#define QP1C_BYTES_4_QP_STATE_S 0
-#define QP1C_BYTES_4_QP_STATE_M \
- (((1UL << 3) - 1) << QP1C_BYTES_4_QP_STATE_S)
-
-#define QP1C_BYTES_4_SQ_WQE_SHIFT_S 8
-#define QP1C_BYTES_4_SQ_WQE_SHIFT_M \
- (((1UL << 4) - 1) << QP1C_BYTES_4_SQ_WQE_SHIFT_S)
-
-#define QP1C_BYTES_4_RQ_WQE_SHIFT_S 12
-#define QP1C_BYTES_4_RQ_WQE_SHIFT_M \
- (((1UL << 4) - 1) << QP1C_BYTES_4_RQ_WQE_SHIFT_S)
-
-#define QP1C_BYTES_4_PD_S 16
-#define QP1C_BYTES_4_PD_M (((1UL << 16) - 1) << QP1C_BYTES_4_PD_S)
-
-#define QP1C_BYTES_12_SQ_RQ_BT_H_S 0
-#define QP1C_BYTES_12_SQ_RQ_BT_H_M \
- (((1UL << 17) - 1) << QP1C_BYTES_12_SQ_RQ_BT_H_S)
-
-#define QP1C_BYTES_16_RQ_HEAD_S 0
-#define QP1C_BYTES_16_RQ_HEAD_M (((1UL << 15) - 1) << QP1C_BYTES_16_RQ_HEAD_S)
-
-#define QP1C_BYTES_16_PORT_NUM_S 16
-#define QP1C_BYTES_16_PORT_NUM_M \
- (((1UL << 3) - 1) << QP1C_BYTES_16_PORT_NUM_S)
-
-#define QP1C_BYTES_16_SIGNALING_TYPE_S 27
-#define QP1C_BYTES_16_LOCAL_ENABLE_E2E_CREDIT_S 28
-#define QP1C_BYTES_16_RQ_BA_FLG_S 29
-#define QP1C_BYTES_16_SQ_BA_FLG_S 30
-#define QP1C_BYTES_16_QP1_ERR_S 31
-
-#define QP1C_BYTES_20_SQ_HEAD_S 0
-#define QP1C_BYTES_20_SQ_HEAD_M (((1UL << 15) - 1) << QP1C_BYTES_20_SQ_HEAD_S)
-
-#define QP1C_BYTES_20_PKEY_IDX_S 16
-#define QP1C_BYTES_20_PKEY_IDX_M \
- (((1UL << 16) - 1) << QP1C_BYTES_20_PKEY_IDX_S)
-
-#define QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S 0
-#define QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M \
- (((1UL << 5) - 1) << QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S)
-
-#define QP1C_BYTES_28_RQ_CUR_IDX_S 16
-#define QP1C_BYTES_28_RQ_CUR_IDX_M \
- (((1UL << 15) - 1) << QP1C_BYTES_28_RQ_CUR_IDX_S)
-
-#define QP1C_BYTES_32_TX_CQ_NUM_S 0
-#define QP1C_BYTES_32_TX_CQ_NUM_M \
- (((1UL << 16) - 1) << QP1C_BYTES_32_TX_CQ_NUM_S)
-
-#define QP1C_BYTES_32_RX_CQ_NUM_S 16
-#define QP1C_BYTES_32_RX_CQ_NUM_M \
- (((1UL << 16) - 1) << QP1C_BYTES_32_RX_CQ_NUM_S)
-
-#define QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S 0
-#define QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M \
- (((1UL << 5) - 1) << QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S)
-
-#define QP1C_BYTES_40_SQ_CUR_IDX_S 16
-#define QP1C_BYTES_40_SQ_CUR_IDX_M \
- (((1UL << 15) - 1) << QP1C_BYTES_40_SQ_CUR_IDX_S)
-
-#define HNS_ROCE_WQE_INLINE (1UL<<31)
-#define HNS_ROCE_WQE_SE (1UL<<30)
-
-#define HNS_ROCE_WQE_SGE_NUM_BIT 24
-#define HNS_ROCE_WQE_IMM (1UL<<23)
-#define HNS_ROCE_WQE_FENCE (1UL<<21)
-#define HNS_ROCE_WQE_CQ_NOTIFY (1UL<<20)
-
-#define HNS_ROCE_WQE_OPCODE_SEND (0<<16)
-#define HNS_ROCE_WQE_OPCODE_RDMA_READ (1<<16)
-#define HNS_ROCE_WQE_OPCODE_RDMA_WRITE (2<<16)
-#define HNS_ROCE_WQE_OPCODE_LOCAL_INV (4<<16)
-#define HNS_ROCE_WQE_OPCODE_UD_SEND (7<<16)
-#define HNS_ROCE_WQE_OPCODE_MASK (15<<16)
-
-struct hns_roce_qp_context {
- __le32 qpc_bytes_4;
- __le32 qpc_bytes_8;
- __le32 qpc_bytes_12;
- __le32 qpc_bytes_16;
- __le32 sq_rq_bt_l;
- __le32 qpc_bytes_24;
- __le32 irrl_ba_l;
- __le32 qpc_bytes_32;
- __le32 qpc_bytes_36;
- __le32 dmac_l;
- __le32 qpc_bytes_44;
- __le32 qpc_bytes_48;
- u8 dgid[16];
- __le32 qpc_bytes_68;
- __le32 cur_rq_wqe_ba_l;
- __le32 qpc_bytes_76;
- __le32 rx_rnr_time;
- __le32 qpc_bytes_84;
- __le32 qpc_bytes_88;
- union {
- __le32 rx_sge_len;
- __le32 dma_length;
- };
- union {
- __le32 rx_sge_num;
- __le32 rx_send_pktn;
- __le32 r_key;
- };
- __le32 va_l;
- __le32 va_h;
- __le32 qpc_bytes_108;
- __le32 qpc_bytes_112;
- __le32 rx_cur_sq_wqe_ba_l;
- __le32 qpc_bytes_120;
- __le32 qpc_bytes_124;
- __le32 qpc_bytes_128;
- __le32 qpc_bytes_132;
- __le32 qpc_bytes_136;
- __le32 qpc_bytes_140;
- __le32 qpc_bytes_144;
- __le32 qpc_bytes_148;
- union {
- __le32 rnr_retry;
- __le32 ack_time;
- };
- __le32 qpc_bytes_156;
- __le32 pkt_use_len;
- __le32 qpc_bytes_164;
- __le32 qpc_bytes_168;
- union {
- __le32 sge_use_len;
- __le32 pa_use_len;
- };
- __le32 qpc_bytes_176;
- __le32 qpc_bytes_180;
- __le32 tx_cur_sq_wqe_ba_l;
- __le32 qpc_bytes_188;
- __le32 rvd21;
-};
-
-#define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S 0
-#define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M \
- (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S)
-
-#define QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S 3
-#define QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S 4
-#define QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S 5
-#define QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S 6
-#define QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S 7
-
-#define QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S 8
-#define QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M \
- (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S)
-
-#define QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S 12
-#define QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M \
- (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S)
-
-#define QP_CONTEXT_QPC_BYTES_4_PD_S 16
-#define QP_CONTEXT_QPC_BYTES_4_PD_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_4_PD_S)
-
-#define QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S 0
-#define QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S)
-
-#define QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S 16
-#define QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S)
-
-#define QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S 0
-#define QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S)
-
-#define QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S 16
-#define QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S)
-
-#define QP_CONTEXT_QPC_BYTES_16_QP_NUM_S 0
-#define QP_CONTEXT_QPC_BYTES_16_QP_NUM_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_16_QP_NUM_S)
-
-#define QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S 0
-#define QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M \
- (((1UL << 17) - 1) << QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S)
-
-#define QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S 18
-#define QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M \
- (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S)
-
-#define QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S 23
-
-#define QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S 0
-#define QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M \
- (((1UL << 17) - 1) << QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S)
-
-#define QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S 18
-#define QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S)
-
-#define QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S 20
-#define QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S 21
-#define QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S 22
-#define QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S 23
-
-#define QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S 24
-#define QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S)
-
-#define QP_CONTEXT_QPC_BYTES_36_DEST_QP_S 0
-#define QP_CONTEXT_QPC_BYTES_36_DEST_QP_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_36_DEST_QP_S)
-
-#define QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S 24
-#define QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S)
-
-#define QP_CONTEXT_QPC_BYTES_44_DMAC_H_S 0
-#define QP_CONTEXT_QPC_BYTES_44_DMAC_H_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_44_DMAC_H_S)
-
-#define QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S 16
-#define QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S)
-
-#define QP_CONTEXT_QPC_BYTES_44_HOPLMT_S 24
-#define QP_CONTEXT_QPC_BYTES_44_HOPLMT_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_44_HOPLMT_S)
-
-#define QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S 0
-#define QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M \
- (((1UL << 20) - 1) << QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S)
-
-#define QP_CONTEXT_QPC_BYTES_48_TCLASS_S 20
-#define QP_CONTEXT_QPC_BYTES_48_TCLASS_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_48_TCLASS_S)
-
-#define QP_CONTEXT_QPC_BYTES_48_MTU_S 28
-#define QP_CONTEXT_QPC_BYTES_48_MTU_M \
- (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_48_MTU_S)
-
-#define QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S 0
-#define QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S)
-
-#define QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S 16
-#define QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S)
-
-#define QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S 0
-#define QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M \
- (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S)
-
-#define QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S 8
-#define QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S 0
-#define QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S 24
-#define QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S)
-
-#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S 0
-#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S 24
-#define QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S 25
-
-#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S 26
-#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M \
- (((1UL << 2) - 1) << \
- QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S)
-
-#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S 29
-#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S)
-
-#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S 0
-#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S 24
-#define QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S 25
-
-#define QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S 0
-#define QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S 24
-#define QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S)
-
-#define QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S 0
-#define QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M \
- (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S)
-
-#define QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S 0
-#define QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S 16
-#define QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S)
-
-#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S 0
-#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S 24
-
-#define QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S 25
-#define QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S)
-
-#define QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S 27
-
-#define QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S 0
-#define QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S 24
-#define QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S)
-
-#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S 0
-#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S 24
-#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S)
-
-#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S 0
-#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S)
-
-#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S 16
-#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S 31
-
-#define QP_CONTEXT_QPC_BYTES_144_QP_STATE_S 0
-#define QP_CONTEXT_QPC_BYTES_144_QP_STATE_M \
- (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_144_QP_STATE_S)
-
-#define QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S 0
-#define QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S)
-
-#define QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S 2
-#define QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M \
- (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S)
-
-#define QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S 5
-#define QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M \
- (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S)
-
-#define QP_CONTEXT_QPC_BYTES_148_LSN_S 8
-#define QP_CONTEXT_QPC_BYTES_148_LSN_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_148_LSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S 0
-#define QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M \
- (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S)
-
-#define QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S 3
-#define QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M \
- (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S)
-
-#define QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S 8
-#define QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M \
- (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S)
-
-#define QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S 11
-#define QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M \
- (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S)
-
-#define QP_CONTEXT_QPC_BYTES_156_SL_S 14
-#define QP_CONTEXT_QPC_BYTES_156_SL_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_156_SL_S)
-
-#define QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S 16
-#define QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S)
-
-#define QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S 24
-#define QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S)
-
-#define QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S 0
-#define QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S 24
-#define QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S)
-
-#define QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S 0
-#define QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S 24
-#define QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S)
-
-#define QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S 26
-#define QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S)
-
-#define QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S 28
-#define QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S 29
-#define QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S 30
-
-#define QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S 0
-#define QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S)
-
-#define QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S 16
-#define QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S)
-
-#define QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S 0
-#define QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S)
-
-#define QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S 16
-#define QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S)
-
-#define QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S 0
-#define QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M \
- (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S)
-
-#define QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S 8
-
-#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S 16
-#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S)
-
-#define STATUS_MASK 0xff
-#define GO_BIT_TIMEOUT_MSECS 10000
-#define HCR_STATUS_OFFSET 0x18
-#define HCR_GO_BIT 15
-
-struct hns_roce_rq_db {
- __le32 u32_4;
- __le32 u32_8;
-};
-
-#define RQ_DOORBELL_U32_4_RQ_HEAD_S 0
-#define RQ_DOORBELL_U32_4_RQ_HEAD_M \
- (((1UL << 15) - 1) << RQ_DOORBELL_U32_4_RQ_HEAD_S)
-
-#define RQ_DOORBELL_U32_8_QPN_S 0
-#define RQ_DOORBELL_U32_8_QPN_M (((1UL << 24) - 1) << RQ_DOORBELL_U32_8_QPN_S)
-
-#define RQ_DOORBELL_U32_8_CMD_S 28
-#define RQ_DOORBELL_U32_8_CMD_M (((1UL << 3) - 1) << RQ_DOORBELL_U32_8_CMD_S)
-
-#define RQ_DOORBELL_U32_8_HW_SYNC_S 31
-
-struct hns_roce_sq_db {
- __le32 u32_4;
- __le32 u32_8;
-};
-
-#define SQ_DOORBELL_U32_4_SQ_HEAD_S 0
-#define SQ_DOORBELL_U32_4_SQ_HEAD_M \
- (((1UL << 15) - 1) << SQ_DOORBELL_U32_4_SQ_HEAD_S)
-
-#define SQ_DOORBELL_U32_4_SL_S 16
-#define SQ_DOORBELL_U32_4_SL_M \
- (((1UL << 2) - 1) << SQ_DOORBELL_U32_4_SL_S)
-
-#define SQ_DOORBELL_U32_4_PORT_S 18
-#define SQ_DOORBELL_U32_4_PORT_M (((1UL << 3) - 1) << SQ_DOORBELL_U32_4_PORT_S)
-
-#define SQ_DOORBELL_U32_8_QPN_S 0
-#define SQ_DOORBELL_U32_8_QPN_M (((1UL << 24) - 1) << SQ_DOORBELL_U32_8_QPN_S)
-
-#define SQ_DOORBELL_HW_SYNC_S 31
-
-struct hns_roce_ext_db {
- int esdb_dep;
- int eodb_dep;
- struct hns_roce_buf_list *sdb_buf_list;
- struct hns_roce_buf_list *odb_buf_list;
-};
-
-struct hns_roce_db_table {
- int sdb_ext_mod;
- int odb_ext_mod;
- struct hns_roce_ext_db *ext_db;
-};
-
-#define HW_SYNC_SLEEP_TIME_INTERVAL 20
-#define HW_SYNC_TIMEOUT_MSECS (25 * HW_SYNC_SLEEP_TIME_INTERVAL)
-#define BT_CMD_SYNC_SHIFT 31
-#define HNS_ROCE_BA_SIZE (32 * 4096)
-
-struct hns_roce_bt_table {
- struct hns_roce_buf_list qpc_buf;
- struct hns_roce_buf_list mtpt_buf;
- struct hns_roce_buf_list cqc_buf;
-};
-
-struct hns_roce_tptr_table {
- struct hns_roce_buf_list tptr_buf;
-};
-
-struct hns_roce_qp_work {
- struct work_struct work;
- struct ib_device *ib_dev;
- struct hns_roce_qp *qp;
- u32 db_wait_stage;
- u32 sdb_issue_ptr;
- u32 sdb_inv_cnt;
- u32 sche_cnt;
-};
-
-struct hns_roce_mr_free_work {
- struct work_struct work;
- struct ib_device *ib_dev;
- struct completion *comp;
- int comp_flag;
- void *mr;
-};
-
-struct hns_roce_recreate_lp_qp_work {
- struct work_struct work;
- struct ib_device *ib_dev;
- struct completion *comp;
- int comp_flag;
-};
-
-struct hns_roce_free_mr {
- struct workqueue_struct *free_mr_wq;
- struct hns_roce_qp *mr_free_qp[HNS_ROCE_V1_RESV_QP];
- struct hns_roce_cq *mr_free_cq;
- struct hns_roce_pd *mr_free_pd;
-};
-
-struct hns_roce_v1_priv {
- struct hns_roce_db_table db_table;
- struct hns_roce_raq_table raq_table;
- struct hns_roce_bt_table bt_table;
- struct hns_roce_tptr_table tptr_table;
- struct hns_roce_free_mr free_mr;
-};
-
-int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
-int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
-int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
-
-#endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index eb0defa80d0d..b33e948fd060 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -678,6 +678,7 @@ static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val,
static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
void *wqe)
{
+#define HNS_ROCE_SL_SHIFT 2
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
/* All kinds of DirectWQE have the same header field layout */
@@ -685,7 +686,8 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M,
V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S, qp->sl);
roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M,
- V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S, qp->sl >> 2);
+ V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S,
+ qp->sl >> HNS_ROCE_SL_SHIFT);
roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M,
V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head);
@@ -1305,14 +1307,14 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
continue;
dev_err_ratelimited(hr_dev->dev,
- "Cmdq IO error, opcode = %x, return = %x\n",
+ "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n",
desc->opcode, desc_ret);
ret = -EIO;
}
} else {
/* FW/HW reset or incorrect number of desc */
tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
- dev_warn(hr_dev->dev, "CMDQ move tail from %d to %d\n",
+ dev_warn(hr_dev->dev, "CMDQ move tail from %u to %u.\n",
csq->head, tail);
csq->head = tail;
@@ -1571,7 +1573,7 @@ static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
struct hns_roce_cmq_desc desc;
int ret;
- if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09) {
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
hr_dev->func_num = 1;
return 0;
}
@@ -2003,7 +2005,8 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
- caps->flags |= HNS_ROCE_CAP_FLAG_STASH;
+ caps->flags |= HNS_ROCE_CAP_FLAG_STASH |
+ HNS_ROCE_CAP_FLAG_DIRECT_WQE;
caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE;
} else {
caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
@@ -2144,7 +2147,6 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
- caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
@@ -2161,6 +2163,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
(u32)priv->handle->rinfo.num_vectors - 2);
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+ caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM;
caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
@@ -2181,6 +2184,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
} else {
u32 func_num = max_t(u32, 1, hr_dev->func_num);
+ caps->eqe_hop_num = HNS_ROCE_V2_EQE_HOP_NUM;
caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
caps->gid_table_len[0] /= func_num;
@@ -2393,7 +2397,7 @@ static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
struct hns_roce_caps *caps = &hr_dev->caps;
int ret;
- if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
return 0;
ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE,
@@ -2967,8 +2971,8 @@ static int config_gmv_table(struct hns_roce_dev *hr_dev,
return hns_roce_cmq_send(hr_dev, desc, 2);
}
-static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u32 port,
- int gid_index, const union ib_gid *gid,
+static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, int gid_index,
+ const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
@@ -3063,8 +3067,7 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
}
static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
- void *mb_buf, struct hns_roce_mr *mr,
- unsigned long mtpt_idx)
+ void *mb_buf, struct hns_roce_mr *mr)
{
struct hns_roce_v2_mpt_entry *mpt_entry;
int ret;
@@ -4488,14 +4491,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
return 0;
}
-static inline u16 get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
-{
- if (!fl)
- fl = rdma_calc_flow_label(lqpn, rqpn);
-
- return rdma_flow_label_to_udp_sport(fl);
-}
-
static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
u32 *dip_idx)
{
@@ -4712,8 +4707,9 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
}
hr_reg_write(context, QPC_UDPSPN,
- is_udp ? get_udp_sport(grh->flow_label, ibqp->qp_num,
- attr->dest_qp_num) : 0);
+ is_udp ? rdma_get_udp_sport(grh->flow_label, ibqp->qp_num,
+ attr->dest_qp_num) :
+ 0);
hr_reg_clear(qpc_mask, QPC_UDPSPN);
@@ -4739,7 +4735,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
ibdev_err(ibdev,
- "failed to fill QPC, sl (%d) shouldn't be larger than %d.\n",
+ "failed to fill QPC, sl (%u) shouldn't be larger than %d.\n",
hr_qp->sl, MAX_SERVICE_LEVEL);
return -EINVAL;
}
@@ -4768,7 +4764,8 @@ static bool check_qp_state(enum ib_qp_state cur_state,
[IB_QPS_ERR] = true },
[IB_QPS_SQD] = {},
[IB_QPS_SQE] = {},
- [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
+ [IB_QPS_ERR] = { [IB_QPS_RESET] = true,
+ [IB_QPS_ERR] = true }
};
return sm[cur_state][new_state];
@@ -5868,7 +5865,7 @@ static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag);
}
-static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
+static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn)
{
struct device *dev = hr_dev->dev;
int ret;
@@ -5882,7 +5879,7 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
0, HNS_ROCE_CMD_DESTROY_AEQC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret)
- dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
+ dev_err(dev, "[mailbox cmd] destroy eqc(%u) failed.\n", eqn);
}
static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
@@ -6394,7 +6391,7 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
if (!id)
return 0;
- if (id->driver_data && handle->pdev->revision < PCI_REVISION_ID_HIP09)
+ if (id->driver_data && handle->pdev->revision == PCI_REVISION_ID_HIP08)
return 0;
ret = __hns_roce_hw_v2_init_instance(handle);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 35c61da7ba15..12be85f0986e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -35,26 +35,15 @@
#include <linux/bitops.h>
-#define HNS_ROCE_VF_QPC_BT_NUM 256
-#define HNS_ROCE_VF_SCCC_BT_NUM 64
-#define HNS_ROCE_VF_SRQC_BT_NUM 64
-#define HNS_ROCE_VF_CQC_BT_NUM 64
-#define HNS_ROCE_VF_MPT_BT_NUM 64
-#define HNS_ROCE_VF_SMAC_NUM 32
-#define HNS_ROCE_VF_SL_NUM 8
-#define HNS_ROCE_VF_GMV_BT_NUM 256
-
#define HNS_ROCE_V2_MAX_QP_NUM 0x1000
#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
#define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
-#define HNS_ROCE_V2_MAX_SRQ 0x100000
#define HNS_ROCE_V2_MAX_SRQ_WR 0x8000
#define HNS_ROCE_V2_MAX_SRQ_SGE 64
#define HNS_ROCE_V2_MAX_CQ_NUM 0x100000
#define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100
#define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
#define HNS_ROCE_V2_MAX_CQE_NUM 0x400000
-#define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000
#define HNS_ROCE_V2_MAX_RQ_SGE_NUM 64
#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64
#define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000
@@ -63,13 +52,10 @@
#define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32
#define HNS_ROCE_V2_UAR_NUM 256
#define HNS_ROCE_V2_PHY_UAR_NUM 1
-#define HNS_ROCE_V2_MAX_IRQ_NUM 65
-#define HNS_ROCE_V2_COMP_VEC_NUM 63
#define HNS_ROCE_V2_AEQE_VEC_NUM 1
#define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1
#define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000
#define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000
-#define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_PD_NUM 0x1000000
@@ -81,7 +67,6 @@
#define HNS_ROCE_V2_MAX_RQ_DESC_SZ 16
#define HNS_ROCE_V2_MAX_SRQ_DESC_SZ 64
#define HNS_ROCE_V2_IRRL_ENTRY_SZ 64
-#define HNS_ROCE_V2_TRRL_ENTRY_SZ 48
#define HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ 100
#define HNS_ROCE_V2_CQC_ENTRY_SZ 64
#define HNS_ROCE_V2_SRQC_ENTRY_SZ 64
@@ -103,7 +88,6 @@
#define HNS_ROCE_INVALID_LKEY 0x0
#define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000
#define HNS_ROCE_CMQ_TX_TIMEOUT 30000
-#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
#define HNS_ROCE_V2_RSV_QPS 8
#define HNS_ROCE_V2_HW_RST_TIMEOUT 1000
@@ -117,12 +101,14 @@
#define HNS_ROCE_CQE_HOP_NUM 1
#define HNS_ROCE_SRQWQE_HOP_NUM 1
#define HNS_ROCE_PBL_HOP_NUM 2
-#define HNS_ROCE_EQE_HOP_NUM 2
#define HNS_ROCE_IDX_HOP_NUM 1
#define HNS_ROCE_SQWQE_HOP_NUM 2
#define HNS_ROCE_EXT_SGE_HOP_NUM 1
#define HNS_ROCE_RQWQE_HOP_NUM 2
+#define HNS_ROCE_V2_EQE_HOP_NUM 2
+#define HNS_ROCE_V3_EQE_HOP_NUM 1
+
#define HNS_ROCE_BA_PG_SZ_SUPPORTED_256K 6
#define HNS_ROCE_BA_PG_SZ_SUPPORTED_16K 2
#define HNS_ROCE_V2_GID_INDEX_NUM 16
@@ -1441,7 +1427,7 @@ struct hns_roce_v2_priv {
struct hns_roce_dip {
u8 dgid[GID_LEN_V2];
u32 dip_idx;
- struct list_head node; /* all dips are on a list */
+ struct list_head node; /* all dips are on a list */
};
/* only for RNR timeout issue of HIP08 */
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 4194b626f3c6..f73ba619f375 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
#include <linux/acpi.h>
-#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <rdma/ib_addr.h>
@@ -70,7 +69,7 @@ static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
if (port >= hr_dev->caps.num_ports)
return -EINVAL;
- ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr);
+ ret = hr_dev->hw->set_gid(hr_dev, attr->index, &attr->gid, attr);
return ret;
}
@@ -84,7 +83,7 @@ static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
if (port >= hr_dev->caps.num_ports)
return -EINVAL;
- ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, NULL, NULL);
+ ret = hr_dev->hw->set_gid(hr_dev, attr->index, NULL, NULL);
return ret;
}
@@ -152,9 +151,6 @@ static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
u8 i;
for (i = 0; i < hr_dev->caps.num_ports; i++) {
- if (hr_dev->hw->set_mtu)
- hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
- hr_dev->caps.max_mtu);
ret = hns_roce_set_mac(hr_dev, i,
hr_dev->iboe.netdevs[i]->dev_addr);
if (ret)
@@ -270,6 +266,9 @@ static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
static int hns_roce_query_pkey(struct ib_device *ib_dev, u32 port, u16 index,
u16 *pkey)
{
+ if (index > 0)
+ return -EINVAL;
+
*pkey = PKEY_ID;
return 0;
@@ -307,9 +306,22 @@ hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
entry->address = address;
entry->mmap_type = mmap_type;
- ret = rdma_user_mmap_entry_insert_exact(
- ucontext, &entry->rdma_entry, length,
- mmap_type == HNS_ROCE_MMAP_TYPE_DB ? 0 : 1);
+ switch (mmap_type) {
+ /* pgoff 0 must be used by DB for compatibility */
+ case HNS_ROCE_MMAP_TYPE_DB:
+ ret = rdma_user_mmap_entry_insert_exact(
+ ucontext, &entry->rdma_entry, length, 0);
+ break;
+ case HNS_ROCE_MMAP_TYPE_DWQE:
+ ret = rdma_user_mmap_entry_insert_range(
+ ucontext, &entry->rdma_entry, length, 1,
+ U32_MAX);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
if (ret) {
kfree(entry);
return NULL;
@@ -323,18 +335,12 @@ static void hns_roce_dealloc_uar_entry(struct hns_roce_ucontext *context)
if (context->db_mmap_entry)
rdma_user_mmap_entry_remove(
&context->db_mmap_entry->rdma_entry);
-
- if (context->tptr_mmap_entry)
- rdma_user_mmap_entry_remove(
- &context->tptr_mmap_entry->rdma_entry);
}
static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
{
struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
- struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
u64 address;
- int ret;
address = context->uar.pfn << PAGE_SHIFT;
context->db_mmap_entry = hns_roce_user_mmap_entry_insert(
@@ -342,27 +348,7 @@ static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
if (!context->db_mmap_entry)
return -ENOMEM;
- if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
- return 0;
-
- /*
- * FIXME: using io_remap_pfn_range on the dma address returned
- * by dma_alloc_coherent is totally wrong.
- */
- context->tptr_mmap_entry =
- hns_roce_user_mmap_entry_insert(uctx, hr_dev->tptr_dma_addr,
- hr_dev->tptr_size,
- HNS_ROCE_MMAP_TYPE_TPTR);
- if (!context->tptr_mmap_entry) {
- ret = -ENOMEM;
- goto err;
- }
-
return 0;
-
-err:
- hns_roce_dealloc_uar_entry(context);
- return ret;
}
static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
@@ -436,10 +422,15 @@ static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
entry = to_hns_mmap(rdma_entry);
pfn = entry->address >> PAGE_SHIFT;
- prot = vma->vm_page_prot;
- if (entry->mmap_type != HNS_ROCE_MMAP_TYPE_TPTR)
- prot = pgprot_noncached(prot);
+ switch (entry->mmap_type) {
+ case HNS_ROCE_MMAP_TYPE_DB:
+ case HNS_ROCE_MMAP_TYPE_DWQE:
+ prot = pgprot_device(vma->vm_page_prot);
+ break;
+ default:
+ return -EINVAL;
+ }
ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
prot, rdma_entry);
@@ -816,7 +807,6 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
int ret;
spin_lock_init(&hr_dev->sm_lock);
- spin_lock_init(&hr_dev->bt_cmd_lock);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
@@ -907,20 +897,13 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
struct device *dev = hr_dev->dev;
int ret;
- if (hr_dev->hw->reset) {
- ret = hr_dev->hw->reset(hr_dev, true);
- if (ret) {
- dev_err(dev, "Reset RoCE engine failed!\n");
- return ret;
- }
- }
hr_dev->is_reset = false;
if (hr_dev->hw->cmq_init) {
ret = hr_dev->hw->cmq_init(hr_dev);
if (ret) {
dev_err(dev, "Init RoCE Command Queue failed!\n");
- goto error_failed_cmq_init;
+ return ret;
}
}
@@ -1003,12 +986,6 @@ error_failed_cmd_init:
if (hr_dev->hw->cmq_exit)
hr_dev->hw->cmq_exit(hr_dev);
-error_failed_cmq_init:
- if (hr_dev->hw->reset) {
- if (hr_dev->hw->reset(hr_dev, false))
- dev_err(dev, "Dereset RoCE engine failed!\n");
- }
-
return ret;
}
@@ -1028,8 +1005,6 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev)
hns_roce_cmd_cleanup(hr_dev);
if (hr_dev->hw->cmq_exit)
hr_dev->hw->cmq_exit(hr_dev);
- if (hr_dev->hw->reset)
- hr_dev->hw->reset(hr_dev, false);
}
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 7089ac780291..2ee06b906b60 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
@@ -81,7 +80,7 @@ static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
return -ENOMEM;
}
- mr->key = hw_index_to_key(id); /* MR key */
+ mr->key = hw_index_to_key(id); /* MR key */
err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table,
(unsigned long)id);
@@ -173,8 +172,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
}
if (mr->type != MR_TYPE_FRMR)
- ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr,
- mtpt_idx);
+ ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
else
ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
if (ret) {
@@ -363,12 +361,8 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
struct hns_roce_mr *mr = to_hr_mr(ibmr);
int ret = 0;
- if (hr_dev->hw->dereg_mr) {
- ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata);
- } else {
- hns_roce_mr_free(hr_dev, mr);
- kfree(mr);
- }
+ hns_roce_mr_free(hr_dev, mr);
+ kfree(mr);
return ret;
}
@@ -614,10 +608,7 @@ static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
return -ENOBUFS;
for (i = 0; i < count && npage < max_count; i++) {
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- addr = to_hr_hw_page_addr(pages[npage]);
- else
- addr = pages[npage];
+ addr = pages[npage];
mtts[i] = cpu_to_le64(addr);
npage++;
@@ -824,11 +815,11 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
}
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
+ u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
{
struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
int mtt_count, left;
- int start_index;
+ u32 start_index;
int total = 0;
__le64 *mtts;
u32 npage;
@@ -847,10 +838,7 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
continue;
addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- mtt_buf[total] = to_hr_hw_page_addr(addr);
- else
- mtt_buf[total] = addr;
+ mtt_buf[total] = addr;
total++;
}
@@ -884,10 +872,10 @@ done:
static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
struct hns_roce_buf_attr *attr,
struct hns_roce_hem_cfg *cfg,
- unsigned int *buf_page_shift, int unalinged_size)
+ unsigned int *buf_page_shift, u64 unalinged_size)
{
struct hns_roce_buf_region *r;
- int first_region_padding;
+ u64 first_region_padding;
int page_cnt, region_cnt;
unsigned int page_shift;
size_t buf_size;
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 81ffad77ae42..783e71852c50 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/platform_device.h>
#include <linux/pci.h>
#include "hns_roce_device.h"
@@ -86,7 +85,6 @@ int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
struct hns_roce_ida *uar_ida = &hr_dev->uar_ida;
- struct resource *res;
int id;
/* Using bitmap to manager UAR index */
@@ -104,18 +102,9 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
else
uar->index = 0;
- if (!dev_is_pci(hr_dev->dev)) {
- res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ida_free(&uar_ida->ida, id);
- dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
- return -EINVAL;
- }
- uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
- } else {
- uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2))
- >> PAGE_SHIFT);
- }
+ uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2)) >> PAGE_SHIFT);
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
+ hr_dev->dwqe_page = pci_resource_start(hr_dev->pci_dev, 4);
return 0;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 9af4509894e6..d78373e10aab 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -32,7 +32,6 @@
*/
#include <linux/pci.h>
-#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
@@ -110,12 +109,11 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
return;
}
- if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
- (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
- event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
- event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
- event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
- event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH)) {
+ if (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
+ event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
+ event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
+ event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
+ event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH) {
qp->state = IB_QPS_ERR;
flush_cqe(hr_dev, qp);
@@ -219,13 +217,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
int ret;
if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
- /* when hw version is v1, the sqpn is allocated */
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- num = HNS_ROCE_MAX_PORTS +
- hr_dev->iboe.phy_port[hr_qp->port];
- else
- num = 1;
-
+ num = 1;
hr_qp->doorbell_qpn = 1;
} else {
mutex_lock(&qp_table->bank_mutex);
@@ -324,11 +316,6 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
if (!hr_qp->qpn)
return -EINVAL;
- /* In v1 engine, GSI QP context is saved in the RoCE hw's register */
- if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
- hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- return 0;
-
/* Alloc memory for QPC */
ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
if (ret) {
@@ -379,6 +366,11 @@ err_out:
return ret;
}
+static void qp_user_mmap_entry_remove(struct hns_roce_qp *hr_qp)
+{
+ rdma_user_mmap_entry_remove(&hr_qp->dwqe_mmap_entry->rdma_entry);
+}
+
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
struct xarray *xa = &hr_dev->qp_table_xa;
@@ -402,11 +394,6 @@ static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
- /* In v1 engine, GSI QP context is saved in the RoCE hw's register */
- if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
- hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- return;
-
if (hr_dev->caps.trrl_entry_sz)
hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
@@ -535,11 +522,6 @@ static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
- hr_qp->sq.max_gs = HNS_ROCE_SGE_IN_WQE;
- return;
- }
-
hr_qp->sq.max_gs = max(1U, cap->max_send_sge);
wqe_sge_cnt = get_wqe_ext_sge_cnt(hr_qp);
@@ -780,7 +762,11 @@ static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
goto err_inline;
}
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE;
+
return 0;
+
err_inline:
free_rq_inline_buf(hr_qp);
@@ -822,6 +808,35 @@ static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
hns_roce_qp_has_rq(init_attr));
}
+static int qp_mmap_entry(struct hns_roce_qp *hr_qp,
+ struct hns_roce_dev *hr_dev,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_qp_resp *resp)
+{
+ struct hns_roce_ucontext *uctx =
+ rdma_udata_to_drv_context(udata,
+ struct hns_roce_ucontext, ibucontext);
+ struct rdma_user_mmap_entry *rdma_entry;
+ u64 address;
+
+ address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE;
+
+ hr_qp->dwqe_mmap_entry =
+ hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address,
+ HNS_ROCE_DWQE_SIZE,
+ HNS_ROCE_MMAP_TYPE_DWQE);
+
+ if (!hr_qp->dwqe_mmap_entry) {
+ ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n");
+ return -ENOMEM;
+ }
+
+ rdma_entry = &hr_qp->dwqe_mmap_entry->rdma_entry;
+ resp->dwqe_mmap_key = rdma_user_mmap_get_offset(rdma_entry);
+
+ return 0;
+}
+
static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp,
struct ib_qp_init_attr *init_attr,
@@ -909,10 +924,16 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB;
if (udata) {
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) {
+ ret = qp_mmap_entry(hr_qp, hr_dev, udata, resp);
+ if (ret)
+ return ret;
+ }
+
ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd,
resp);
if (ret)
- return ret;
+ goto err_remove_qp;
} else {
ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr);
if (ret)
@@ -920,6 +941,12 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
}
return 0;
+
+err_remove_qp:
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
+ qp_user_mmap_entry_remove(hr_qp);
+
+ return ret;
}
static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
@@ -933,6 +960,8 @@ static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
+ qp_user_mmap_entry_remove(hr_qp);
} else {
if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
hns_roce_free_db(hr_dev, &hr_qp->rdb);
@@ -1158,7 +1187,7 @@ static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
goto out;
break;
case IB_QPT_UD:
- if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 &&
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 &&
is_user)
goto out;
break;
@@ -1391,7 +1420,7 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
}
}
-static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
+static inline void *get_wqe(struct hns_roce_qp *hr_qp, u32 offset)
{
return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
}
diff --git a/drivers/infiniband/hw/irdma/i40iw_if.c b/drivers/infiniband/hw/irdma/i40iw_if.c
index d219f64b2c3d..43e962b97d6a 100644
--- a/drivers/infiniband/hw/irdma/i40iw_if.c
+++ b/drivers/infiniband/hw/irdma/i40iw_if.c
@@ -198,7 +198,7 @@ static void i40iw_remove(struct auxiliary_device *aux_dev)
aux_dev);
struct i40e_info *cdev_info = i40e_adev->ldev;
- return i40e_client_device_unregister(cdev_info);
+ i40e_client_device_unregister(cdev_info);
}
static const struct auxiliary_device_id i40iw_auxiliary_id_table[] = {
diff --git a/drivers/infiniband/hw/irdma/pble.h b/drivers/infiniband/hw/irdma/pble.h
index aa20827dcc9d..d0d4f2b77d34 100644
--- a/drivers/infiniband/hw/irdma/pble.h
+++ b/drivers/infiniband/hw/irdma/pble.h
@@ -69,7 +69,7 @@ struct irdma_add_page_info {
struct irdma_chunk {
struct list_head list;
struct irdma_dma_info dmainfo;
- void *bitmapbuf;
+ unsigned long *bitmapbuf;
u32 sizeofbitmap;
u64 size;
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 8cd5f9261692..460e757d3fe6 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -21,7 +21,8 @@ static int irdma_query_device(struct ib_device *ibdev,
return -EINVAL;
memset(props, 0, sizeof(*props));
- ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
+ addrconf_addr_eui48((u8 *)&props->sys_image_guid,
+ iwdev->netdev->dev_addr);
props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
irdma_fw_minor_ver(&rf->sc_dev);
props->device_cap_flags = iwdev->device_cap_flags;
@@ -1170,6 +1171,10 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
udp_info->ttl = attr->ah_attr.grh.hop_limit;
udp_info->flow_label = attr->ah_attr.grh.flow_label;
udp_info->tos = attr->ah_attr.grh.traffic_class;
+ udp_info->src_port =
+ rdma_get_udp_sport(udp_info->flow_label,
+ ibqp->qp_num,
+ roce_info->dest_qp);
irdma_qp_rem_qos(&iwqp->sc_qp);
dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
ctx_info->user_pri = rt_tos2priority(udp_info->tos);
@@ -4321,24 +4326,6 @@ static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
return IB_LINK_LAYER_ETHERNET;
}
-static __be64 irdma_mac_to_guid(struct net_device *ndev)
-{
- const unsigned char *mac = ndev->dev_addr;
- __be64 guid;
- unsigned char *dst = (unsigned char *)&guid;
-
- dst[0] = mac[0] ^ 2;
- dst[1] = mac[1];
- dst[2] = mac[2];
- dst[3] = 0xff;
- dst[4] = 0xfe;
- dst[5] = mac[3];
- dst[6] = mac[4];
- dst[7] = mac[5];
-
- return guid;
-}
-
static const struct ib_device_ops irdma_roce_dev_ops = {
.attach_mcast = irdma_attach_mcast,
.create_ah = irdma_create_ah,
@@ -4408,7 +4395,8 @@ static const struct ib_device_ops irdma_dev_ops = {
static void irdma_init_roce_device(struct irdma_device *iwdev)
{
iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
- iwdev->ibdev.node_guid = irdma_mac_to_guid(iwdev->netdev);
+ addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
+ iwdev->netdev->dev_addr);
ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops);
}
@@ -4421,7 +4409,8 @@ static int irdma_init_iw_device(struct irdma_device *iwdev)
struct net_device *netdev = iwdev->netdev;
iwdev->ibdev.node_type = RDMA_NODE_RNIC;
- ether_addr_copy((u8 *)&iwdev->ibdev.node_guid, netdev->dev_addr);
+ addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
+ netdev->dev_addr);
iwdev->ibdev.ops.iw_add_ref = irdma_qp_add_ref;
iwdev->ibdev.ops.iw_rem_ref = irdma_qp_rem_ref;
iwdev->ibdev.ops.iw_get_qp = irdma_get_qp;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 0d2fa3338784..1c3d97229988 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -85,14 +85,6 @@ static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
static struct workqueue_struct *wq;
-static void init_query_mad(struct ib_smp *mad)
-{
- mad->base_version = 1;
- mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- mad->class_version = 1;
- mad->method = IB_MGMT_METHOD_GET;
-}
-
static int check_flow_steering_support(struct mlx4_dev *dev)
{
int eth_num_ports = 0;
@@ -471,7 +463,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
@@ -669,7 +661,7 @@ static int ib_link_query_port(struct ib_device *ibdev, u32 port,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -721,7 +713,7 @@ static int ib_link_query_port(struct ib_device *ibdev, u32 port,
/* If reported active speed is QDR, check if is FDR-10 */
if (props->active_speed == IB_SPEED_QDR) {
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -848,7 +840,7 @@ int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -870,7 +862,7 @@ int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
}
}
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
in_mad->attr_mod = cpu_to_be32(index / 8);
@@ -917,7 +909,7 @@ static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u32 port,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE;
in_mad->attr_mod = 0;
@@ -971,7 +963,7 @@ int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
in_mad->attr_mod = cpu_to_be32(index / 32);
@@ -1990,7 +1982,7 @@ static int init_node_data(struct mlx4_ib_dev *dev)
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
if (mlx4_is_master(dev->dev))
mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
@@ -2784,10 +2776,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (err)
goto err_counter;
- ibdev->ib_uc_qpns_bitmap =
- kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count),
- sizeof(long),
- GFP_KERNEL);
+ ibdev->ib_uc_qpns_bitmap = bitmap_alloc(ibdev->steer_qpn_count,
+ GFP_KERNEL);
if (!ibdev->ib_uc_qpns_bitmap)
goto err_steer_qp_release;
@@ -2875,7 +2865,7 @@ err_diag_counters:
mlx4_ib_diag_cleanup(ibdev);
err_steer_free_bitmap:
- kfree(ibdev->ib_uc_qpns_bitmap);
+ bitmap_free(ibdev->ib_uc_qpns_bitmap);
err_steer_qp_release:
mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
@@ -2988,7 +2978,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
ibdev->steer_qpn_count);
- kfree(ibdev->ib_uc_qpns_bitmap);
+ bitmap_free(ibdev->ib_uc_qpns_bitmap);
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index a190fb581591..08371a80fdc2 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -328,8 +328,11 @@ static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
}
wc->vendor_err = cqe->vendor_err_synd;
- if (dump)
+ if (dump) {
+ mlx5_ib_warn(dev, "WC error: %d, Message: %s\n", wc->status,
+ ib_wc_status_msg(wc->status));
dump_cqe(dev, cqe);
+ }
}
static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index ec242a5a17a3..293ed709e5ed 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -291,7 +291,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port)
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -318,7 +318,7 @@ static int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
if (!in_mad)
return -ENOMEM;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
@@ -405,7 +405,7 @@ int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
@@ -430,7 +430,7 @@ int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
@@ -456,7 +456,7 @@ int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
in_mad->attr_mod = cpu_to_be32(index / 32);
@@ -485,7 +485,7 @@ int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -496,7 +496,7 @@ int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
memcpy(gid->raw, out_mad->data + 8, 8);
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
in_mad->attr_mod = cpu_to_be32(index / 8);
@@ -530,7 +530,7 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
/* props being zeroed by the caller, avoid zeroing it here */
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -584,6 +584,11 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
props->port_cap_flags2 & IB_PORT_LINK_SPEED_HDR_SUP)
props->active_speed = IB_SPEED_HDR;
break;
+ case 8:
+ if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP &&
+ props->port_cap_flags2 & IB_PORT_LINK_SPEED_NDR_SUP)
+ props->active_speed = IB_SPEED_NDR;
+ break;
}
}
@@ -591,7 +596,7 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
if (props->active_speed == 4) {
if (dev->port_caps[port - 1].ext_port_cap &
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index e3c33be9c5a0..cbc20e400be0 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -665,9 +665,9 @@ struct mlx5_ib_mr {
/* User MR data */
struct mlx5_cache_ent *cache_ent;
+ /* Everything after cache_ent is zero'd when MR allocated */
struct ib_umem *umem;
- /* This is zero'd when the MR is allocated */
union {
/* Used only while the MR is in the cache */
struct {
@@ -719,7 +719,7 @@ struct mlx5_ib_mr {
/* Zero the fields in the mr that are variant depending on usage */
static inline void mlx5_clear_mr(struct mlx5_ib_mr *mr)
{
- memset(mr->out, 0, sizeof(*mr) - offsetof(struct mlx5_ib_mr, out));
+ memset_after(mr, 0, cache_ent);
}
static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
@@ -1466,14 +1466,6 @@ extern const struct uapi_definition mlx5_ib_flow_defs[];
extern const struct uapi_definition mlx5_ib_qos_defs[];
extern const struct uapi_definition mlx5_ib_std_types_defs[];
-static inline void init_query_mad(struct ib_smp *mad)
-{
- mad->base_version = 1;
- mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- mad->class_version = 1;
- mad->method = IB_MGMT_METHOD_GET;
-}
-
static inline int is_qp1(enum ib_qp_type qp_type)
{
return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI;
diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c
index aef1d274a14e..9f0f79d02d3c 100644
--- a/drivers/infiniband/hw/mthca/mthca_allocator.c
+++ b/drivers/infiniband/hw/mthca/mthca_allocator.c
@@ -51,7 +51,7 @@ u32 mthca_alloc(struct mthca_alloc *alloc)
}
if (obj < alloc->max) {
- set_bit(obj, alloc->table);
+ __set_bit(obj, alloc->table);
obj |= alloc->top;
} else
obj = -1;
@@ -69,7 +69,7 @@ void mthca_free(struct mthca_alloc *alloc, u32 obj)
spin_lock_irqsave(&alloc->lock, flags);
- clear_bit(obj, alloc->table);
+ __clear_bit(obj, alloc->table);
alloc->last = min(alloc->last, obj);
alloc->top = (alloc->top + alloc->max) & alloc->mask;
@@ -79,8 +79,6 @@ void mthca_free(struct mthca_alloc *alloc, u32 obj)
int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,
u32 reserved)
{
- int i;
-
/* num must be a power of 2 */
if (num != 1 << (ffs(num) - 1))
return -EINVAL;
@@ -90,21 +88,18 @@ int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,
alloc->max = num;
alloc->mask = mask;
spin_lock_init(&alloc->lock);
- alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long),
- GFP_KERNEL);
+ alloc->table = bitmap_zalloc(num, GFP_KERNEL);
if (!alloc->table)
return -ENOMEM;
- bitmap_zero(alloc->table, num);
- for (i = 0; i < reserved; ++i)
- set_bit(i, alloc->table);
+ bitmap_set(alloc->table, 0, reserved);
return 0;
}
void mthca_alloc_cleanup(struct mthca_alloc *alloc)
{
- kfree(alloc->table);
+ bitmap_free(alloc->table);
}
/*
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index ce0e0867e488..192f83fd7c8a 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -101,13 +101,13 @@ static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
return -1;
found:
- clear_bit(seg, buddy->bits[o]);
+ __clear_bit(seg, buddy->bits[o]);
--buddy->num_free[o];
while (o > order) {
--o;
seg <<= 1;
- set_bit(seg ^ 1, buddy->bits[o]);
+ __set_bit(seg ^ 1, buddy->bits[o]);
++buddy->num_free[o];
}
@@ -125,13 +125,13 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
spin_lock(&buddy->lock);
while (test_bit(seg ^ 1, buddy->bits[order])) {
- clear_bit(seg ^ 1, buddy->bits[order]);
+ __clear_bit(seg ^ 1, buddy->bits[order]);
--buddy->num_free[order];
seg >>= 1;
++order;
}
- set_bit(seg, buddy->bits[order]);
+ __set_bit(seg, buddy->bits[order]);
++buddy->num_free[order];
spin_unlock(&buddy->lock);
@@ -139,7 +139,7 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
{
- int i, s;
+ int i;
buddy->max_order = max_order;
spin_lock_init(&buddy->lock);
@@ -152,22 +152,20 @@ static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
goto err_out;
for (i = 0; i <= buddy->max_order; ++i) {
- s = BITS_TO_LONGS(1 << (buddy->max_order - i));
- buddy->bits[i] = kmalloc_array(s, sizeof(long), GFP_KERNEL);
+ buddy->bits[i] = bitmap_zalloc(1 << (buddy->max_order - i),
+ GFP_KERNEL);
if (!buddy->bits[i])
goto err_out_free;
- bitmap_zero(buddy->bits[i],
- 1 << (buddy->max_order - i));
}
- set_bit(0, buddy->bits[buddy->max_order]);
+ __set_bit(0, buddy->bits[buddy->max_order]);
buddy->num_free[buddy->max_order] = 1;
return 0;
err_out_free:
for (i = 0; i <= buddy->max_order; ++i)
- kfree(buddy->bits[i]);
+ bitmap_free(buddy->bits[i]);
err_out:
kfree(buddy->bits);
@@ -181,7 +179,7 @@ static void mthca_buddy_cleanup(struct mthca_buddy *buddy)
int i;
for (i = 0; i <= buddy->max_order; ++i)
- kfree(buddy->bits[i]);
+ bitmap_free(buddy->bits[i]);
kfree(buddy->bits);
kfree(buddy->num_free);
@@ -469,8 +467,7 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
mpt_entry->start = cpu_to_be64(iova);
mpt_entry->length = cpu_to_be64(total_size);
- memset(&mpt_entry->lkey, 0,
- sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));
+ memset_startat(mpt_entry, 0, lkey);
if (mr->mtt)
mpt_entry->mtt_seg =
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index ceee23ebc0f2..c46df53f26cf 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -50,14 +50,6 @@
#include <rdma/mthca-abi.h>
#include "mthca_memfree.h"
-static void init_query_mad(struct ib_smp *mad)
-{
- mad->base_version = 1;
- mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- mad->class_version = 1;
- mad->method = IB_MGMT_METHOD_GET;
-}
-
static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
struct ib_udata *uhw)
{
@@ -78,7 +70,7 @@ static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *pr
props->fw_ver = mdev->fw_ver;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mthca_MAD_IFC(mdev, 1, 1,
@@ -140,7 +132,7 @@ static int mthca_query_port(struct ib_device *ibdev,
/* props being zeroed by the caller, avoid zeroing it here */
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -234,7 +226,7 @@ static int mthca_query_pkey(struct ib_device *ibdev,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
in_mad->attr_mod = cpu_to_be32(index / 32);
@@ -263,7 +255,7 @@ static int mthca_query_gid(struct ib_device *ibdev, u32 port,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -274,7 +266,7 @@ static int mthca_query_gid(struct ib_device *ibdev, u32 port,
memcpy(gid->raw, out_mad->data + 8, 8);
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
in_mad->attr_mod = cpu_to_be32(index / 8);
@@ -1006,7 +998,7 @@ static int mthca_init_node_data(struct mthca_dev *dev)
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
err = mthca_MAD_IFC(dev, 1, 1,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index c51c3f40700e..265a581133dc 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1506,7 +1506,6 @@ int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
{
int status = -ENOMEM;
- size_t pd_bitmap_size;
struct ocrdma_alloc_pd_range *cmd;
struct ocrdma_alloc_pd_range_rsp *rsp;
@@ -1528,10 +1527,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
dev->pd_mgr->max_dpp_pd = rsp->pd_count;
- pd_bitmap_size =
- BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
- dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
- GFP_KERNEL);
+ dev->pd_mgr->pd_dpp_bitmap = bitmap_zalloc(rsp->pd_count,
+ GFP_KERNEL);
}
kfree(cmd);
}
@@ -1547,9 +1544,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
dev->pd_mgr->max_normal_pd = rsp->pd_count;
- pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
- dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
- GFP_KERNEL);
+ dev->pd_mgr->pd_norm_bitmap = bitmap_zalloc(rsp->pd_count,
+ GFP_KERNEL);
}
kfree(cmd);
@@ -1611,8 +1607,8 @@ void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
static void ocrdma_free_pd_pool(struct ocrdma_dev *dev)
{
ocrdma_mbx_dealloc_pd_range(dev);
- kfree(dev->pd_mgr->pd_norm_bitmap);
- kfree(dev->pd_mgr->pd_dpp_bitmap);
+ bitmap_free(dev->pd_mgr->pd_norm_bitmap);
+ bitmap_free(dev->pd_mgr->pd_dpp_bitmap);
kfree(dev->pd_mgr);
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 7abf6cf1e937..5d4b3bc16493 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -62,20 +62,6 @@ MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("Dual BSD/GPL");
-void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
-{
- u8 mac_addr[6];
-
- memcpy(&mac_addr[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
- guid[0] = mac_addr[0] ^ 2;
- guid[1] = mac_addr[1];
- guid[2] = mac_addr[2];
- guid[3] = 0xff;
- guid[4] = 0xfe;
- guid[5] = mac_addr[3];
- guid[6] = mac_addr[4];
- guid[7] = mac_addr[5];
-}
static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
u32 port_num)
{
@@ -203,7 +189,8 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
{
int ret;
- ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid);
+ addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid,
+ dev->nic_info.mac_addr);
BUILD_BUG_ON(sizeof(OCRDMA_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
sizeof(OCRDMA_NODE_DESC));
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 735123d0e9ec..acf9970ec245 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -41,6 +41,7 @@
*/
#include <linux/dma-mapping.h>
+#include <net/addrconf.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
@@ -74,7 +75,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
memset(attr, 0, sizeof *attr);
memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
- ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
+ addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
+ dev->nic_info.mac_addr);
attr->max_mr_size = dev->attr.max_mr_size;
attr->page_size_cap = 0xffff000;
attr->vendor_id = dev->nic_info.pdev->vendor;
@@ -245,13 +247,13 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
{
u16 pd_bitmap_idx = 0;
- const unsigned long *pd_bitmap;
+ unsigned long *pd_bitmap;
if (dpp_pool) {
pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
dev->pd_mgr->max_dpp_pd);
- __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
+ __set_bit(pd_bitmap_idx, pd_bitmap);
dev->pd_mgr->pd_dpp_count++;
if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
@@ -259,7 +261,7 @@ static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
dev->pd_mgr->max_normal_pd);
- __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
+ __set_bit(pd_bitmap_idx, pd_bitmap);
dev->pd_mgr->pd_norm_count++;
if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
@@ -1844,12 +1846,10 @@ int ocrdma_modify_srq(struct ib_srq *ibsrq,
int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
- int status;
struct ocrdma_srq *srq;
srq = get_ocrdma_srq(ibsrq);
- status = ocrdma_mbx_query_srq(srq, srq_attr);
- return status;
+ return ocrdma_mbx_query_srq(srq, srq_attr);
}
int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
@@ -1960,7 +1960,6 @@ static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
const struct ib_send_wr *wr)
{
- int status;
struct ocrdma_sge *sge;
u32 wqe_size = sizeof(*hdr);
@@ -1972,8 +1971,7 @@ static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
sge = (struct ocrdma_sge *)(hdr + 1);
}
- status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
- return status;
+ return ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
}
static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index b73d742a520c..f860b7fcef33 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -59,7 +59,6 @@ int ocrdma_query_port(struct ib_device *ibdev, u32 port,
enum rdma_protocol_type
ocrdma_query_protocol(struct ib_device *device, u32 port_num);
-void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid);
int ocrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);
int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 9100009f0a23..a53476653b0d 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1931,6 +1931,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
/* db offset was calculated in copy_qp_uresp, now set in the user q */
if (qedr_qp_has_sq(qp)) {
qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
+ qp->sq.max_wr = attrs->cap.max_send_wr;
rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
&qp->usq.db_rec_data->db_data,
DB_REC_WIDTH_32B,
@@ -1941,6 +1942,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
if (qedr_qp_has_rq(qp)) {
qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
+ qp->rq.max_wr = attrs->cap.max_recv_wr;
rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
&qp->urq.db_rec_data->db_data,
DB_REC_WIDTH_32B,
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index a9b83bc13f4a..aea571943768 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -3030,7 +3030,7 @@ static int qib_6120_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
/* Does read/modify/write to appropriate registers to
* set output and direction bits selected by mask.
- * these are in their canonical postions (e.g. lsb of
+ * these are in their canonical positions (e.g. lsb of
* dir will end up in D48 of extctrl on existing chips).
* returns contents of GP Inputs.
*/
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index d1c0bc31869f..80a8dd6c7814 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -3742,7 +3742,7 @@ static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
/*
* Does read/modify/write to appropriate registers to
* set output and direction bits selected by mask.
- * these are in their canonical postions (e.g. lsb of
+ * these are in their canonical positions (e.g. lsb of
* dir will end up in D48 of extctrl on existing chips).
* returns contents of GP Inputs.
*/
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index ab98b6a3ae1e..ceed302cf6a0 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -5665,7 +5665,7 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
/*
* Does read/modify/write to appropriate registers to
* set output and direction bits selected by mask.
- * these are in their canonical postions (e.g. lsb of
+ * these are in their canonical positions (e.g. lsb of
* dir will end up in D48 of extctrl on existing chips).
* returns contents of GP Inputs.
*/
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index 586b0e52ba7f..7d868f033bbf 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -243,10 +243,11 @@ static struct attribute *usnic_ib_qpn_default_attrs[] = {
&qpn_attr_summary.attr,
NULL
};
+ATTRIBUTE_GROUPS(usnic_ib_qpn_default);
static struct kobj_type usnic_ib_qpn_type = {
.sysfs_ops = &usnic_ib_qpn_sysfs_ops,
- .default_attrs = usnic_ib_qpn_default_attrs
+ .default_groups = usnic_ib_qpn_default_groups,
};
int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev)
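The usnic sysfs hunk above follows the tree-wide move from kobj_type.default_attrs to
default_groups: ATTRIBUTE_GROUPS() takes an existing *_attrs array and emits the matching
*_group plus the NULL-terminated *_groups symbol that default_groups consumes. A minimal
sketch of the pattern, with illustrative foo/bar names rather than the driver's own:

	#include <linux/kobject.h>
	#include <linux/sysfs.h>

	/* "foo"/"bar" are illustrative names, not part of the usnic driver. */
	static ssize_t bar_show(struct kobject *kobj, struct kobj_attribute *attr,
				char *buf)
	{
		return sysfs_emit(buf, "ok\n");
	}
	static struct kobj_attribute bar_attr = __ATTR_RO(bar);

	static struct attribute *foo_attrs[] = {
		&bar_attr.attr,
		NULL
	};
	ATTRIBUTE_GROUPS(foo);	/* emits foo_group and the NULL-terminated foo_groups */

	static struct kobj_type foo_ktype = {
		.sysfs_ops	= &kobj_sysfs_ops,
		.default_groups	= foo_groups,	/* replaces the removed .default_attrs */
	};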
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 756a83bcff58..5a0e26cd648e 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -442,12 +442,10 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct usnic_ib_pd *pd = to_upd(ibpd);
- void *umem_pd;
- umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
- if (IS_ERR_OR_NULL(umem_pd)) {
- return umem_pd ? PTR_ERR(umem_pd) : -ENOMEM;
- }
+ pd->umem_pd = usnic_uiom_alloc_pd();
+ if (IS_ERR(pd->umem_pd))
+ return PTR_ERR(pd->umem_pd);
return 0;
}
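The usnic_ib_alloc_pd() hunk drops IS_ERR_OR_NULL() because usnic_uiom_alloc_pd() follows
the ERR_PTR() convention and is not expected to return NULL. A minimal sketch of that
convention, using a hypothetical foo_alloc()/bar_setup() pair:

	#include <linux/err.h>
	#include <linux/slab.h>

	/* Hypothetical types; the point is the ERR_PTR()/IS_ERR() convention. */
	struct foo { int id; };
	struct bar { struct foo *foo; };

	/* Returns a valid pointer or ERR_PTR(-errno), never NULL. */
	static struct foo *foo_alloc(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return ERR_PTR(-ENOMEM);
		return f;
	}

	static int bar_setup(struct bar *bar)
	{
		bar->foo = foo_alloc();
		if (IS_ERR(bar->foo))	/* IS_ERR_OR_NULL() would be redundant here */
			return PTR_ERR(bar->foo);
		return 0;
	}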
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
index bf51357ea3aa..9a4de962e947 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
@@ -63,12 +63,12 @@ int pvrdma_uar_table_init(struct pvrdma_dev *dev)
tbl->max = num;
tbl->mask = mask;
spin_lock_init(&tbl->lock);
- tbl->table = kcalloc(BITS_TO_LONGS(num), sizeof(long), GFP_KERNEL);
+ tbl->table = bitmap_zalloc(num, GFP_KERNEL);
if (!tbl->table)
return -ENOMEM;
/* 0th UAR is taken by the device. */
- set_bit(0, tbl->table);
+ __set_bit(0, tbl->table);
return 0;
}
@@ -77,7 +77,7 @@ void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev)
{
struct pvrdma_id_table *tbl = &dev->uar_table.tbl;
- kfree(tbl->table);
+ bitmap_free(tbl->table);
}
int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar)
@@ -100,7 +100,7 @@ int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar)
return -ENOMEM;
}
- set_bit(obj, tbl->table);
+ __set_bit(obj, tbl->table);
obj |= tbl->top;
spin_unlock_irqrestore(&tbl->lock, flags);
@@ -120,7 +120,7 @@ void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar)
obj = uar->index & (tbl->max - 1);
spin_lock_irqsave(&tbl->lock, flags);
- clear_bit(obj, tbl->table);
+ __clear_bit(obj, tbl->table);
tbl->last = min(tbl->last, obj);
tbl->top = (tbl->top + tbl->max) & tbl->mask;
spin_unlock_irqrestore(&tbl->lock, flags);
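The pvrdma UAR-table hunks show the bitmap helper conversion used throughout this series:
bitmap_zalloc()/bitmap_free() replace the open-coded kcalloc(BITS_TO_LONGS(...)) allocation,
and the non-atomic __set_bit()/__clear_bit() are sufficient because every update is
serialized by tbl->lock. A hedged sketch of the same pattern on a made-up id_table:

	#include <linux/bitmap.h>
	#include <linux/errno.h>
	#include <linux/gfp.h>
	#include <linux/spinlock.h>

	/* Made-up table; mirrors the shape of the pvrdma UAR table above. */
	struct id_table {
		unsigned int	max;
		unsigned long	*table;
		spinlock_t	lock;
	};

	static int id_table_init(struct id_table *tbl, unsigned int num)
	{
		spin_lock_init(&tbl->lock);
		tbl->max = num;
		/* one call instead of kcalloc(BITS_TO_LONGS(num), sizeof(long), ...) */
		tbl->table = bitmap_zalloc(num, GFP_KERNEL);
		if (!tbl->table)
			return -ENOMEM;
		__set_bit(0, tbl->table);	/* no concurrency yet, non-atomic is fine */
		return 0;
	}

	static int id_alloc(struct id_table *tbl)
	{
		unsigned long flags;
		unsigned int id;

		spin_lock_irqsave(&tbl->lock, flags);
		id = find_first_zero_bit(tbl->table, tbl->max);
		if (id < tbl->max)
			__set_bit(id, tbl->table);	/* serialized by tbl->lock */
		spin_unlock_irqrestore(&tbl->lock, flags);

		return id < tbl->max ? (int)id : -ENOMEM;
	}

	static void id_table_cleanup(struct id_table *tbl)
	{
		bitmap_free(tbl->table);	/* pairs with bitmap_zalloc() */
	}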
diff --git a/drivers/infiniband/sw/rxe/Makefile b/drivers/infiniband/sw/rxe/Makefile
index 1e24673e9318..5395a581f4bb 100644
--- a/drivers/infiniband/sw/rxe/Makefile
+++ b/drivers/infiniband/sw/rxe/Makefile
@@ -22,5 +22,4 @@ rdma_rxe-y := \
rxe_mcast.o \
rxe_task.o \
rxe_net.o \
- rxe_sysfs.o \
rxe_hw_counters.o
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 8e0f9c489cab..fab291245366 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -13,8 +13,6 @@ MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
MODULE_DESCRIPTION("Soft RDMA transport");
MODULE_LICENSE("Dual BSD/GPL");
-bool rxe_initialized;
-
/* free resources for a rxe device all objects created for this device must
* have been destroyed
*/
@@ -290,7 +288,6 @@ static int __init rxe_module_init(void)
return err;
rdma_link_register(&rxe_link_ops);
- rxe_initialized = true;
pr_info("loaded\n");
return 0;
}
@@ -301,7 +298,6 @@ static void __exit rxe_module_exit(void)
ib_unregister_driver(RDMA_DRIVER_RXE);
rxe_net_exit();
- rxe_initialized = false;
pr_info("unloaded\n");
}
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index 1bb3fb618bf5..fb9066e6f5f0 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -39,8 +39,6 @@
#define RXE_ROCE_V2_SPORT (0xc000)
-extern bool rxe_initialized;
-
void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name);
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index d771ba8449a1..f363fe3fa414 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -458,8 +458,6 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
struct rxe_pkt_info *pkt,
struct rxe_send_wqe *wqe)
{
- unsigned long flags;
-
if (wqe->has_rd_atomic) {
wqe->has_rd_atomic = 0;
atomic_inc(&qp->req.rd_atomic);
@@ -472,11 +470,11 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
/* state_lock used by requester & completer */
- spin_lock_irqsave(&qp->state_lock, flags);
+ spin_lock_bh(&qp->state_lock);
if ((qp->req.state == QP_STATE_DRAIN) &&
(qp->comp.psn == qp->req.psn)) {
qp->req.state = QP_STATE_DRAINED;
- spin_unlock_irqrestore(&qp->state_lock, flags);
+ spin_unlock_bh(&qp->state_lock);
if (qp->ibqp.event_handler) {
struct ib_event ev;
@@ -488,7 +486,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
qp->ibqp.qp_context);
}
} else {
- spin_unlock_irqrestore(&qp->state_lock, flags);
+ spin_unlock_bh(&qp->state_lock);
}
}
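The rxe conversions from spin_lock_irqsave() to spin_lock_bh() rest on the assumption,
made by this series, that these locks are only taken from process context and tasklet
(softirq) context, never from a hard-IRQ handler, so saving and restoring the interrupt
flags is unnecessary. A small sketch contrasting the two variants:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);
	static int demo_count;

	/* Heavy variant: safe even if the lock is also taken from hard-IRQ context. */
	static void demo_inc_irqsave(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&demo_lock, flags);
		demo_count++;
		spin_unlock_irqrestore(&demo_lock, flags);
	}

	/* Lighter variant used by the rxe conversion: only disables softirqs, which
	 * is enough when the lock is shared between process context and a tasklet. */
	static void demo_inc_bh(void)
	{
		spin_lock_bh(&demo_lock);
		demo_count++;
		spin_unlock_bh(&demo_lock);
	}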
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index 6848426c074f..6baaaa34458e 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -42,14 +42,13 @@ err1:
static void rxe_send_complete(struct tasklet_struct *t)
{
struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
- unsigned long flags;
- spin_lock_irqsave(&cq->cq_lock, flags);
+ spin_lock_bh(&cq->cq_lock);
if (cq->is_dying) {
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
return;
}
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
@@ -106,15 +105,14 @@ int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
struct ib_event ev;
- unsigned long flags;
int full;
void *addr;
- spin_lock_irqsave(&cq->cq_lock, flags);
+ spin_lock_bh(&cq->cq_lock);
full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (unlikely(full)) {
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
if (cq->ibcq.event_handler) {
ev.device = cq->ibcq.device;
ev.element.cq = &cq->ibcq;
@@ -130,7 +128,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
if ((cq->notify == IB_CQ_NEXT_COMP) ||
(cq->notify == IB_CQ_SOLICITED && solicited)) {
@@ -143,16 +141,14 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
void rxe_cq_disable(struct rxe_cq *cq)
{
- unsigned long flags;
-
- spin_lock_irqsave(&cq->cq_lock, flags);
+ spin_lock_bh(&cq->cq_lock);
cq->is_dying = true;
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
}
-void rxe_cq_cleanup(struct rxe_pool_entry *arg)
+void rxe_cq_cleanup(struct rxe_pool_elem *elem)
{
- struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);
+ struct rxe_cq *cq = container_of(elem, typeof(*cq), elem);
if (cq->queue)
rxe_queue_cleanup(cq->queue);
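The rxe_pool rework renames struct rxe_pool_entry to struct rxe_pool_elem and the embedded
member from pelem to elem; cleanup callbacks keep using container_of() to get from the
embedded element back to the owning object. A sketch of that embedded-element pattern with
illustrative names:

	#include <linux/kernel.h>
	#include <linux/types.h>

	/* Illustrative stand-ins for rxe_pool_elem and an object embedding it. */
	struct pool_elem {
		u32 index;
	};

	struct my_obj {
		struct pool_elem	elem;	/* embedded member, named "elem" as in the rework */
		void			*queue;
	};

	static void my_obj_cleanup(struct pool_elem *elem)
	{
		struct my_obj *obj = container_of(elem, struct my_obj, elem);

		/* tear down resources owned by obj, e.g. obj->queue */
		obj->queue = NULL;
	}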
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 1ca43b859d80..b1e174afb1d4 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -37,7 +37,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);
void rxe_cq_disable(struct rxe_cq *cq);
-void rxe_cq_cleanup(struct rxe_pool_entry *arg);
+void rxe_cq_cleanup(struct rxe_pool_elem *arg);
/* rxe_mcast.c */
int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
@@ -51,7 +51,7 @@ int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
void rxe_drop_all_mcast_groups(struct rxe_qp *qp);
-void rxe_mc_cleanup(struct rxe_pool_entry *arg);
+void rxe_mc_cleanup(struct rxe_pool_elem *arg);
/* rxe_mmap.c */
struct rxe_mmap_info {
@@ -89,7 +89,7 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey);
int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr);
int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
-void rxe_mr_cleanup(struct rxe_pool_entry *arg);
+void rxe_mr_cleanup(struct rxe_pool_elem *arg);
/* rxe_mw.c */
int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata);
@@ -97,7 +97,7 @@ int rxe_dealloc_mw(struct ib_mw *ibmw);
int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey);
struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey);
-void rxe_mw_cleanup(struct rxe_pool_entry *arg);
+void rxe_mw_cleanup(struct rxe_pool_elem *arg);
/* rxe_net.c */
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
@@ -131,7 +131,7 @@ void rxe_qp_error(struct rxe_qp *qp);
void rxe_qp_destroy(struct rxe_qp *qp);
-void rxe_qp_cleanup(struct rxe_pool_entry *arg);
+void rxe_qp_cleanup(struct rxe_pool_elem *elem);
static inline int qp_num(struct rxe_qp *qp)
{
diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
index 1c1d1b53312d..bd1ac88b8700 100644
--- a/drivers/infiniband/sw/rxe/rxe_mcast.c
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -40,12 +40,11 @@ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
int err;
struct rxe_mc_grp *grp;
struct rxe_pool *pool = &rxe->mc_grp_pool;
- unsigned long flags;
if (rxe->attr.max_mcast_qp_attach == 0)
return -EINVAL;
- write_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_bh(&pool->pool_lock);
grp = rxe_pool_get_key_locked(pool, mgid);
if (grp)
@@ -53,13 +52,13 @@ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
grp = create_grp(rxe, pool, mgid);
if (IS_ERR(grp)) {
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
err = PTR_ERR(grp);
return err;
}
done:
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
*grp_p = grp;
return 0;
}
@@ -169,9 +168,9 @@ void rxe_drop_all_mcast_groups(struct rxe_qp *qp)
}
}
-void rxe_mc_cleanup(struct rxe_pool_entry *arg)
+void rxe_mc_cleanup(struct rxe_pool_elem *elem)
{
- struct rxe_mc_grp *grp = container_of(arg, typeof(*grp), pelem);
+ struct rxe_mc_grp *grp = container_of(elem, typeof(*grp), elem);
struct rxe_dev *rxe = grp->rxe;
rxe_drop_key(grp);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index bcf717bcf0b3..453ef3c9d535 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -50,7 +50,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
static void rxe_mr_init(int access, struct rxe_mr *mr)
{
- u32 lkey = mr->pelem.index << 8 | rxe_get_next_key(-1);
+ u32 lkey = mr->elem.index << 8 | rxe_get_next_key(-1);
u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;
/* set ibmr->l/rkey and also copy into private l/rkey
@@ -697,9 +697,9 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
return 0;
}
-void rxe_mr_cleanup(struct rxe_pool_entry *arg)
+void rxe_mr_cleanup(struct rxe_pool_elem *elem)
{
- struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem);
+ struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
ib_umem_release(mr->umem);
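As in the rxe_mr_init() hunk above, rxe composes lkeys and rkeys by packing the pool
element index above an 8-bit key byte, and recovers the index later by shifting right by 8
(as the rxe_pool_get_index() callers do). A tiny sketch of that layout, assuming only the
low byte varies per registration:

	#include <linux/types.h>

	/* Assumed layout: variable key byte in bits 7:0, pool index above it. */
	static u32 make_key(u32 index, u8 key)
	{
		return (index << 8) | key;
	}

	static u32 key_to_index(u32 key)
	{
		return key >> 8;	/* used when looking the MR/MW back up by its key */
	}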
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
index 9534a7fe1a98..32dd8c0b8b9e 100644
--- a/drivers/infiniband/sw/rxe/rxe_mw.c
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -21,7 +21,7 @@ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
}
rxe_add_index(mw);
- mw->rkey = ibmw->rkey = (mw->pelem.index << 8) | rxe_get_next_key(-1);
+ mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1);
mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ?
RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
spin_lock_init(&mw->lock);
@@ -56,11 +56,10 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
{
struct rxe_mw *mw = to_rmw(ibmw);
struct rxe_pd *pd = to_rpd(ibmw->pd);
- unsigned long flags;
- spin_lock_irqsave(&mw->lock, flags);
+ spin_lock_bh(&mw->lock);
rxe_do_dealloc_mw(mw);
- spin_unlock_irqrestore(&mw->lock, flags);
+ spin_unlock_bh(&mw->lock);
rxe_drop_ref(mw);
rxe_drop_ref(pd);
@@ -197,7 +196,6 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
u32 mw_rkey = wqe->wr.wr.mw.mw_rkey;
u32 mr_lkey = wqe->wr.wr.mw.mr_lkey;
- unsigned long flags;
mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8);
if (unlikely(!mw)) {
@@ -225,7 +223,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
mr = NULL;
}
- spin_lock_irqsave(&mw->lock, flags);
+ spin_lock_bh(&mw->lock);
ret = rxe_check_bind_mw(qp, wqe, mw, mr);
if (ret)
@@ -233,7 +231,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
rxe_do_bind_mw(qp, wqe, mw, mr);
err_unlock:
- spin_unlock_irqrestore(&mw->lock, flags);
+ spin_unlock_bh(&mw->lock);
err_drop_mr:
if (mr)
rxe_drop_ref(mr);
@@ -280,7 +278,6 @@ static void rxe_do_invalidate_mw(struct rxe_mw *mw)
int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
{
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
- unsigned long flags;
struct rxe_mw *mw;
int ret;
@@ -295,7 +292,7 @@ int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
goto err_drop_ref;
}
- spin_lock_irqsave(&mw->lock, flags);
+ spin_lock_bh(&mw->lock);
ret = rxe_check_invalidate_mw(qp, mw);
if (ret)
@@ -303,7 +300,7 @@ int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
rxe_do_invalidate_mw(mw);
err_unlock:
- spin_unlock_irqrestore(&mw->lock, flags);
+ spin_unlock_bh(&mw->lock);
err_drop_ref:
rxe_drop_ref(mw);
err:
@@ -333,9 +330,9 @@ struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey)
return mw;
}
-void rxe_mw_cleanup(struct rxe_pool_entry *elem)
+void rxe_mw_cleanup(struct rxe_pool_elem *elem)
{
- struct rxe_mw *mw = container_of(elem, typeof(*mw), pelem);
+ struct rxe_mw *mw = container_of(elem, typeof(*mw), elem);
rxe_drop_index(mw);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 2cb810cb890a..be72bdbfb4ba 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -22,24 +22,20 @@ static struct rxe_recv_sockets recv_sockets;
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
- int err;
unsigned char ll_addr[ETH_ALEN];
ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
- err = dev_mc_add(rxe->ndev, ll_addr);
- return err;
+ return dev_mc_add(rxe->ndev, ll_addr);
}
int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
{
- int err;
unsigned char ll_addr[ETH_ALEN];
ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
- err = dev_mc_del(rxe->ndev, ll_addr);
- return err;
+ return dev_mc_del(rxe->ndev, ll_addr);
}
static struct dst_entry *rxe_find_route4(struct net_device *ndev,
@@ -444,7 +440,6 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
else
err = rxe_send(skb, pkt);
if (err) {
- rxe->xmit_errors++;
rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
return err;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c
index 3ef5a10a6efd..df596ba7527d 100644
--- a/drivers/infiniband/sw/rxe/rxe_opcode.c
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.c
@@ -108,8 +108,8 @@ struct rxe_wr_opcode_info rxe_wr_opcode_info[] = {
struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
[IB_OPCODE_RC_SEND_FIRST] = {
.name = "IB_OPCODE_RC_SEND_FIRST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK
- | RXE_SEND_MASK | RXE_START_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK |
+ RXE_SEND_MASK | RXE_START_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -117,9 +117,9 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
}
},
[IB_OPCODE_RC_SEND_MIDDLE] = {
- .name = "IB_OPCODE_RC_SEND_MIDDLE]",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK
- | RXE_MIDDLE_MASK,
+ .name = "IB_OPCODE_RC_SEND_MIDDLE",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -128,8 +128,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_SEND_LAST] = {
.name = "IB_OPCODE_RC_SEND_LAST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
- | RXE_SEND_MASK | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
+ RXE_SEND_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -138,21 +138,21 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RC_SEND_ONLY] = {
.name = "IB_OPCODE_RC_SEND_ONLY",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
- | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
+ RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -161,33 +161,33 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RC_RDMA_WRITE_FIRST] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_FIRST",
- .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK,
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_MIDDLE",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -196,8 +196,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_RDMA_WRITE_LAST] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_LAST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -206,69 +206,69 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RC_RDMA_WRITE_ONLY] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_ONLY",
- .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK
- | RXE_END_MASK,
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
- .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RC_RDMA_READ_REQUEST] = {
.name = "IB_OPCODE_RC_RDMA_READ_REQUEST",
- .mask = RXE_RETH_MASK | RXE_REQ_MASK | RXE_READ_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RETH_MASK | RXE_REQ_MASK | RXE_READ_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = {
.name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST",
- .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
- | RXE_START_MASK,
+ .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_AETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = {
@@ -282,109 +282,110 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = {
.name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST",
- .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
- | RXE_END_MASK,
+ .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_AETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = {
.name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY",
- .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_AETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RC_ACKNOWLEDGE] = {
.name = "IB_OPCODE_RC_ACKNOWLEDGE",
- .mask = RXE_AETH_MASK | RXE_ACK_MASK | RXE_START_MASK
- | RXE_END_MASK,
+ .mask = RXE_AETH_MASK | RXE_ACK_MASK | RXE_START_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_AETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = {
.name = "IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE",
- .mask = RXE_AETH_MASK | RXE_ATMACK_MASK | RXE_ACK_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_AETH_MASK | RXE_ATMACK_MASK | RXE_ACK_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_AETH] = RXE_BTH_BYTES,
- [RXE_ATMACK] = RXE_BTH_BYTES
- + RXE_AETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_ATMACK_BYTES + RXE_AETH_BYTES,
+ [RXE_ATMACK] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_ATMACK_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RC_COMPARE_SWAP] = {
.name = "IB_OPCODE_RC_COMPARE_SWAP",
- .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_ATMETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_ATMETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_ATMETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_ATMETH_BYTES,
}
},
[IB_OPCODE_RC_FETCH_ADD] = {
.name = "IB_OPCODE_RC_FETCH_ADD",
- .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_ATMETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_ATMETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_ATMETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_ATMETH_BYTES,
}
},
[IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = {
.name = "IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE",
- .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+ .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IETH_BYTES,
}
},
[IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = {
.name = "IB_OPCODE_RC_SEND_ONLY_INV",
- .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_END_MASK | RXE_START_MASK,
+ .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_END_MASK | RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_IETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IETH_BYTES,
}
},
/* UC */
[IB_OPCODE_UC_SEND_FIRST] = {
.name = "IB_OPCODE_UC_SEND_FIRST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK
- | RXE_SEND_MASK | RXE_START_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK |
+ RXE_SEND_MASK | RXE_START_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -393,8 +394,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_SEND_MIDDLE] = {
.name = "IB_OPCODE_UC_SEND_MIDDLE",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -403,8 +404,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_SEND_LAST] = {
.name = "IB_OPCODE_UC_SEND_LAST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
- | RXE_SEND_MASK | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
+ RXE_SEND_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -413,21 +414,21 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_UC_SEND_ONLY] = {
.name = "IB_OPCODE_UC_SEND_ONLY",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
- | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
+ RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -436,33 +437,33 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_UC_RDMA_WRITE_FIRST] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_FIRST",
- .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK,
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_MIDDLE",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -471,8 +472,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_RDMA_WRITE_LAST] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_LAST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -481,460 +482,460 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_UC_RDMA_WRITE_ONLY] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_ONLY",
- .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK
- | RXE_END_MASK,
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
- .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
/* RD */
[IB_OPCODE_RD_SEND_FIRST] = {
.name = "IB_OPCODE_RD_SEND_FIRST",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_SEND_MIDDLE] = {
.name = "IB_OPCODE_RD_SEND_MIDDLE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_SEND_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_SEND_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_SEND_LAST] = {
.name = "IB_OPCODE_RD_SEND_LAST",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_COMP_MASK | RXE_SEND_MASK
- | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_COMP_MASK | RXE_SEND_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK
- | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_SEND_MASK
- | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_SEND_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RD_SEND_ONLY] = {
.name = "IB_OPCODE_RD_SEND_ONLY",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK
- | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_FIRST] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_FIRST",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
- | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK,
- .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_RETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_MIDDLE] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_MIDDLE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_LAST] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_LAST",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK
- | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_ONLY] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_ONLY",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
- | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK
- | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_RETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
- | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES
- + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
+ RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES +
+ RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_RETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_RETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_RETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RD_RDMA_READ_REQUEST] = {
.name = "IB_OPCODE_RD_RDMA_READ_REQUEST",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
- | RXE_REQ_MASK | RXE_READ_MASK
- | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
+ RXE_REQ_MASK | RXE_READ_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_RETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES
- + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST] = {
.name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST",
- .mask = RXE_RDETH_MASK | RXE_AETH_MASK
- | RXE_PAYLOAD_MASK | RXE_ACK_MASK
- | RXE_START_MASK,
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK |
+ RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_AETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE] = {
.name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE",
- .mask = RXE_RDETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_RDETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST] = {
.name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST",
- .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK
- | RXE_ACK_MASK | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_ACK_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_AETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY] = {
.name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY",
- .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK
- | RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_AETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RD_ACKNOWLEDGE] = {
.name = "IB_OPCODE_RD_ACKNOWLEDGE",
- .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ACK_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ACK_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_AETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
}
},
[IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE] = {
.name = "IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE",
- .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ATMACK_MASK
- | RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ATMACK_MASK |
+ RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_AETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_ATMACK] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_ATMACK] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RD_COMPARE_SWAP] = {
.name = "RD_COMPARE_SWAP",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK
- | RXE_REQ_MASK | RXE_ATOMIC_MASK
- | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK |
+ RXE_REQ_MASK | RXE_ATOMIC_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_ATMETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_ATMETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
[RXE_PAYLOAD] = RXE_BTH_BYTES +
- + RXE_ATMETH_BYTES
- + RXE_DETH_BYTES +
- + RXE_RDETH_BYTES,
+ RXE_ATMETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
}
},
[IB_OPCODE_RD_FETCH_ADD] = {
.name = "IB_OPCODE_RD_FETCH_ADD",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK
- | RXE_REQ_MASK | RXE_ATOMIC_MASK
- | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK |
+ RXE_REQ_MASK | RXE_ATOMIC_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_ATMETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_ATMETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
[RXE_PAYLOAD] = RXE_BTH_BYTES +
- + RXE_ATMETH_BYTES
- + RXE_DETH_BYTES +
- + RXE_RDETH_BYTES,
+ RXE_ATMETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
}
},
/* UD */
[IB_OPCODE_UD_SEND_ONLY] = {
.name = "IB_OPCODE_UD_SEND_ONLY",
- .mask = RXE_DETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_DETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_DETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE",
- .mask = RXE_DETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_DETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_DETH] = RXE_BTH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_DETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 2e80bb6aa957..4cb003885e00 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -5,13 +5,14 @@
*/
#include "rxe.h"
-#include "rxe_loc.h"
+
+#define RXE_POOL_ALIGN (16)
static const struct rxe_type_info {
const char *name;
size_t size;
size_t elem_offset;
- void (*cleanup)(struct rxe_pool_entry *obj);
+ void (*cleanup)(struct rxe_pool_elem *obj);
enum rxe_pool_flags flags;
u32 min_index;
u32 max_index;
@@ -21,19 +22,19 @@ static const struct rxe_type_info {
[RXE_TYPE_UC] = {
.name = "rxe-uc",
.size = sizeof(struct rxe_ucontext),
- .elem_offset = offsetof(struct rxe_ucontext, pelem),
+ .elem_offset = offsetof(struct rxe_ucontext, elem),
.flags = RXE_POOL_NO_ALLOC,
},
[RXE_TYPE_PD] = {
.name = "rxe-pd",
.size = sizeof(struct rxe_pd),
- .elem_offset = offsetof(struct rxe_pd, pelem),
+ .elem_offset = offsetof(struct rxe_pd, elem),
.flags = RXE_POOL_NO_ALLOC,
},
[RXE_TYPE_AH] = {
.name = "rxe-ah",
.size = sizeof(struct rxe_ah),
- .elem_offset = offsetof(struct rxe_ah, pelem),
+ .elem_offset = offsetof(struct rxe_ah, elem),
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_AH_INDEX,
.max_index = RXE_MAX_AH_INDEX,
@@ -41,7 +42,7 @@ static const struct rxe_type_info {
[RXE_TYPE_SRQ] = {
.name = "rxe-srq",
.size = sizeof(struct rxe_srq),
- .elem_offset = offsetof(struct rxe_srq, pelem),
+ .elem_offset = offsetof(struct rxe_srq, elem),
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_SRQ_INDEX,
.max_index = RXE_MAX_SRQ_INDEX,
@@ -49,7 +50,7 @@ static const struct rxe_type_info {
[RXE_TYPE_QP] = {
.name = "rxe-qp",
.size = sizeof(struct rxe_qp),
- .elem_offset = offsetof(struct rxe_qp, pelem),
+ .elem_offset = offsetof(struct rxe_qp, elem),
.cleanup = rxe_qp_cleanup,
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_QP_INDEX,
@@ -58,14 +59,14 @@ static const struct rxe_type_info {
[RXE_TYPE_CQ] = {
.name = "rxe-cq",
.size = sizeof(struct rxe_cq),
- .elem_offset = offsetof(struct rxe_cq, pelem),
+ .elem_offset = offsetof(struct rxe_cq, elem),
.flags = RXE_POOL_NO_ALLOC,
.cleanup = rxe_cq_cleanup,
},
[RXE_TYPE_MR] = {
.name = "rxe-mr",
.size = sizeof(struct rxe_mr),
- .elem_offset = offsetof(struct rxe_mr, pelem),
+ .elem_offset = offsetof(struct rxe_mr, elem),
.cleanup = rxe_mr_cleanup,
.flags = RXE_POOL_INDEX,
.min_index = RXE_MIN_MR_INDEX,
@@ -74,7 +75,7 @@ static const struct rxe_type_info {
[RXE_TYPE_MW] = {
.name = "rxe-mw",
.size = sizeof(struct rxe_mw),
- .elem_offset = offsetof(struct rxe_mw, pelem),
+ .elem_offset = offsetof(struct rxe_mw, elem),
.cleanup = rxe_mw_cleanup,
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_MW_INDEX,
@@ -83,7 +84,7 @@ static const struct rxe_type_info {
[RXE_TYPE_MC_GRP] = {
.name = "rxe-mc_grp",
.size = sizeof(struct rxe_mc_grp),
- .elem_offset = offsetof(struct rxe_mc_grp, pelem),
+ .elem_offset = offsetof(struct rxe_mc_grp, elem),
.cleanup = rxe_mc_cleanup,
.flags = RXE_POOL_KEY,
.key_offset = offsetof(struct rxe_mc_grp, mgid),
@@ -92,15 +93,10 @@ static const struct rxe_type_info {
[RXE_TYPE_MC_ELEM] = {
.name = "rxe-mc_elem",
.size = sizeof(struct rxe_mc_elem),
- .elem_offset = offsetof(struct rxe_mc_elem, pelem),
+ .elem_offset = offsetof(struct rxe_mc_elem, elem),
},
};
-static inline const char *pool_name(struct rxe_pool *pool)
-{
- return rxe_type_info[pool->type].name;
-}
-
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
int err = 0;
@@ -130,35 +126,36 @@ int rxe_pool_init(
enum rxe_elem_type type,
unsigned int max_elem)
{
+ const struct rxe_type_info *info = &rxe_type_info[type];
int err = 0;
- size_t size = rxe_type_info[type].size;
memset(pool, 0, sizeof(*pool));
pool->rxe = rxe;
+ pool->name = info->name;
pool->type = type;
pool->max_elem = max_elem;
- pool->elem_size = ALIGN(size, RXE_POOL_ALIGN);
- pool->flags = rxe_type_info[type].flags;
- pool->index.tree = RB_ROOT;
- pool->key.tree = RB_ROOT;
- pool->cleanup = rxe_type_info[type].cleanup;
+ pool->elem_size = ALIGN(info->size, RXE_POOL_ALIGN);
+ pool->elem_offset = info->elem_offset;
+ pool->flags = info->flags;
+ pool->cleanup = info->cleanup;
atomic_set(&pool->num_elem, 0);
rwlock_init(&pool->pool_lock);
- if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
- err = rxe_pool_init_index(pool,
- rxe_type_info[type].max_index,
- rxe_type_info[type].min_index);
+ if (pool->flags & RXE_POOL_INDEX) {
+ pool->index.tree = RB_ROOT;
+ err = rxe_pool_init_index(pool, info->max_index,
+ info->min_index);
if (err)
goto out;
}
- if (rxe_type_info[type].flags & RXE_POOL_KEY) {
- pool->key.key_offset = rxe_type_info[type].key_offset;
- pool->key.key_size = rxe_type_info[type].key_size;
+ if (pool->flags & RXE_POOL_KEY) {
+ pool->key.tree = RB_ROOT;
+ pool->key.key_offset = info->key_offset;
+ pool->key.key_size = info->key_size;
}
out:
@@ -169,9 +166,10 @@ void rxe_pool_cleanup(struct rxe_pool *pool)
{
if (atomic_read(&pool->num_elem) > 0)
pr_warn("%s pool destroyed with unfree'd elem\n",
- pool_name(pool));
+ pool->name);
- bitmap_free(pool->index.table);
+ if (pool->flags & RXE_POOL_INDEX)
+ bitmap_free(pool->index.table);
}
static u32 alloc_index(struct rxe_pool *pool)
@@ -189,15 +187,15 @@ static u32 alloc_index(struct rxe_pool *pool)
return index + pool->index.min_index;
}
-static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
+static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_elem *new)
{
struct rb_node **link = &pool->index.tree.rb_node;
struct rb_node *parent = NULL;
- struct rxe_pool_entry *elem;
+ struct rxe_pool_elem *elem;
while (*link) {
parent = *link;
- elem = rb_entry(parent, struct rxe_pool_entry, index_node);
+ elem = rb_entry(parent, struct rxe_pool_elem, index_node);
if (elem->index == new->index) {
pr_warn("element already exists!\n");
@@ -216,19 +214,20 @@ static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
return 0;
}
-static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
+static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_elem *new)
{
struct rb_node **link = &pool->key.tree.rb_node;
struct rb_node *parent = NULL;
- struct rxe_pool_entry *elem;
+ struct rxe_pool_elem *elem;
int cmp;
while (*link) {
parent = *link;
- elem = rb_entry(parent, struct rxe_pool_entry, key_node);
+ elem = rb_entry(parent, struct rxe_pool_elem, key_node);
cmp = memcmp((u8 *)elem + pool->key.key_offset,
- (u8 *)new + pool->key.key_offset, pool->key.key_size);
+ (u8 *)new + pool->key.key_offset,
+ pool->key.key_size);
if (cmp == 0) {
pr_warn("key already exists!\n");
@@ -247,7 +246,7 @@ static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
return 0;
}
-int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
+int __rxe_add_key_locked(struct rxe_pool_elem *elem, void *key)
{
struct rxe_pool *pool = elem->pool;
int err;
@@ -258,37 +257,35 @@ int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
return err;
}
-int __rxe_add_key(struct rxe_pool_entry *elem, void *key)
+int __rxe_add_key(struct rxe_pool_elem *elem, void *key)
{
struct rxe_pool *pool = elem->pool;
- unsigned long flags;
int err;
- write_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_bh(&pool->pool_lock);
err = __rxe_add_key_locked(elem, key);
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
return err;
}
-void __rxe_drop_key_locked(struct rxe_pool_entry *elem)
+void __rxe_drop_key_locked(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
rb_erase(&elem->key_node, &pool->key.tree);
}
-void __rxe_drop_key(struct rxe_pool_entry *elem)
+void __rxe_drop_key(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
- unsigned long flags;
- write_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_bh(&pool->pool_lock);
__rxe_drop_key_locked(elem);
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
}
-int __rxe_add_index_locked(struct rxe_pool_entry *elem)
+int __rxe_add_index_locked(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
int err;
@@ -299,20 +296,19 @@ int __rxe_add_index_locked(struct rxe_pool_entry *elem)
return err;
}
-int __rxe_add_index(struct rxe_pool_entry *elem)
+int __rxe_add_index(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
- unsigned long flags;
int err;
- write_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_bh(&pool->pool_lock);
err = __rxe_add_index_locked(elem);
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
return err;
}
-void __rxe_drop_index_locked(struct rxe_pool_entry *elem)
+void __rxe_drop_index_locked(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
@@ -320,32 +316,31 @@ void __rxe_drop_index_locked(struct rxe_pool_entry *elem)
rb_erase(&elem->index_node, &pool->index.tree);
}
-void __rxe_drop_index(struct rxe_pool_entry *elem)
+void __rxe_drop_index(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
- unsigned long flags;
- write_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_bh(&pool->pool_lock);
__rxe_drop_index_locked(elem);
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
}
void *rxe_alloc_locked(struct rxe_pool *pool)
{
- const struct rxe_type_info *info = &rxe_type_info[pool->type];
- struct rxe_pool_entry *elem;
- u8 *obj;
+ struct rxe_pool_elem *elem;
+ void *obj;
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto out_cnt;
- obj = kzalloc(info->size, GFP_ATOMIC);
+ obj = kzalloc(pool->elem_size, GFP_ATOMIC);
if (!obj)
goto out_cnt;
- elem = (struct rxe_pool_entry *)(obj + info->elem_offset);
+ elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
elem->pool = pool;
+ elem->obj = obj;
kref_init(&elem->ref_cnt);
return obj;
@@ -357,20 +352,20 @@ out_cnt:
void *rxe_alloc(struct rxe_pool *pool)
{
- const struct rxe_type_info *info = &rxe_type_info[pool->type];
- struct rxe_pool_entry *elem;
- u8 *obj;
+ struct rxe_pool_elem *elem;
+ void *obj;
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto out_cnt;
- obj = kzalloc(info->size, GFP_KERNEL);
+ obj = kzalloc(pool->elem_size, GFP_KERNEL);
if (!obj)
goto out_cnt;
- elem = (struct rxe_pool_entry *)(obj + info->elem_offset);
+ elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
elem->pool = pool;
+ elem->obj = obj;
kref_init(&elem->ref_cnt);
return obj;
@@ -380,12 +375,13 @@ out_cnt:
return NULL;
}
-int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
{
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto out_cnt;
elem->pool = pool;
+ elem->obj = (u8 *)elem - pool->elem_offset;
kref_init(&elem->ref_cnt);
return 0;
@@ -397,17 +393,16 @@ out_cnt:
void rxe_elem_release(struct kref *kref)
{
- struct rxe_pool_entry *elem =
- container_of(kref, struct rxe_pool_entry, ref_cnt);
+ struct rxe_pool_elem *elem =
+ container_of(kref, struct rxe_pool_elem, ref_cnt);
struct rxe_pool *pool = elem->pool;
- const struct rxe_type_info *info = &rxe_type_info[pool->type];
- u8 *obj;
+ void *obj;
if (pool->cleanup)
pool->cleanup(elem);
if (!(pool->flags & RXE_POOL_NO_ALLOC)) {
- obj = (u8 *)elem - info->elem_offset;
+ obj = elem->obj;
kfree(obj);
}
@@ -416,15 +411,14 @@ void rxe_elem_release(struct kref *kref)
void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
{
- const struct rxe_type_info *info = &rxe_type_info[pool->type];
struct rb_node *node;
- struct rxe_pool_entry *elem;
- u8 *obj;
+ struct rxe_pool_elem *elem;
+ void *obj;
node = pool->index.tree.rb_node;
while (node) {
- elem = rb_entry(node, struct rxe_pool_entry, index_node);
+ elem = rb_entry(node, struct rxe_pool_elem, index_node);
if (elem->index > index)
node = node->rb_left;
@@ -436,7 +430,7 @@ void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
if (node) {
kref_get(&elem->ref_cnt);
- obj = (u8 *)elem - info->elem_offset;
+ obj = elem->obj;
} else {
obj = NULL;
}
@@ -446,28 +440,26 @@ void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
- u8 *obj;
- unsigned long flags;
+ void *obj;
- read_lock_irqsave(&pool->pool_lock, flags);
+ read_lock_bh(&pool->pool_lock);
obj = rxe_pool_get_index_locked(pool, index);
- read_unlock_irqrestore(&pool->pool_lock, flags);
+ read_unlock_bh(&pool->pool_lock);
return obj;
}
void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
{
- const struct rxe_type_info *info = &rxe_type_info[pool->type];
struct rb_node *node;
- struct rxe_pool_entry *elem;
- u8 *obj;
+ struct rxe_pool_elem *elem;
+ void *obj;
int cmp;
node = pool->key.tree.rb_node;
while (node) {
- elem = rb_entry(node, struct rxe_pool_entry, key_node);
+ elem = rb_entry(node, struct rxe_pool_elem, key_node);
cmp = memcmp((u8 *)elem + pool->key.key_offset,
key, pool->key.key_size);
@@ -482,7 +474,7 @@ void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
if (node) {
kref_get(&elem->ref_cnt);
- obj = (u8 *)elem - info->elem_offset;
+ obj = elem->obj;
} else {
obj = NULL;
}
@@ -492,12 +484,11 @@ void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
- u8 *obj;
- unsigned long flags;
+ void *obj;
- read_lock_irqsave(&pool->pool_lock, flags);
+ read_lock_bh(&pool->pool_lock);
obj = rxe_pool_get_key_locked(pool, key);
- read_unlock_irqrestore(&pool->pool_lock, flags);
+ read_unlock_bh(&pool->pool_lock);
return obj;
}
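
The rxe_pool.c hunks above replace the repeated "(u8 *)elem - elem_offset" arithmetic with an obj back-pointer stored in each embedded pool element. A minimal user-space sketch of that embedding pattern, using hypothetical names (struct my_obj, struct pool_elem) rather than the driver's own types:

#include <stddef.h>
#include <stdio.h>

/* container_of as used in the kernel: recover the outer struct from a member */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pool_elem {
	void *obj;		/* back-pointer, set once at allocation time */
};

struct my_obj {			/* hypothetical pool user */
	int id;
	struct pool_elem elem;	/* embedded element, like rxe_qp.elem */
};

int main(void)
{
	struct my_obj qp = { .id = 7 };

	/* at "allocation", record the back-pointer once ... */
	qp.elem.obj = &qp;

	/* ... so later lookups return elem->obj instead of redoing the
	 * offset arithmetic at every call site */
	struct pool_elem *elem = &qp.elem;
	struct my_obj *found = elem->obj;				/* new style */
	struct my_obj *also = container_of(elem, struct my_obj, elem);	/* old style */

	printf("%d %d\n", found->id, also->id);
	return 0;
}

Either direction works; caching the pointer trades one word per element for not having to know the element offset at every lookup site.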
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 8ecd9f870aea..214279310f4d 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -7,9 +7,6 @@
#ifndef RXE_POOL_H
#define RXE_POOL_H
-#define RXE_POOL_ALIGN (16)
-#define RXE_POOL_CACHE_FLAGS (0)
-
enum rxe_pool_flags {
RXE_POOL_INDEX = BIT(1),
RXE_POOL_KEY = BIT(2),
@@ -30,10 +27,9 @@ enum rxe_elem_type {
RXE_NUM_TYPES, /* keep me last */
};
-struct rxe_pool_entry;
-
-struct rxe_pool_entry {
+struct rxe_pool_elem {
struct rxe_pool *pool;
+ void *obj;
struct kref ref_cnt;
struct list_head list;
@@ -47,14 +43,16 @@ struct rxe_pool_entry {
struct rxe_pool {
struct rxe_dev *rxe;
+ const char *name;
rwlock_t pool_lock; /* protects pool add/del/search */
- size_t elem_size;
- void (*cleanup)(struct rxe_pool_entry *obj);
+ void (*cleanup)(struct rxe_pool_elem *obj);
enum rxe_pool_flags flags;
enum rxe_elem_type type;
unsigned int max_elem;
atomic_t num_elem;
+ size_t elem_size;
+ size_t elem_offset;
/* only used if indexed */
struct {
@@ -89,51 +87,51 @@ void *rxe_alloc_locked(struct rxe_pool *pool);
void *rxe_alloc(struct rxe_pool *pool);
/* connect already allocated object to pool */
-int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem);
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem);
-#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->pelem)
+#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->elem)
/* assign an index to an indexed object and insert object into
* pool's rb tree holding and not holding the pool_lock
*/
-int __rxe_add_index_locked(struct rxe_pool_entry *elem);
+int __rxe_add_index_locked(struct rxe_pool_elem *elem);
-#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->pelem)
+#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->elem)
-int __rxe_add_index(struct rxe_pool_entry *elem);
+int __rxe_add_index(struct rxe_pool_elem *elem);
-#define rxe_add_index(obj) __rxe_add_index(&(obj)->pelem)
+#define rxe_add_index(obj) __rxe_add_index(&(obj)->elem)
/* drop an index and remove object from rb tree
* holding and not holding the pool_lock
*/
-void __rxe_drop_index_locked(struct rxe_pool_entry *elem);
+void __rxe_drop_index_locked(struct rxe_pool_elem *elem);
-#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->pelem)
+#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->elem)
-void __rxe_drop_index(struct rxe_pool_entry *elem);
+void __rxe_drop_index(struct rxe_pool_elem *elem);
-#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->pelem)
+#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->elem)
/* assign a key to a keyed object and insert object into
* pool's rb tree holding and not holding pool_lock
*/
-int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key);
+int __rxe_add_key_locked(struct rxe_pool_elem *elem, void *key);
-#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->pelem, key)
+#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->elem, key)
-int __rxe_add_key(struct rxe_pool_entry *elem, void *key);
+int __rxe_add_key(struct rxe_pool_elem *elem, void *key);
-#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->pelem, key)
+#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->elem, key)
/* remove elem from rb tree holding and not holding the pool_lock */
-void __rxe_drop_key_locked(struct rxe_pool_entry *elem);
+void __rxe_drop_key_locked(struct rxe_pool_elem *elem);
-#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->pelem)
+#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->elem)
-void __rxe_drop_key(struct rxe_pool_entry *elem);
+void __rxe_drop_key(struct rxe_pool_elem *elem);
-#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->pelem)
+#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->elem)
/* lookup an indexed object from index holding and not holding the pool_lock.
* takes a reference on object
@@ -153,9 +151,9 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key);
void rxe_elem_release(struct kref *kref);
/* take a reference on an object */
-#define rxe_add_ref(elem) kref_get(&(elem)->pelem.ref_cnt)
+#define rxe_add_ref(obj) kref_get(&(obj)->elem.ref_cnt)
/* drop a reference on an object */
-#define rxe_drop_ref(elem) kref_put(&(elem)->pelem.ref_cnt, rxe_elem_release)
+#define rxe_drop_ref(obj) kref_put(&(obj)->elem.ref_cnt, rxe_elem_release)
#endif /* RXE_POOL_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 54b8711321c1..afe11f475b8c 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -167,7 +167,7 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
qp->attr.path_mtu = 1;
qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);
- qpn = qp->pelem.index;
+ qpn = qp->elem.index;
port = &rxe->port;
switch (init->qp_type) {
@@ -832,9 +832,9 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
}
/* called when the last reference to the qp is dropped */
-void rxe_qp_cleanup(struct rxe_pool_entry *arg)
+void rxe_qp_cleanup(struct rxe_pool_elem *elem)
{
- struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);
+ struct rxe_qp *qp = container_of(elem, typeof(*qp), elem);
execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index 6e6e023c1b45..a1b283dd2d4c 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -151,7 +151,6 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
struct rxe_queue *new_q;
unsigned int num_elem = *num_elem_p;
int err;
- unsigned long flags = 0, flags1;
new_q = rxe_queue_init(q->rxe, &num_elem, elem_size, q->type);
if (!new_q)
@@ -165,17 +164,17 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
goto err1;
}
- spin_lock_irqsave(consumer_lock, flags1);
+ spin_lock_bh(consumer_lock);
if (producer_lock) {
- spin_lock_irqsave(producer_lock, flags);
+ spin_lock_bh(producer_lock);
err = resize_finish(q, new_q, num_elem);
- spin_unlock_irqrestore(producer_lock, flags);
+ spin_unlock_bh(producer_lock);
} else {
err = resize_finish(q, new_q, num_elem);
}
- spin_unlock_irqrestore(consumer_lock, flags1);
+ spin_unlock_bh(consumer_lock);
rxe_queue_cleanup(new_q); /* new/old dep on err */
if (err)
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 0c9d2af15f3d..5eb89052dd66 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -110,7 +110,6 @@ void rnr_nak_timer(struct timer_list *t)
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
struct rxe_send_wqe *wqe;
- unsigned long flags;
struct rxe_queue *q = qp->sq.queue;
unsigned int index = qp->req.wqe_index;
unsigned int cons;
@@ -124,25 +123,23 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
/* check to see if we are drained;
* state_lock used by requester and completer
*/
- spin_lock_irqsave(&qp->state_lock, flags);
+ spin_lock_bh(&qp->state_lock);
do {
if (qp->req.state != QP_STATE_DRAIN) {
/* comp just finished */
- spin_unlock_irqrestore(&qp->state_lock,
- flags);
+ spin_unlock_bh(&qp->state_lock);
break;
}
if (wqe && ((index != cons) ||
(wqe->state != wqe_state_posted))) {
/* comp not done yet */
- spin_unlock_irqrestore(&qp->state_lock,
- flags);
+ spin_unlock_bh(&qp->state_lock);
break;
}
qp->req.state = QP_STATE_DRAINED;
- spin_unlock_irqrestore(&qp->state_lock, flags);
+ spin_unlock_bh(&qp->state_lock);
if (qp->ibqp.event_handler) {
struct ib_event ev;
@@ -372,7 +369,6 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
int pad = (-payload) & 0x3;
int paylen;
int solicited;
- u16 pkey;
u32 qp_num;
int ack_req;
@@ -404,8 +400,6 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
(RXE_WRITE_MASK | RXE_IMMDT_MASK));
- pkey = IB_DEFAULT_PKEY_FULL;
-
qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
qp->attr.dest_qp_num;
@@ -414,7 +408,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
if (ack_req)
qp->req.noack_pkts = 0;
- bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
+ bth_init(pkt, pkt->opcode, solicited, 0, pad, IB_DEFAULT_PKEY_FULL, qp_num,
ack_req, pkt->psn);
/* init optional headers */
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index eb1c4c3b3a78..0c0721f04357 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -83,7 +83,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
srq->ibsrq.event_handler = init->event_handler;
srq->ibsrq.srq_context = init->srq_context;
srq->limit = init->attr.srq_limit;
- srq->srq_num = srq->pelem.index;
+ srq->srq_num = srq->elem.index;
srq->rq.max_wr = init->attr.max_wr;
srq->rq.max_sge = init->attr.max_sge;
diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c
deleted file mode 100644
index 666202ddff48..000000000000
--- a/drivers/infiniband/sw/rxe/rxe_sysfs.c
+++ /dev/null
@@ -1,119 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/*
- * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
- * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- */
-
-#include "rxe.h"
-#include "rxe_net.h"
-
-/* Copy argument and remove trailing CR. Return the new length. */
-static int sanitize_arg(const char *val, char *intf, int intf_len)
-{
- int len;
-
- if (!val)
- return 0;
-
- /* Remove newline. */
- for (len = 0; len < intf_len - 1 && val[len] && val[len] != '\n'; len++)
- intf[len] = val[len];
- intf[len] = 0;
-
- if (len == 0 || (val[len] != 0 && val[len] != '\n'))
- return 0;
-
- return len;
-}
-
-static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
-{
- int len;
- int err = 0;
- char intf[32];
- struct net_device *ndev;
- struct rxe_dev *exists;
-
- if (!rxe_initialized) {
- pr_err("Module parameters are not supported, use rdma link add or rxe_cfg\n");
- return -EAGAIN;
- }
-
- len = sanitize_arg(val, intf, sizeof(intf));
- if (!len) {
- pr_err("add: invalid interface name\n");
- return -EINVAL;
- }
-
- ndev = dev_get_by_name(&init_net, intf);
- if (!ndev) {
- pr_err("interface %s not found\n", intf);
- return -EINVAL;
- }
-
- if (is_vlan_dev(ndev)) {
- pr_err("rxe creation allowed on top of a real device only\n");
- err = -EPERM;
- goto err;
- }
-
- exists = rxe_get_dev_from_net(ndev);
- if (exists) {
- ib_device_put(&exists->ib_dev);
- pr_err("already configured on %s\n", intf);
- err = -EINVAL;
- goto err;
- }
-
- err = rxe_net_add("rxe%d", ndev);
- if (err) {
- pr_err("failed to add %s\n", intf);
- goto err;
- }
-
-err:
- dev_put(ndev);
- return err;
-}
-
-static int rxe_param_set_remove(const char *val, const struct kernel_param *kp)
-{
- int len;
- char intf[32];
- struct ib_device *ib_dev;
-
- len = sanitize_arg(val, intf, sizeof(intf));
- if (!len) {
- pr_err("add: invalid interface name\n");
- return -EINVAL;
- }
-
- if (strncmp("all", intf, len) == 0) {
- pr_info("rxe_sys: remove all");
- ib_unregister_driver(RDMA_DRIVER_RXE);
- return 0;
- }
-
- ib_dev = ib_device_get_by_name(intf, RDMA_DRIVER_RXE);
- if (!ib_dev) {
- pr_err("not configured on %s\n", intf);
- return -EINVAL;
- }
-
- ib_unregister_device_and_put(ib_dev);
-
- return 0;
-}
-
-static const struct kernel_param_ops rxe_add_ops = {
- .set = rxe_param_set_add,
-};
-
-static const struct kernel_param_ops rxe_remove_ops = {
- .set = rxe_param_set_remove,
-};
-
-module_param_cb(add, &rxe_add_ops, NULL, 0200);
-MODULE_PARM_DESC(add, "DEPRECATED. Create RXE device over network interface");
-module_param_cb(remove, &rxe_remove_ops, NULL, 0200);
-MODULE_PARM_DESC(remove, "DEPRECATED. Remove RXE device over network interface");
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 6951fdcb31bf..0c4db5bb17d7 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -32,25 +32,24 @@ void rxe_do_task(struct tasklet_struct *t)
{
int cont;
int ret;
- unsigned long flags;
struct rxe_task *task = from_tasklet(task, t, tasklet);
- spin_lock_irqsave(&task->state_lock, flags);
+ spin_lock_bh(&task->state_lock);
switch (task->state) {
case TASK_STATE_START:
task->state = TASK_STATE_BUSY;
- spin_unlock_irqrestore(&task->state_lock, flags);
+ spin_unlock_bh(&task->state_lock);
break;
case TASK_STATE_BUSY:
task->state = TASK_STATE_ARMED;
fallthrough;
case TASK_STATE_ARMED:
- spin_unlock_irqrestore(&task->state_lock, flags);
+ spin_unlock_bh(&task->state_lock);
return;
default:
- spin_unlock_irqrestore(&task->state_lock, flags);
+ spin_unlock_bh(&task->state_lock);
pr_warn("%s failed with bad state %d\n", __func__, task->state);
return;
}
@@ -59,7 +58,7 @@ void rxe_do_task(struct tasklet_struct *t)
cont = 0;
ret = task->func(task->arg);
- spin_lock_irqsave(&task->state_lock, flags);
+ spin_lock_bh(&task->state_lock);
switch (task->state) {
case TASK_STATE_BUSY:
if (ret)
@@ -81,7 +80,7 @@ void rxe_do_task(struct tasklet_struct *t)
pr_warn("%s failed with bad state %d\n", __func__,
task->state);
}
- spin_unlock_irqrestore(&task->state_lock, flags);
+ spin_unlock_bh(&task->state_lock);
} while (cont);
task->ret = ret;
@@ -106,7 +105,6 @@ int rxe_init_task(void *obj, struct rxe_task *task,
void rxe_cleanup_task(struct rxe_task *task)
{
- unsigned long flags;
bool idle;
/*
@@ -116,9 +114,9 @@ void rxe_cleanup_task(struct rxe_task *task)
task->destroyed = true;
do {
- spin_lock_irqsave(&task->state_lock, flags);
+ spin_lock_bh(&task->state_lock);
idle = (task->state == TASK_STATE_START);
- spin_unlock_irqrestore(&task->state_lock, flags);
+ spin_unlock_bh(&task->state_lock);
} while (!idle);
tasklet_kill(&task->tasklet);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 0aa0d7e52773..915ad6664321 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -182,7 +182,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
/* create index > 0 */
rxe_add_index(ah);
- ah->ah_num = ah->pelem.index;
+ ah->ah_num = ah->elem.index;
if (uresp) {
/* only if new user provider */
@@ -383,10 +383,9 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr)
{
int err = 0;
- unsigned long flags;
struct rxe_srq *srq = to_rsrq(ibsrq);
- spin_lock_irqsave(&srq->rq.producer_lock, flags);
+ spin_lock_bh(&srq->rq.producer_lock);
while (wr) {
err = post_one_recv(&srq->rq, wr);
@@ -395,7 +394,7 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
wr = wr->next;
}
- spin_unlock_irqrestore(&srq->rq.producer_lock, flags);
+ spin_unlock_bh(&srq->rq.producer_lock);
if (err)
*bad_wr = wr;
@@ -469,6 +468,11 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (err)
goto err1;
+ if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH))
+ qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
+ qp->ibqp.qp_num,
+ qp->attr.dest_qp_num);
+
return 0;
err1:
@@ -634,19 +638,18 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
int err;
struct rxe_sq *sq = &qp->sq;
struct rxe_send_wqe *send_wqe;
- unsigned long flags;
int full;
err = validate_send_wr(qp, ibwr, mask, length);
if (err)
return err;
- spin_lock_irqsave(&qp->sq.sq_lock, flags);
+ spin_lock_bh(&qp->sq.sq_lock);
full = queue_full(sq->queue, QUEUE_TYPE_TO_DRIVER);
if (unlikely(full)) {
- spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+ spin_unlock_bh(&qp->sq.sq_lock);
return -ENOMEM;
}
@@ -655,7 +658,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
queue_advance_producer(sq->queue, QUEUE_TYPE_TO_DRIVER);
- spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+ spin_unlock_bh(&qp->sq.sq_lock);
return 0;
}
@@ -735,7 +738,6 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
int err = 0;
struct rxe_qp *qp = to_rqp(ibqp);
struct rxe_rq *rq = &qp->rq;
- unsigned long flags;
if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
*bad_wr = wr;
@@ -749,7 +751,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
goto err1;
}
- spin_lock_irqsave(&rq->producer_lock, flags);
+ spin_lock_bh(&rq->producer_lock);
while (wr) {
err = post_one_recv(rq, wr);
@@ -760,7 +762,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
wr = wr->next;
}
- spin_unlock_irqrestore(&rq->producer_lock, flags);
+ spin_unlock_bh(&rq->producer_lock);
if (qp->resp.state == QP_STATE_ERROR)
rxe_run_task(&qp->resp.task, 1);
@@ -841,9 +843,8 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
int i;
struct rxe_cq *cq = to_rcq(ibcq);
struct rxe_cqe *cqe;
- unsigned long flags;
- spin_lock_irqsave(&cq->cq_lock, flags);
+ spin_lock_bh(&cq->cq_lock);
for (i = 0; i < num_entries; i++) {
cqe = queue_head(cq->queue, QUEUE_TYPE_FROM_DRIVER);
if (!cqe)
@@ -852,7 +853,7 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
memcpy(wc++, &cqe->ibwc, sizeof(*wc));
queue_advance_consumer(cq->queue, QUEUE_TYPE_FROM_DRIVER);
}
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
return i;
}
@@ -870,11 +871,10 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct rxe_cq *cq = to_rcq(ibcq);
- unsigned long irq_flags;
int ret = 0;
int empty;
- spin_lock_irqsave(&cq->cq_lock, irq_flags);
+ spin_lock_bh(&cq->cq_lock);
if (cq->notify != IB_CQ_NEXT_COMP)
cq->notify = flags & IB_CQ_SOLICITED_MASK;
@@ -883,7 +883,7 @@ static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
ret = 1;
- spin_unlock_irqrestore(&cq->cq_lock, irq_flags);
+ spin_unlock_bh(&cq->cq_lock);
return ret;
}
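
The rxe_modify_qp hunk above starts deriving the RoCE UDP source port from the GRH flow label, or from the local/remote QP numbers when no flow label is set, via rdma_get_udp_sport(). The exact folding lives in the RDMA core helpers; the sketch below only illustrates the idea (spread flows deterministically over the dynamic port range 49152-65535) and is not the kernel's formula:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only; not the implementation of rdma_get_udp_sport(). */
static uint16_t example_udp_sport(uint32_t fl, uint32_t lqpn, uint32_t rqpn)
{
	if (!fl)			/* no flow label: fall back to the QPNs */
		fl = lqpn ^ rqpn;

	/* fold into 14 bits and move into the dynamic/private port range,
	 * 0xC000..0xFFFF (49152..65535) */
	return 0xC000 | ((fl ^ (fl >> 14)) & 0x3FFF);
}

int main(void)
{
	printf("%u\n", example_udp_sport(0, 0x11, 0x12));
	printf("%u\n", example_udp_sport(0xABCDE, 0, 0));
	return 0;
}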
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 35e041450090..e48969e8d4c8 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -35,17 +35,17 @@ static inline int psn_compare(u32 psn_a, u32 psn_b)
struct rxe_ucontext {
struct ib_ucontext ibuc;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
};
struct rxe_pd {
struct ib_pd ibpd;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
};
struct rxe_ah {
struct ib_ah ibah;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct rxe_av av;
bool is_user;
int ah_num;
@@ -60,7 +60,7 @@ struct rxe_cqe {
struct rxe_cq {
struct ib_cq ibcq;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct rxe_queue *queue;
spinlock_t cq_lock;
u8 notify;
@@ -95,7 +95,7 @@ struct rxe_rq {
struct rxe_srq {
struct ib_srq ibsrq;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct rxe_pd *pd;
struct rxe_rq rq;
u32 srq_num;
@@ -209,7 +209,7 @@ struct rxe_resp_info {
struct rxe_qp {
struct ib_qp ibqp;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct ib_qp_attr attr;
unsigned int valid;
unsigned int mtu;
@@ -309,7 +309,7 @@ static inline int rkey_is_mw(u32 rkey)
}
struct rxe_mr {
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct ib_mr ibmr;
struct ib_umem *umem;
@@ -342,7 +342,7 @@ enum rxe_mw_state {
struct rxe_mw {
struct ib_mw ibmw;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
spinlock_t lock;
enum rxe_mw_state state;
struct rxe_qp *qp; /* Type 2 only */
@@ -354,7 +354,7 @@ struct rxe_mw {
};
struct rxe_mc_grp {
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
spinlock_t mcg_lock; /* guard group */
struct rxe_dev *rxe;
struct list_head qp_list;
@@ -365,7 +365,7 @@ struct rxe_mc_grp {
};
struct rxe_mc_elem {
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct list_head qp_list;
struct list_head grp_list;
struct rxe_qp *qp;
@@ -392,8 +392,6 @@ struct rxe_dev {
struct net_device *ndev;
- int xmit_errors;
-
struct rxe_pool uc_pool;
struct rxe_pool pd_pool;
struct rxe_pool ah_pool;
@@ -484,6 +482,6 @@ static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
-void rxe_mc_cleanup(struct rxe_pool_entry *arg);
+void rxe_mc_cleanup(struct rxe_pool_elem *elem);
#endif /* RXE_VERBS_H */
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 1b36350601fa..a3dd2cb6d5c9 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -8,6 +8,7 @@
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/xarray.h>
+#include <net/addrconf.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
@@ -155,7 +156,8 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
attr->vendor_id = SIW_VENDOR_ID;
attr->vendor_part_id = sdev->vendor_part_id;
- memcpy(&attr->sys_image_guid, sdev->netdev->dev_addr, 6);
+ addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
+ sdev->netdev->dev_addr);
return 0;
}
@@ -660,7 +662,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
kbuf += core_sge->length;
core_sge++;
}
- sqe->sge[0].length = bytes > 0 ? bytes : 0;
+ sqe->sge[0].length = max(bytes, 0);
sqe->num_sge = bytes > 0 ? 1 : 0;
return bytes;
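
The siw_query_device hunk above stops copying only 6 MAC bytes into the 8-byte sys_image_guid and instead builds a proper EUI-64 with addrconf_addr_eui48(). A hedged user-space approximation of that MAC-to-EUI-64 expansion (insert ff:fe in the middle, flip the universal/local bit):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Sketch only, approximating what addrconf_addr_eui48() produces. */
static void mac_to_eui64(uint8_t eui[8], const uint8_t mac[6])
{
	memcpy(eui, mac, 3);
	eui[3] = 0xFF;		/* fixed filler bytes in the middle */
	eui[4] = 0xFE;
	memcpy(eui + 5, mac + 3, 3);
	eui[0] ^= 0x02;		/* flip the universal/local bit */
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x42, 0xac, 0x11, 0x00, 0x02 };
	uint8_t guid[8];

	mac_to_eui64(guid, mac);
	for (int i = 0; i < 8; i++)
		printf("%02x%s", guid[i], i == 7 ? "\n" : ":");
	return 0;
}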
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 776e46ee95da..07e47021a71f 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -113,10 +113,6 @@ bool iser_pi_enable = false;
module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO);
MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
-int iser_pi_guard;
-module_param_named(pi_guard, iser_pi_guard, int, S_IRUGO);
-MODULE_PARM_DESC(pi_guard, "T10-PI guard_type [deprecated]");
-
static int iscsi_iser_set(const char *val, const struct kernel_param *kp)
{
int ret;
@@ -139,9 +135,8 @@ static int iscsi_iser_set(const char *val, const struct kernel_param *kp)
* Notes: In case of data length errors or iscsi PDU completion failures
* this routine will signal iscsi layer of connection failure.
*/
-void
-iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
- char *rx_data, int rx_data_len)
+void iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *rx_data, int rx_data_len)
{
int rc = 0;
int datalen;
@@ -176,8 +171,7 @@ error:
 * Notes: This routine can't fail, just assigns iscsi task
* hdr and max hdr size.
*/
-static int
-iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
+static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
{
struct iscsi_iser_task *iser_task = task->dd_data;
@@ -198,9 +192,8 @@ iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
* state mutex to avoid dereferencing the IB device which
* may have already been terminated.
*/
-int
-iser_initialize_task_headers(struct iscsi_task *task,
- struct iser_tx_desc *tx_desc)
+int iser_initialize_task_headers(struct iscsi_task *task,
+ struct iser_tx_desc *tx_desc)
{
struct iser_conn *iser_conn = task->conn->dd_data;
struct iser_device *device = iser_conn->ib_conn.device;
@@ -237,8 +230,7 @@ iser_initialize_task_headers(struct iscsi_task *task,
* Return: Returns zero on success or -ENOMEM when failing
* to init task headers (dma mapping error).
*/
-static int
-iscsi_iser_task_init(struct iscsi_task *task)
+static int iscsi_iser_task_init(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
int ret;
@@ -272,8 +264,8 @@ iscsi_iser_task_init(struct iscsi_task *task)
* xmit.
*
**/
-static int
-iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+static int iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
+ struct iscsi_task *task)
{
int error = 0;
@@ -290,9 +282,8 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
return error;
}
-static int
-iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
- struct iscsi_task *task)
+static int iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
+ struct iscsi_task *task)
{
struct iscsi_r2t_info *r2t = &task->unsol_r2t;
struct iscsi_data hdr;
@@ -326,8 +317,7 @@ iscsi_iser_task_xmit_unsol_data_exit:
*
* Return: zero on success or escalates $error on failure.
*/
-static int
-iscsi_iser_task_xmit(struct iscsi_task *task)
+static int iscsi_iser_task_xmit(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
struct iscsi_iser_task *iser_task = task->dd_data;
@@ -410,8 +400,7 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
*
* In addition the error sector is marked.
*/
-static u8
-iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector)
+static u8 iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector)
{
struct iscsi_iser_task *iser_task = task->dd_data;
enum iser_data_dir dir = iser_task->dir[ISER_DIR_IN] ?
@@ -460,11 +449,9 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session,
 * -EINVAL in case end-point doesn't exist anymore or iser connection
* state is not UP (teardown already started).
*/
-static int
-iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
- struct iscsi_cls_conn *cls_conn,
- uint64_t transport_eph,
- int is_leading)
+static int iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ uint64_t transport_eph, int is_leading)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iser_conn *iser_conn;
@@ -519,8 +506,7 @@ out:
* from this point iscsi must call conn_stop in session/connection
* teardown so iser transport must wait for it.
*/
-static int
-iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
+static int iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *iscsi_conn;
struct iser_conn *iser_conn;
@@ -542,8 +528,7 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
* handle, so we call it under iser the state lock to protect against
* this kind of race.
*/
-static void
-iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+static void iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iser_conn *iser_conn = conn->dd_data;
@@ -578,8 +563,7 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
*
 * Removes and frees the iscsi host.
*/
-static void
-iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
+static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
{
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
@@ -588,8 +572,7 @@ iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
iscsi_host_free(shost);
}
-static inline unsigned int
-iser_dif_prot_caps(int prot_caps)
+static inline unsigned int iser_dif_prot_caps(int prot_caps)
{
int ret = 0;
@@ -708,9 +691,8 @@ free_host:
return NULL;
}
-static int
-iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
- enum iscsi_param param, char *buf, int buflen)
+static int iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen)
{
int value;
@@ -760,8 +742,8 @@ iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
*
* Output connection statistics.
*/
-static void
-iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+static void iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats)
{
struct iscsi_conn *conn = cls_conn->dd_data;
@@ -812,9 +794,9 @@ static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
* Return: iscsi_endpoint created by iscsi layer or ERR_PTR(error)
* if fails.
*/
-static struct iscsi_endpoint *
-iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
- int non_blocking)
+static struct iscsi_endpoint *iscsi_iser_ep_connect(struct Scsi_Host *shost,
+ struct sockaddr *dst_addr,
+ int non_blocking)
{
int err;
struct iser_conn *iser_conn;
@@ -857,8 +839,7 @@ failure:
 * or more likely iser connection state transitioned to TERMINATING or
* DOWN during the wait period.
*/
-static int
-iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+static int iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
struct iser_conn *iser_conn = ep->dd_data;
int rc;
@@ -893,8 +874,7 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
* and cleanup or actually call it immediately in case we didn't pass
* iscsi conn bind/start stage, thus it is safe.
*/
-static void
-iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
+static void iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
{
struct iser_conn *iser_conn = ep->dd_data;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 9f6ac0a09a78..20af46c4e954 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -119,8 +119,6 @@
#define ISER_QP_MAX_RECV_DTOS (ISER_DEF_XMIT_CMDS_MAX)
-#define ISER_MIN_POSTED_RX (ISER_DEF_XMIT_CMDS_MAX >> 2)
-
/* the max TX (send) WR supported by the iSER QP is defined by *
* max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect *
* to have at max for SCSI command. The tx posting & completion handling code *
@@ -148,8 +146,6 @@
- ISER_MAX_RX_MISC_PDUS) / \
(1 + ISER_INFLIGHT_DATAOUTS))
-#define ISER_SIGNAL_CMD_COUNT 32
-
/* Constant PDU lengths calculations */
#define ISER_HEADERS_LEN (sizeof(struct iser_ctrl) + sizeof(struct iscsi_hdr))
@@ -366,9 +362,6 @@ struct iser_fr_pool {
* @qp: Connection Queue-pair
* @cq: Connection completion queue
* @cq_size: The number of max outstanding completions
- * @post_recv_buf_count: post receive counter
- * @sig_count: send work request signal count
- * @rx_wr: receive work request for batch posts
* @device: reference to iser device
 * @fr_pool: connection fast registration pool
* @pi_support: Indicate device T10-PI support
@@ -379,9 +372,6 @@ struct ib_conn {
struct ib_qp *qp;
struct ib_cq *cq;
u32 cq_size;
- int post_recv_buf_count;
- u8 sig_count;
- struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
struct iser_device *device;
struct iser_fr_pool fr_pool;
bool pi_support;
@@ -397,8 +387,6 @@ struct ib_conn {
* @state: connection logical state
* @qp_max_recv_dtos: maximum number of data outs, corresponds
* to max number of post recvs
- * @qp_max_recv_dtos_mask: (qp_max_recv_dtos - 1)
- * @min_posted_rx: (qp_max_recv_dtos >> 2)
* @max_cmds: maximum cmds allowed for this connection
* @name: connection peer portal
 * @release_work: deferred work for release job
@@ -409,7 +397,6 @@ struct ib_conn {
* (state is ISER_CONN_UP)
* @conn_list: entry in ig conn list
* @login_desc: login descriptor
- * @rx_desc_head: head of rx_descs cyclic buffer
* @rx_descs: rx buffers array (cyclic buffer)
* @num_rx_descs: number of rx descriptors
* @scsi_sg_tablesize: scsi host sg_tablesize
@@ -422,8 +409,6 @@ struct iser_conn {
struct iscsi_endpoint *ep;
enum iser_conn_state state;
unsigned qp_max_recv_dtos;
- unsigned qp_max_recv_dtos_mask;
- unsigned min_posted_rx;
u16 max_cmds;
char name[ISER_OBJECT_NAME_SIZE];
struct work_struct release_work;
@@ -433,7 +418,6 @@ struct iser_conn {
struct completion up_completion;
struct list_head conn_list;
struct iser_login_desc login_desc;
- unsigned int rx_desc_head;
struct iser_rx_desc *rx_descs;
u32 num_rx_descs;
unsigned short scsi_sg_tablesize;
@@ -486,7 +470,6 @@ struct iser_global {
extern struct iser_global ig;
extern int iser_debug_level;
extern bool iser_pi_enable;
-extern int iser_pi_guard;
extern unsigned int iser_max_sectors;
extern bool iser_always_reg;
@@ -543,9 +526,9 @@ int iser_connect(struct iser_conn *iser_conn,
int non_blocking);
int iser_post_recvl(struct iser_conn *iser_conn);
-int iser_post_recvm(struct iser_conn *iser_conn, int count);
-int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
- bool signal);
+int iser_post_recvm(struct iser_conn *iser_conn,
+ struct iser_rx_desc *rx_desc);
+int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc);
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 27a6f75a9912..2490150d3085 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -95,11 +95,8 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
* task->data[ISER_DIR_OUT].data_len, Protection size
* is stored at task->prot[ISER_DIR_OUT].data_len
*/
-static int
-iser_prepare_write_cmd(struct iscsi_task *task,
- unsigned int imm_sz,
- unsigned int unsol_sz,
- unsigned int edtl)
+static int iser_prepare_write_cmd(struct iscsi_task *task, unsigned int imm_sz,
+ unsigned int unsol_sz, unsigned int edtl)
{
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_mem_reg *mem_reg;
@@ -160,8 +157,8 @@ iser_prepare_write_cmd(struct iscsi_task *task,
}
/* creates a new tx descriptor and adds header regd buffer */
-static void iser_create_send_desc(struct iser_conn *iser_conn,
- struct iser_tx_desc *tx_desc)
+static void iser_create_send_desc(struct iser_conn *iser_conn,
+ struct iser_tx_desc *tx_desc)
{
struct iser_device *device = iser_conn->ib_conn.device;
@@ -247,8 +244,6 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
struct iser_device *device = ib_conn->device;
iser_conn->qp_max_recv_dtos = session->cmds_max;
- iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
- iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;
if (iser_alloc_fastreg_pool(ib_conn, session->scsi_cmds_max,
iser_conn->pages_per_mr))
@@ -280,7 +275,6 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
rx_sg->lkey = device->pd->local_dma_lkey;
}
- iser_conn->rx_desc_head = 0;
return 0;
rx_desc_dma_map_failed:
@@ -322,37 +316,35 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
struct iser_conn *iser_conn = conn->dd_data;
- struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iscsi_session *session = conn->session;
+ int err = 0;
+ int i;
iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
/* check if this is the last login - going to full feature phase */
if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
- return 0;
-
- /*
- * Check that there is one posted recv buffer
- * (for the last login response).
- */
- WARN_ON(ib_conn->post_recv_buf_count != 1);
+ goto out;
if (session->discovery_sess) {
iser_info("Discovery session, re-using login RX buffer\n");
- return 0;
- } else
- iser_info("Normal session, posting batch of RX %d buffers\n",
- iser_conn->min_posted_rx);
-
- /* Initial post receive buffers */
- if (iser_post_recvm(iser_conn, iser_conn->min_posted_rx))
- return -ENOMEM;
+ goto out;
+ }
- return 0;
-}
+ iser_info("Normal session, posting batch of RX %d buffers\n",
+ iser_conn->qp_max_recv_dtos - 1);
-static inline bool iser_signal_comp(u8 sig_count)
-{
- return ((sig_count % ISER_SIGNAL_CMD_COUNT) == 0);
+ /*
+ * Initial post receive buffers.
+ * There is one already posted recv buffer (for the last login
+ * response). Therefore, the first recv buffer is skipped here.
+ */
+ for (i = 1; i < iser_conn->qp_max_recv_dtos; i++) {
+ err = iser_post_recvm(iser_conn, &iser_conn->rx_descs[i]);
+ if (err)
+ goto out;
+ }
+out:
+ return err;
}
/**
@@ -360,8 +352,7 @@ static inline bool iser_signal_comp(u8 sig_count)
* @conn: link to matching iscsi connection
* @task: SCSI command task
*/
-int iser_send_command(struct iscsi_conn *conn,
- struct iscsi_task *task)
+int iser_send_command(struct iscsi_conn *conn, struct iscsi_task *task)
{
struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
@@ -371,7 +362,6 @@ int iser_send_command(struct iscsi_conn *conn,
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
struct scsi_cmnd *sc = task->sc;
struct iser_tx_desc *tx_desc = &iser_task->desc;
- u8 sig_count = ++iser_conn->ib_conn.sig_count;
edtl = ntohl(hdr->data_length);
@@ -418,8 +408,7 @@ int iser_send_command(struct iscsi_conn *conn,
iser_task->status = ISER_TASK_STATUS_STARTED;
- err = iser_post_send(&iser_conn->ib_conn, tx_desc,
- iser_signal_comp(sig_count));
+ err = iser_post_send(&iser_conn->ib_conn, tx_desc);
if (!err)
return 0;
@@ -434,8 +423,7 @@ send_command_error:
* @task: SCSI command task
* @hdr: pointer to the LLD's iSCSI message header
*/
-int iser_send_data_out(struct iscsi_conn *conn,
- struct iscsi_task *task,
+int iser_send_data_out(struct iscsi_conn *conn, struct iscsi_task *task,
struct iscsi_data *hdr)
{
struct iser_conn *iser_conn = conn->dd_data;
@@ -487,7 +475,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
itt, buf_offset, data_seg_len);
- err = iser_post_send(&iser_conn->ib_conn, tx_desc, true);
+ err = iser_post_send(&iser_conn->ib_conn, tx_desc);
if (!err)
return 0;
@@ -497,8 +485,7 @@ send_data_out_error:
return err;
}
-int iser_send_control(struct iscsi_conn *conn,
- struct iscsi_task *task)
+int iser_send_control(struct iscsi_conn *conn, struct iscsi_task *task)
{
struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
@@ -550,7 +537,7 @@ int iser_send_control(struct iscsi_conn *conn,
goto send_control_error;
}
- err = iser_post_send(&iser_conn->ib_conn, mdesc, true);
+ err = iser_post_send(&iser_conn->ib_conn, mdesc);
if (!err)
return 0;
@@ -590,11 +577,14 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
desc->rsp_dma, ISER_RX_LOGIN_SIZE,
DMA_FROM_DEVICE);
- ib_conn->post_recv_buf_count--;
+ if (iser_conn->iscsi_conn->session->discovery_sess)
+ return;
+
+ /* Post the first RX buffer that is skipped in iser_post_rx_bufs() */
+ iser_post_recvm(iser_conn, iser_conn->rx_descs);
}
-static inline int
-iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
+static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
{
if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) ||
(desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) {
@@ -607,10 +597,8 @@ iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
return 0;
}
-static int
-iser_check_remote_inv(struct iser_conn *iser_conn,
- struct ib_wc *wc,
- struct iscsi_hdr *hdr)
+static int iser_check_remote_inv(struct iser_conn *iser_conn, struct ib_wc *wc,
+ struct iscsi_hdr *hdr)
{
if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
struct iscsi_task *task;
@@ -657,8 +645,7 @@ void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
struct iser_conn *iser_conn = to_iser_conn(ib_conn);
struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);
struct iscsi_hdr *hdr;
- int length;
- int outstanding, count, err;
+ int length, err;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
iser_err_comp(wc, "task_rsp");
@@ -687,20 +674,9 @@ void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
DMA_FROM_DEVICE);
- /* decrementing conn->post_recv_buf_count only --after-- freeing the *
- * task eliminates the need to worry on tasks which are completed in *
- * parallel to the execution of iser_conn_term. So the code that waits *
- * for the posted rx bufs refcount to become zero handles everything */
- ib_conn->post_recv_buf_count--;
-
- outstanding = ib_conn->post_recv_buf_count;
- if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) {
- count = min(iser_conn->qp_max_recv_dtos - outstanding,
- iser_conn->min_posted_rx);
- err = iser_post_recvm(iser_conn, count);
- if (err)
- iser_err("posting %d rx bufs err %d\n", count, err);
- }
+ err = iser_post_recvm(iser_conn, desc);
+ if (err)
+ iser_err("posting rx buffer err %d\n", err);
}
void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)
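
The iser_task_rsp hunk above drops the post_recv_buf_count bookkeeping: each completed RX descriptor is simply reposted on its own. A rough user-space analogue of that per-completion repost, written against libibverbs rather than the kernel verbs API (repost_rx() and struct rx_desc are hypothetical, and all QP setup is omitted):

#include <stdint.h>
#include <infiniband/verbs.h>

struct rx_desc {
	struct ibv_sge sge;	/* pre-registered buffer: addr/length/lkey */
};

/* Repost the one receive buffer that just completed, instead of counting
 * outstanding buffers and posting batches. */
static int repost_rx(struct ibv_qp *qp, struct rx_desc *desc)
{
	struct ibv_recv_wr wr = {
		.wr_id   = (uintptr_t)desc,	/* find the desc again on completion */
		.sg_list = &desc->sge,
		.num_sge = 1,
	};
	struct ibv_recv_wr *bad_wr;

	return ibv_post_recv(qp, &wr, &bad_wr);
}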
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 9776b755d848..660982625488 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -44,8 +44,7 @@ void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc)
iser_err_comp(wc, "memreg");
}
-static struct iser_fr_desc *
-iser_reg_desc_get_fr(struct ib_conn *ib_conn)
+static struct iser_fr_desc *iser_reg_desc_get_fr(struct ib_conn *ib_conn)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
struct iser_fr_desc *desc;
@@ -60,9 +59,8 @@ iser_reg_desc_get_fr(struct ib_conn *ib_conn)
return desc;
}
-static void
-iser_reg_desc_put_fr(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc)
+static void iser_reg_desc_put_fr(struct ib_conn *ib_conn,
+ struct iser_fr_desc *desc)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
unsigned long flags;
@@ -73,9 +71,9 @@ iser_reg_desc_put_fr(struct ib_conn *ib_conn,
}
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *data,
- enum iser_data_dir iser_dir,
- enum dma_data_direction dma_dir)
+ struct iser_data_buf *data,
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir)
{
struct ib_device *dev;
@@ -100,9 +98,8 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
ib_dma_unmap_sg(dev, data->sg, data->size, dir);
}
-static int
-iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
- struct iser_mem_reg *reg)
+static int iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
+ struct iser_mem_reg *reg)
{
struct scatterlist *sg = mem->sg;
@@ -154,8 +151,8 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
reg->mem_h = NULL;
}
-static void
-iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain)
+static void iser_set_dif_domain(struct scsi_cmnd *sc,
+ struct ib_sig_domain *domain)
{
domain->sig_type = IB_SIG_TYPE_T10_DIF;
domain->sig.dif.pi_interval = scsi_prot_interval(sc);
@@ -171,8 +168,8 @@ iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain)
domain->sig.dif.ref_remap = true;
}
-static int
-iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
+static int iser_set_sig_attrs(struct scsi_cmnd *sc,
+ struct ib_sig_attrs *sig_attrs)
{
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_WRITE_INSERT:
@@ -205,8 +202,7 @@ iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
return 0;
}
-static inline void
-iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
+static inline void iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
*mask = 0;
if (sc->prot_flags & SCSI_PROT_REF_CHECK)
@@ -215,11 +211,8 @@ iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
*mask |= IB_SIG_CHECK_GUARD;
}
-static inline void
-iser_inv_rkey(struct ib_send_wr *inv_wr,
- struct ib_mr *mr,
- struct ib_cqe *cqe,
- struct ib_send_wr *next_wr)
+static inline void iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr,
+ struct ib_cqe *cqe, struct ib_send_wr *next_wr)
{
inv_wr->opcode = IB_WR_LOCAL_INV;
inv_wr->wr_cqe = cqe;
@@ -229,12 +222,11 @@ iser_inv_rkey(struct ib_send_wr *inv_wr,
inv_wr->next = next_wr;
}
-static int
-iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_data_buf *sig_mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *sig_reg)
+static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *mem,
+ struct iser_data_buf *sig_mem,
+ struct iser_reg_resources *rsc,
+ struct iser_mem_reg *sig_reg)
{
struct iser_tx_desc *tx_desc = &iser_task->desc;
struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
@@ -335,12 +327,10 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
return 0;
}
-static int
-iser_reg_data_sg(struct iscsi_iser_task *task,
- struct iser_data_buf *mem,
- struct iser_fr_desc *desc,
- bool use_dma_key,
- struct iser_mem_reg *reg)
+static int iser_reg_data_sg(struct iscsi_iser_task *task,
+ struct iser_data_buf *mem,
+ struct iser_fr_desc *desc, bool use_dma_key,
+ struct iser_mem_reg *reg)
{
struct iser_device *device = task->iser_conn->ib_conn.device;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index b566f7cb7797..8bf87b073d9b 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -265,14 +265,14 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
memset(&init_attr, 0, sizeof(init_attr));
init_attr.event_handler = iser_qp_event_callback;
- init_attr.qp_context = (void *)ib_conn;
- init_attr.send_cq = ib_conn->cq;
- init_attr.recv_cq = ib_conn->cq;
- init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
+ init_attr.qp_context = (void *)ib_conn;
+ init_attr.send_cq = ib_conn->cq;
+ init_attr.recv_cq = ib_conn->cq;
+ init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
init_attr.cap.max_send_sge = 2;
init_attr.cap.max_recv_sge = 1;
- init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
- init_attr.qp_type = IB_QPT_RC;
+ init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ init_attr.qp_type = IB_QPT_RC;
init_attr.cap.max_send_wr = max_send_wr;
if (ib_conn->pi_support)
init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
@@ -283,9 +283,8 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
goto out_err;
ib_conn->qp = ib_conn->cma_id->qp;
- iser_info("setting conn %p cma_id %p qp %p max_send_wr %d\n",
- ib_conn, ib_conn->cma_id,
- ib_conn->cma_id->qp, max_send_wr);
+ iser_info("setting conn %p cma_id %p qp %p max_send_wr %d\n", ib_conn,
+ ib_conn->cma_id, ib_conn->cma_id->qp, max_send_wr);
return ret;
out_err:
@@ -313,7 +312,7 @@ struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
goto inc_refcnt;
device = kzalloc(sizeof *device, GFP_KERNEL);
- if (device == NULL)
+ if (!device)
goto out;
/* assign this device to the device */
@@ -392,8 +391,7 @@ void iser_release_work(struct work_struct *work)
* so the cm_id removal is out of here. It is Safe to
* be invoked multiple times.
*/
-static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
- bool destroy)
+static void iser_free_ib_conn_res(struct iser_conn *iser_conn, bool destroy)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
@@ -401,7 +399,7 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
iser_info("freeing conn %p cma_id %p qp %p\n",
iser_conn, ib_conn->cma_id, ib_conn->qp);
- if (ib_conn->qp != NULL) {
+ if (ib_conn->qp) {
rdma_destroy_qp(ib_conn->cma_id);
ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);
ib_conn->qp = NULL;
@@ -411,7 +409,7 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
if (iser_conn->rx_descs)
iser_free_rx_descriptors(iser_conn);
- if (device != NULL) {
+ if (device) {
iser_device_try_release(device);
ib_conn->device = NULL;
}
@@ -445,7 +443,7 @@ void iser_conn_release(struct iser_conn *iser_conn)
iser_free_ib_conn_res(iser_conn, true);
mutex_unlock(&iser_conn->state_mutex);
- if (ib_conn->cma_id != NULL) {
+ if (ib_conn->cma_id) {
rdma_destroy_id(ib_conn->cma_id);
ib_conn->cma_id = NULL;
}
@@ -501,13 +499,12 @@ static void iser_connect_error(struct rdma_cm_id *cma_id)
{
struct iser_conn *iser_conn;
- iser_conn = (struct iser_conn *)cma_id->context;
+ iser_conn = cma_id->context;
iser_conn->state = ISER_CONN_TERMINATING;
}
-static void
-iser_calc_scsi_params(struct iser_conn *iser_conn,
- unsigned int max_sectors)
+static void iser_calc_scsi_params(struct iser_conn *iser_conn,
+ unsigned int max_sectors)
{
struct iser_device *device = iser_conn->ib_conn.device;
struct ib_device_attr *attr = &device->ib_device->attrs;
@@ -545,11 +542,11 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
struct iser_device *device;
- struct iser_conn *iser_conn;
- struct ib_conn *ib_conn;
+ struct iser_conn *iser_conn;
+ struct ib_conn *ib_conn;
int ret;
- iser_conn = (struct iser_conn *)cma_id->context;
+ iser_conn = cma_id->context;
if (iser_conn->state != ISER_CONN_PENDING)
/* bailout */
return;
@@ -593,9 +590,9 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
struct rdma_conn_param conn_param;
- int ret;
+ int ret;
struct iser_cm_hdr req_hdr;
- struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
+ struct iser_conn *iser_conn = cma_id->context;
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct ib_device *ib_dev = ib_conn->device->ib_device;
@@ -609,9 +606,9 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
memset(&conn_param, 0, sizeof conn_param);
conn_param.responder_resources = ib_dev->attrs.max_qp_rd_atom;
- conn_param.initiator_depth = 1;
- conn_param.retry_count = 7;
- conn_param.rnr_retry_count = 6;
+ conn_param.initiator_depth = 1;
+ conn_param.retry_count = 7;
+ conn_param.rnr_retry_count = 6;
memset(&req_hdr, 0, sizeof(req_hdr));
req_hdr.flags = ISER_ZBVA_NOT_SUP;
@@ -638,7 +635,7 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id,
struct ib_qp_attr attr;
struct ib_qp_init_attr init_attr;
- iser_conn = (struct iser_conn *)cma_id->context;
+ iser_conn = cma_id->context;
if (iser_conn->state != ISER_CONN_PENDING)
/* bailout */
return;
@@ -661,7 +658,7 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id,
static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
- struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
+ struct iser_conn *iser_conn = cma_id->context;
if (iser_conn_terminate(iser_conn)) {
if (iser_conn->iscsi_conn)
@@ -675,7 +672,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
bool destroy)
{
- struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
+ struct iser_conn *iser_conn = cma_id->context;
/*
* We are not guaranteed that we visited disconnected_handler
@@ -687,12 +684,13 @@ static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
complete(&iser_conn->ib_completion);
}
-static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+static int iser_cma_handler(struct rdma_cm_id *cma_id,
+ struct rdma_cm_event *event)
{
struct iser_conn *iser_conn;
int ret = 0;
- iser_conn = (struct iser_conn *)cma_id->context;
+ iser_conn = cma_id->context;
iser_info("%s (%d): status %d conn %p id %p\n",
rdma_event_msg(event->event), event->event,
event->status, cma_id->context, cma_id);
@@ -757,7 +755,6 @@ void iser_conn_init(struct iser_conn *iser_conn)
INIT_LIST_HEAD(&iser_conn->conn_list);
mutex_init(&iser_conn->state_mutex);
- ib_conn->post_recv_buf_count = 0;
ib_conn->reg_cqe.done = iser_reg_comp;
}
@@ -765,10 +762,8 @@ void iser_conn_init(struct iser_conn *iser_conn)
* starts the process of connecting to the target
* sleeps until the connection is established or rejected
*/
-int iser_connect(struct iser_conn *iser_conn,
- struct sockaddr *src_addr,
- struct sockaddr *dst_addr,
- int non_blocking)
+int iser_connect(struct iser_conn *iser_conn, struct sockaddr *src_addr,
+ struct sockaddr *dst_addr, int non_blocking)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
int err = 0;
@@ -785,8 +780,7 @@ int iser_connect(struct iser_conn *iser_conn,
iser_conn->state = ISER_CONN_PENDING;
ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler,
- (void *)iser_conn,
- RDMA_PS_TCP, IB_QPT_RC);
+ iser_conn, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(ib_conn->cma_id)) {
err = PTR_ERR(ib_conn->cma_id);
iser_err("rdma_create_id failed: %d\n", err);
@@ -829,7 +823,7 @@ int iser_post_recvl(struct iser_conn *iser_conn)
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_login_desc *desc = &iser_conn->login_desc;
struct ib_recv_wr wr;
- int ib_ret;
+ int ret;
desc->sge.addr = desc->rsp_dma;
desc->sge.length = ISER_RX_LOGIN_SIZE;
@@ -841,46 +835,30 @@ int iser_post_recvl(struct iser_conn *iser_conn)
wr.num_sge = 1;
wr.next = NULL;
- ib_conn->post_recv_buf_count++;
- ib_ret = ib_post_recv(ib_conn->qp, &wr, NULL);
- if (ib_ret) {
- iser_err("ib_post_recv failed ret=%d\n", ib_ret);
- ib_conn->post_recv_buf_count--;
- }
+ ret = ib_post_recv(ib_conn->qp, &wr, NULL);
+ if (unlikely(ret))
+ iser_err("ib_post_recv login failed ret=%d\n", ret);
- return ib_ret;
+ return ret;
}
-int iser_post_recvm(struct iser_conn *iser_conn, int count)
+int iser_post_recvm(struct iser_conn *iser_conn, struct iser_rx_desc *rx_desc)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
- unsigned int my_rx_head = iser_conn->rx_desc_head;
- struct iser_rx_desc *rx_desc;
- struct ib_recv_wr *wr;
- int i, ib_ret;
-
- for (wr = ib_conn->rx_wr, i = 0; i < count; i++, wr++) {
- rx_desc = &iser_conn->rx_descs[my_rx_head];
- rx_desc->cqe.done = iser_task_rsp;
- wr->wr_cqe = &rx_desc->cqe;
- wr->sg_list = &rx_desc->rx_sg;
- wr->num_sge = 1;
- wr->next = wr + 1;
- my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
- }
+ struct ib_recv_wr wr;
+ int ret;
- wr--;
- wr->next = NULL; /* mark end of work requests list */
+ rx_desc->cqe.done = iser_task_rsp;
+ wr.wr_cqe = &rx_desc->cqe;
+ wr.sg_list = &rx_desc->rx_sg;
+ wr.num_sge = 1;
+ wr.next = NULL;
- ib_conn->post_recv_buf_count += count;
- ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, NULL);
- if (unlikely(ib_ret)) {
- iser_err("ib_post_recv failed ret=%d\n", ib_ret);
- ib_conn->post_recv_buf_count -= count;
- } else
- iser_conn->rx_desc_head = my_rx_head;
+ ret = ib_post_recv(ib_conn->qp, &wr, NULL);
+ if (unlikely(ret))
+ iser_err("ib_post_recv failed ret=%d\n", ret);
- return ib_ret;
+ return ret;
}
@@ -888,16 +866,14 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
* iser_post_send - Initiate a Send DTO operation
* @ib_conn: connection RDMA resources
* @tx_desc: iSER TX descriptor
- * @signal: true to send work request as SIGNALED
*
* Return: 0 on success, -1 on failure
*/
-int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
- bool signal)
+int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
struct ib_send_wr *wr = &tx_desc->send_wr;
struct ib_send_wr *first_wr;
- int ib_ret;
+ int ret;
ib_dma_sync_single_for_device(ib_conn->device->ib_device,
tx_desc->dma_addr, ISER_HEADERS_LEN,
@@ -908,7 +884,7 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
wr->sg_list = tx_desc->tx_sg;
wr->num_sge = tx_desc->num_sge;
wr->opcode = IB_WR_SEND;
- wr->send_flags = signal ? IB_SEND_SIGNALED : 0;
+ wr->send_flags = IB_SEND_SIGNALED;
if (tx_desc->inv_wr.next)
first_wr = &tx_desc->inv_wr;
@@ -917,12 +893,12 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
else
first_wr = wr;
- ib_ret = ib_post_send(ib_conn->qp, first_wr, NULL);
- if (unlikely(ib_ret))
+ ret = ib_post_send(ib_conn->qp, first_wr, NULL);
+ if (unlikely(ret))
iser_err("ib_post_send failed, ret:%d opcode:%d\n",
- ib_ret, wr->opcode);
+ ret, wr->opcode);
- return ib_ret;
+ return ret;
}
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
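The reworked iser_post_recvm() above posts a single receive WR for the descriptor whose completion just fired, which is why the rx_desc_head ring cursor, the chained ib_conn->rx_wr array and the post_recv_buf_count rollback on failure can all go away. A minimal sketch of that shape, assuming the usual iser types from iscsi_iser.h; the helper name iser_repost_rx is purely illustrative and not part of the driver:

static int iser_repost_rx(struct ib_conn *ib_conn, struct iser_rx_desc *rx_desc)
{
	struct ib_recv_wr wr;
	int ret;

	/* Route the completion back through the descriptor's CQE. */
	rx_desc->cqe.done = iser_task_rsp;
	wr.wr_cqe = &rx_desc->cqe;
	wr.sg_list = &rx_desc->rx_sg;	/* one pre-mapped SGE per descriptor */
	wr.num_sge = 1;
	wr.next = NULL;			/* no chaining: exactly one WR per post */

	ret = ib_post_recv(ib_conn->qp, &wr, NULL);
	if (unlikely(ret))
		iser_err("ib_post_recv failed ret=%d\n", ret);
	return ret;
}

Because only one WR is posted at a time, a failed ib_post_recv() needs no counter rollback; the caller simply sees the error for that single descriptor.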
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
index 76e4352fe3f6..385a19846c24 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
@@ -13,8 +13,8 @@
void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
- struct rtrs_clt_stats *stats = sess->stats;
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_clt_stats *stats = clt_path->stats;
struct rtrs_clt_stats_pcpu *s;
int cpu;
@@ -180,8 +180,8 @@ static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
{
struct rtrs_clt_con *con = req->con;
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
- struct rtrs_clt_stats *stats = sess->stats;
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_clt_stats *stats = clt_path->stats;
unsigned int len;
len = req->usr_len + req->data_len;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
index 0e69180c3771..b4fa473b7888 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
@@ -16,21 +16,21 @@
#define MIN_MAX_RECONN_ATT -1
#define MAX_MAX_RECONN_ATT 9999
-static void rtrs_clt_sess_release(struct kobject *kobj)
+static void rtrs_clt_path_release(struct kobject *kobj)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
- free_sess(sess);
+ free_path(clt_path);
}
static struct kobj_type ktype_sess = {
.sysfs_ops = &kobj_sysfs_ops,
- .release = rtrs_clt_sess_release
+ .release = rtrs_clt_path_release
};
-static void rtrs_clt_sess_stats_release(struct kobject *kobj)
+static void rtrs_clt_path_stats_release(struct kobject *kobj)
{
struct rtrs_clt_stats *stats;
@@ -43,14 +43,15 @@ static void rtrs_clt_sess_stats_release(struct kobject *kobj)
static struct kobj_type ktype_stats = {
.sysfs_ops = &kobj_sysfs_ops,
- .release = rtrs_clt_sess_stats_release,
+ .release = rtrs_clt_path_stats_release,
};
static ssize_t max_reconnect_attempts_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
- struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
+ struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
+ dev);
return sysfs_emit(page, "%d\n",
rtrs_clt_get_max_reconnect_attempts(clt));
@@ -63,7 +64,8 @@ static ssize_t max_reconnect_attempts_store(struct device *dev,
{
int value;
int ret;
- struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
+ struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
+ dev);
ret = kstrtoint(buf, 10, &value);
if (ret) {
@@ -90,9 +92,9 @@ static ssize_t mpath_policy_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
- struct rtrs_clt *clt;
+ struct rtrs_clt_sess *clt;
- clt = container_of(dev, struct rtrs_clt, dev);
+ clt = container_of(dev, struct rtrs_clt_sess, dev);
switch (clt->mp_policy) {
case MP_POLICY_RR:
@@ -114,12 +116,12 @@ static ssize_t mpath_policy_store(struct device *dev,
const char *buf,
size_t count)
{
- struct rtrs_clt *clt;
+ struct rtrs_clt_sess *clt;
int value;
int ret;
size_t len = 0;
- clt = container_of(dev, struct rtrs_clt, dev);
+ clt = container_of(dev, struct rtrs_clt_sess, dev);
ret = kstrtoint(buf, 10, &value);
if (!ret && (value == MP_POLICY_RR ||
@@ -169,12 +171,12 @@ static ssize_t add_path_store(struct device *dev,
.src = &srcaddr,
.dst = &dstaddr
};
- struct rtrs_clt *clt;
+ struct rtrs_clt_sess *clt;
const char *nl;
size_t len;
int err;
- clt = container_of(dev, struct rtrs_clt, dev);
+ clt = container_of(dev, struct rtrs_clt_sess, dev);
nl = strchr(buf, '\n');
if (nl)
@@ -197,10 +199,10 @@ static DEVICE_ATTR_RW(add_path);
static ssize_t rtrs_clt_state_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
- if (sess->state == RTRS_CLT_CONNECTED)
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+ if (clt_path->state == RTRS_CLT_CONNECTED)
return sysfs_emit(page, "connected\n");
return sysfs_emit(page, "disconnected\n");
@@ -219,16 +221,16 @@ static ssize_t rtrs_clt_reconnect_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
int ret;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
if (!sysfs_streq(buf, "1")) {
- rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
+ rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n",
attr->attr.name, buf);
return -EINVAL;
}
- ret = rtrs_clt_reconnect_from_sysfs(sess);
+ ret = rtrs_clt_reconnect_from_sysfs(clt_path);
if (ret)
return ret;
@@ -249,15 +251,15 @@ static ssize_t rtrs_clt_disconnect_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
if (!sysfs_streq(buf, "1")) {
- rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
+ rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n",
attr->attr.name, buf);
return -EINVAL;
}
- rtrs_clt_close_conns(sess, true);
+ rtrs_clt_close_conns(clt_path, true);
return count;
}
@@ -276,16 +278,16 @@ static ssize_t rtrs_clt_remove_path_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
int ret;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
if (!sysfs_streq(buf, "1")) {
- rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
+ rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n",
attr->attr.name, buf);
return -EINVAL;
}
- ret = rtrs_clt_remove_path_from_sysfs(sess, &attr->attr);
+ ret = rtrs_clt_remove_path_from_sysfs(clt_path, &attr->attr);
if (ret)
return ret;
@@ -333,11 +335,11 @@ static ssize_t rtrs_clt_hca_port_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = container_of(kobj, typeof(*sess), kobj);
+ clt_path = container_of(kobj, typeof(*clt_path), kobj);
- return sysfs_emit(page, "%u\n", sess->hca_port);
+ return sysfs_emit(page, "%u\n", clt_path->hca_port);
}
static struct kobj_attribute rtrs_clt_hca_port_attr =
@@ -347,11 +349,11 @@ static ssize_t rtrs_clt_hca_name_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
- return sysfs_emit(page, "%s\n", sess->hca_name);
+ return sysfs_emit(page, "%s\n", clt_path->hca_name);
}
static struct kobj_attribute rtrs_clt_hca_name_attr =
@@ -361,12 +363,12 @@ static ssize_t rtrs_clt_cur_latency_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
return sysfs_emit(page, "%lld ns\n",
- ktime_to_ns(sess->s.hb_cur_latency));
+ ktime_to_ns(clt_path->s.hb_cur_latency));
}
static struct kobj_attribute rtrs_clt_cur_latency_attr =
@@ -376,11 +378,11 @@ static ssize_t rtrs_clt_src_addr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
int len;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
- len = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr, page,
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+ len = sockaddr_to_str((struct sockaddr *)&clt_path->s.src_addr, page,
PAGE_SIZE);
len += sysfs_emit_at(page, len, "\n");
return len;
@@ -393,11 +395,11 @@ static ssize_t rtrs_clt_dst_addr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
int len;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
- len = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, page,
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+ len = sockaddr_to_str((struct sockaddr *)&clt_path->s.dst_addr, page,
PAGE_SIZE);
len += sysfs_emit_at(page, len, "\n");
return len;
@@ -406,7 +408,7 @@ static ssize_t rtrs_clt_dst_addr_show(struct kobject *kobj,
static struct kobj_attribute rtrs_clt_dst_addr_attr =
__ATTR(dst_addr, 0444, rtrs_clt_dst_addr_show, NULL);
-static struct attribute *rtrs_clt_sess_attrs[] = {
+static struct attribute *rtrs_clt_path_attrs[] = {
&rtrs_clt_hca_name_attr.attr,
&rtrs_clt_hca_port_attr.attr,
&rtrs_clt_src_addr_attr.attr,
@@ -419,42 +421,43 @@ static struct attribute *rtrs_clt_sess_attrs[] = {
NULL,
};
-static const struct attribute_group rtrs_clt_sess_attr_group = {
- .attrs = rtrs_clt_sess_attrs,
+static const struct attribute_group rtrs_clt_path_attr_group = {
+ .attrs = rtrs_clt_path_attrs,
};
-int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess)
+int rtrs_clt_create_path_files(struct rtrs_clt_path *clt_path)
{
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_sess *clt = clt_path->clt;
char str[NAME_MAX];
int err;
struct rtrs_addr path = {
- .src = &sess->s.src_addr,
- .dst = &sess->s.dst_addr,
+ .src = &clt_path->s.src_addr,
+ .dst = &clt_path->s.dst_addr,
};
rtrs_addr_to_str(&path, str, sizeof(str));
- err = kobject_init_and_add(&sess->kobj, &ktype_sess, clt->kobj_paths,
+ err = kobject_init_and_add(&clt_path->kobj, &ktype_sess,
+ clt->kobj_paths,
"%s", str);
if (err) {
pr_err("kobject_init_and_add: %d\n", err);
- kobject_put(&sess->kobj);
+ kobject_put(&clt_path->kobj);
return err;
}
- err = sysfs_create_group(&sess->kobj, &rtrs_clt_sess_attr_group);
+ err = sysfs_create_group(&clt_path->kobj, &rtrs_clt_path_attr_group);
if (err) {
pr_err("sysfs_create_group(): %d\n", err);
goto put_kobj;
}
- err = kobject_init_and_add(&sess->stats->kobj_stats, &ktype_stats,
- &sess->kobj, "stats");
+ err = kobject_init_and_add(&clt_path->stats->kobj_stats, &ktype_stats,
+ &clt_path->kobj, "stats");
if (err) {
pr_err("kobject_init_and_add: %d\n", err);
- kobject_put(&sess->stats->kobj_stats);
+ kobject_put(&clt_path->stats->kobj_stats);
goto remove_group;
}
- err = sysfs_create_group(&sess->stats->kobj_stats,
+ err = sysfs_create_group(&clt_path->stats->kobj_stats,
&rtrs_clt_stats_attr_group);
if (err) {
pr_err("failed to create stats sysfs group, err: %d\n", err);
@@ -464,25 +467,25 @@ int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess)
return 0;
put_kobj_stats:
- kobject_del(&sess->stats->kobj_stats);
- kobject_put(&sess->stats->kobj_stats);
+ kobject_del(&clt_path->stats->kobj_stats);
+ kobject_put(&clt_path->stats->kobj_stats);
remove_group:
- sysfs_remove_group(&sess->kobj, &rtrs_clt_sess_attr_group);
+ sysfs_remove_group(&clt_path->kobj, &rtrs_clt_path_attr_group);
put_kobj:
- kobject_del(&sess->kobj);
- kobject_put(&sess->kobj);
+ kobject_del(&clt_path->kobj);
+ kobject_put(&clt_path->kobj);
return err;
}
-void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess,
+void rtrs_clt_destroy_path_files(struct rtrs_clt_path *clt_path,
const struct attribute *sysfs_self)
{
- kobject_del(&sess->stats->kobj_stats);
- kobject_put(&sess->stats->kobj_stats);
+ kobject_del(&clt_path->stats->kobj_stats);
+ kobject_put(&clt_path->stats->kobj_stats);
if (sysfs_self)
- sysfs_remove_file_self(&sess->kobj, sysfs_self);
- kobject_del(&sess->kobj);
+ sysfs_remove_file_self(&clt_path->kobj, sysfs_self);
+ kobject_del(&clt_path->kobj);
}
static struct attribute *rtrs_clt_attrs[] = {
@@ -496,12 +499,12 @@ static const struct attribute_group rtrs_clt_attr_group = {
.attrs = rtrs_clt_attrs,
};
-int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt)
+int rtrs_clt_create_sysfs_root_files(struct rtrs_clt_sess *clt)
{
return sysfs_create_group(&clt->dev.kobj, &rtrs_clt_attr_group);
}
-void rtrs_clt_destroy_sysfs_root(struct rtrs_clt *clt)
+void rtrs_clt_destroy_sysfs_root(struct rtrs_clt_sess *clt)
{
sysfs_remove_group(&clt->dev.kobj, &rtrs_clt_attr_group);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 15c0077dd27e..7c3f98e57889 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -46,21 +46,21 @@ static struct rtrs_rdma_dev_pd dev_pd = {
static struct workqueue_struct *rtrs_wq;
static struct class *rtrs_clt_dev_class;
-static inline bool rtrs_clt_is_connected(const struct rtrs_clt *clt)
+static inline bool rtrs_clt_is_connected(const struct rtrs_clt_sess *clt)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
bool connected = false;
rcu_read_lock();
- list_for_each_entry_rcu(sess, &clt->paths_list, s.entry)
- connected |= READ_ONCE(sess->state) == RTRS_CLT_CONNECTED;
+ list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry)
+ connected |= READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED;
rcu_read_unlock();
return connected;
}
static struct rtrs_permit *
-__rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type)
+__rtrs_get_permit(struct rtrs_clt_sess *clt, enum rtrs_clt_con_type con_type)
{
size_t max_depth = clt->queue_depth;
struct rtrs_permit *permit;
@@ -87,7 +87,7 @@ __rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type)
return permit;
}
-static inline void __rtrs_put_permit(struct rtrs_clt *clt,
+static inline void __rtrs_put_permit(struct rtrs_clt_sess *clt,
struct rtrs_permit *permit)
{
clear_bit_unlock(permit->mem_id, clt->permits_map);
@@ -107,7 +107,7 @@ static inline void __rtrs_put_permit(struct rtrs_clt *clt,
* Context:
* Can sleep if @wait == RTRS_PERMIT_WAIT
*/
-struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *clt,
+struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *clt,
enum rtrs_clt_con_type con_type,
enum wait_type can_wait)
{
@@ -142,7 +142,8 @@ EXPORT_SYMBOL(rtrs_clt_get_permit);
* Context:
* Does not matter
*/
-void rtrs_clt_put_permit(struct rtrs_clt *clt, struct rtrs_permit *permit)
+void rtrs_clt_put_permit(struct rtrs_clt_sess *clt,
+ struct rtrs_permit *permit)
{
if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map)))
return;
@@ -163,29 +164,29 @@ EXPORT_SYMBOL(rtrs_clt_put_permit);
/**
* rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit
- * @sess: client session pointer
+ * @clt_path: client path pointer
* @permit: permit for the allocation of the RDMA buffer
* Note:
* IO connection starts from 1.
* 0 connection is for user messages.
*/
static
-struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess,
+struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_path *clt_path,
struct rtrs_permit *permit)
{
int id = 0;
if (permit->con_type == RTRS_IO_CON)
- id = (permit->cpu_id % (sess->s.irq_con_num - 1)) + 1;
+ id = (permit->cpu_id % (clt_path->s.irq_con_num - 1)) + 1;
- return to_clt_con(sess->s.con[id]);
+ return to_clt_con(clt_path->s.con[id]);
}
/**
* rtrs_clt_change_state() - change the session state through session state
* machine.
*
- * @sess: client session to change the state of.
+ * @clt_path: client path to change the state of.
* @new_state: state to change to.
*
* returns true if sess's state is changed to the new state, otherwise returns false.
*
* Locks:
* state_wq lock must be held.
*/
-static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess,
+static bool rtrs_clt_change_state(struct rtrs_clt_path *clt_path,
enum rtrs_clt_state new_state)
{
enum rtrs_clt_state old_state;
bool changed = false;
- lockdep_assert_held(&sess->state_wq.lock);
+ lockdep_assert_held(&clt_path->state_wq.lock);
- old_state = sess->state;
+ old_state = clt_path->state;
switch (new_state) {
case RTRS_CLT_CONNECTING:
switch (old_state) {
@@ -275,42 +276,42 @@ static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess,
break;
}
if (changed) {
- sess->state = new_state;
- wake_up_locked(&sess->state_wq);
+ clt_path->state = new_state;
+ wake_up_locked(&clt_path->state_wq);
}
return changed;
}
-static bool rtrs_clt_change_state_from_to(struct rtrs_clt_sess *sess,
+static bool rtrs_clt_change_state_from_to(struct rtrs_clt_path *clt_path,
enum rtrs_clt_state old_state,
enum rtrs_clt_state new_state)
{
bool changed = false;
- spin_lock_irq(&sess->state_wq.lock);
- if (sess->state == old_state)
- changed = rtrs_clt_change_state(sess, new_state);
- spin_unlock_irq(&sess->state_wq.lock);
+ spin_lock_irq(&clt_path->state_wq.lock);
+ if (clt_path->state == old_state)
+ changed = rtrs_clt_change_state(clt_path, new_state);
+ spin_unlock_irq(&clt_path->state_wq.lock);
return changed;
}
static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
- if (rtrs_clt_change_state_from_to(sess,
+ if (rtrs_clt_change_state_from_to(clt_path,
RTRS_CLT_CONNECTED,
RTRS_CLT_RECONNECTING)) {
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_sess *clt = clt_path->clt;
unsigned int delay_ms;
/*
* Normal scenario, reconnect if we were successfully connected
*/
delay_ms = clt->reconnect_delay_sec * 1000;
- queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
+ queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
msecs_to_jiffies(delay_ms +
prandom_u32() % RTRS_RECONNECT_SEED));
} else {
@@ -319,7 +320,7 @@ static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
* so notify waiter with error state, waiter is responsible
* for cleaning the rest and reconnect if needed.
*/
- rtrs_clt_change_state_from_to(sess,
+ rtrs_clt_change_state_from_to(clt_path,
RTRS_CLT_CONNECTING,
RTRS_CLT_CONNECTING_ERR);
}
@@ -330,7 +331,7 @@ static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
if (wc->status != IB_WC_SUCCESS) {
- rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n",
+ rtrs_err(con->c.path, "Failed IB_WR_REG_MR: %s\n",
ib_wc_status_msg(wc->status));
rtrs_rdma_error_recovery(con);
}
@@ -350,7 +351,7 @@ static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
if (wc->status != IB_WC_SUCCESS) {
- rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n",
+ rtrs_err(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n",
ib_wc_status_msg(wc->status));
rtrs_rdma_error_recovery(con);
}
@@ -380,14 +381,14 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
bool notify, bool can_wait)
{
struct rtrs_clt_con *con = req->con;
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
int err;
if (WARN_ON(!req->in_use))
return;
if (WARN_ON(!req->con))
return;
- sess = to_clt_sess(con->c.sess);
+ clt_path = to_clt_path(con->c.path);
if (req->sg_cnt) {
if (req->dir == DMA_FROM_DEVICE && req->need_inv) {
@@ -417,7 +418,7 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
refcount_inc(&req->ref);
err = rtrs_inv_rkey(req);
if (err) {
- rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n",
+ rtrs_err(con->c.path, "Send INV WR key=%#x: %d\n",
req->mr->rkey, err);
} else if (can_wait) {
wait_for_completion(&req->inv_comp);
@@ -433,21 +434,21 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
if (!refcount_dec_and_test(&req->ref))
return;
}
- ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
+ ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
}
if (!refcount_dec_and_test(&req->ref))
return;
if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
- atomic_dec(&sess->stats->inflight);
+ atomic_dec(&clt_path->stats->inflight);
req->in_use = false;
req->con = NULL;
if (errno) {
- rtrs_err_rl(con->c.sess, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n",
- errno, kobject_name(&sess->kobj), sess->hca_name,
- sess->hca_port, notify);
+ rtrs_err_rl(con->c.path, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n",
+ errno, kobject_name(&clt_path->kobj), clt_path->hca_name,
+ clt_path->hca_port, notify);
}
if (notify)
@@ -459,12 +460,12 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
struct rtrs_rbuf *rbuf, u32 off,
u32 imm, struct ib_send_wr *wr)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
enum ib_send_flags flags;
struct ib_sge sge;
if (!req->sg_size) {
- rtrs_wrn(con->c.sess,
+ rtrs_wrn(con->c.path,
"Doing RDMA Write failed, no data supplied\n");
return -EINVAL;
}
@@ -472,16 +473,17 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
/* user data and user message in the first list element */
sge.addr = req->iu->dma_addr;
sge.length = req->sg_size;
- sge.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ sge.lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
/*
* From time to time we have to post signalled sends,
* or the send queue will fill up and only a QP reset can help.
*/
- flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ?
+ flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
0 : IB_SEND_SIGNALED;
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
+ ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
+ req->iu->dma_addr,
req->sg_size, DMA_TO_DEVICE);
return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
@@ -489,15 +491,15 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
imm, flags, wr, NULL);
}
-static void process_io_rsp(struct rtrs_clt_sess *sess, u32 msg_id,
+static void process_io_rsp(struct rtrs_clt_path *clt_path, u32 msg_id,
s16 errno, bool w_inval)
{
struct rtrs_clt_io_req *req;
- if (WARN_ON(msg_id >= sess->queue_depth))
+ if (WARN_ON(msg_id >= clt_path->queue_depth))
return;
- req = &sess->reqs[msg_id];
+ req = &clt_path->reqs[msg_id];
/* Drop need_inv if server responded with send with invalidation */
req->need_inv &= !w_inval;
complete_rdma_req(req, errno, true, false);
@@ -507,21 +509,21 @@ static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
{
struct rtrs_iu *iu;
int err;
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
- WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0);
+ WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0);
iu = container_of(wc->wr_cqe, struct rtrs_iu,
cqe);
err = rtrs_iu_post_recv(&con->c, iu);
if (err) {
- rtrs_err(con->c.sess, "post iu failed %d\n", err);
+ rtrs_err(con->c.path, "post iu failed %d\n", err);
rtrs_rdma_error_recovery(con);
}
}
static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
struct rtrs_msg_rkey_rsp *msg;
u32 imm_type, imm_payload;
bool w_inval = false;
@@ -529,25 +531,26 @@ static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
u32 buf_id;
int err;
- WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0);
+ WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0);
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
if (wc->byte_len < sizeof(*msg)) {
- rtrs_err(con->c.sess, "rkey response is malformed: size %d\n",
+ rtrs_err(con->c.path, "rkey response is malformed: size %d\n",
wc->byte_len);
goto out;
}
- ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
+ ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr,
iu->size, DMA_FROM_DEVICE);
msg = iu->buf;
if (le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP) {
- rtrs_err(sess->clt, "rkey response is malformed: type %d\n",
+ rtrs_err(clt_path->clt,
+ "rkey response is malformed: type %d\n",
le16_to_cpu(msg->type));
goto out;
}
buf_id = le16_to_cpu(msg->buf_id);
- if (WARN_ON(buf_id >= sess->queue_depth))
+ if (WARN_ON(buf_id >= clt_path->queue_depth))
goto out;
rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload);
@@ -560,10 +563,10 @@ static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
if (WARN_ON(buf_id != msg_id))
goto out;
- sess->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
- process_io_rsp(sess, msg_id, err, w_inval);
+ clt_path->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
+ process_io_rsp(clt_path, msg_id, err, w_inval);
}
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev, iu->dma_addr,
+ ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, iu->dma_addr,
iu->size, DMA_FROM_DEVICE);
return rtrs_clt_recv_done(con, wc);
out:
@@ -600,14 +603,14 @@ static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
u32 imm_type, imm_payload;
bool w_inval = false;
int err;
if (wc->status != IB_WC_SUCCESS) {
if (wc->status != IB_WC_WR_FLUSH_ERR) {
- rtrs_err(sess->clt, "RDMA failed: %s\n",
+ rtrs_err(clt_path->clt, "RDMA failed: %s\n",
ib_wc_status_msg(wc->status));
rtrs_rdma_error_recovery(con);
}
@@ -632,21 +635,21 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
- process_io_rsp(sess, msg_id, err, w_inval);
+ process_io_rsp(clt_path, msg_id, err, w_inval);
} else if (imm_type == RTRS_HB_MSG_IMM) {
WARN_ON(con->c.cid);
- rtrs_send_hb_ack(&sess->s);
- if (sess->flags & RTRS_MSG_NEW_RKEY_F)
+ rtrs_send_hb_ack(&clt_path->s);
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
return rtrs_clt_recv_done(con, wc);
} else if (imm_type == RTRS_HB_ACK_IMM) {
WARN_ON(con->c.cid);
- sess->s.hb_missed_cnt = 0;
- sess->s.hb_cur_latency =
- ktime_sub(ktime_get(), sess->s.hb_last_sent);
- if (sess->flags & RTRS_MSG_NEW_RKEY_F)
+ clt_path->s.hb_missed_cnt = 0;
+ clt_path->s.hb_cur_latency =
+ ktime_sub(ktime_get(), clt_path->s.hb_last_sent);
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
return rtrs_clt_recv_done(con, wc);
} else {
- rtrs_wrn(con->c.sess, "Unknown IMM type %u\n",
+ rtrs_wrn(con->c.path, "Unknown IMM type %u\n",
imm_type);
}
if (w_inval)
@@ -658,7 +661,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
else
err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
if (err) {
- rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n",
+ rtrs_err(con->c.path, "rtrs_post_recv_empty(): %d\n",
err);
rtrs_rdma_error_recovery(con);
}
@@ -670,7 +673,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
wc->wc_flags & IB_WC_WITH_IMM));
WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
- if (sess->flags & RTRS_MSG_NEW_RKEY_F) {
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
return rtrs_clt_recv_done(con, wc);
@@ -685,7 +688,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
break;
default:
- rtrs_wrn(sess->clt, "Unexpected WC type: %d\n", wc->opcode);
+ rtrs_wrn(clt_path->clt, "Unexpected WC type: %d\n", wc->opcode);
return;
}
}
@@ -693,10 +696,10 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
{
int err, i;
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
for (i = 0; i < q_size; i++) {
- if (sess->flags & RTRS_MSG_NEW_RKEY_F) {
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
struct rtrs_iu *iu = &con->rsp_ius[i];
err = rtrs_iu_post_recv(&con->c, iu);
@@ -710,16 +713,16 @@ static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
return 0;
}
-static int post_recv_sess(struct rtrs_clt_sess *sess)
+static int post_recv_path(struct rtrs_clt_path *clt_path)
{
size_t q_size = 0;
int err, cid;
- for (cid = 0; cid < sess->s.con_num; cid++) {
+ for (cid = 0; cid < clt_path->s.con_num; cid++) {
if (cid == 0)
q_size = SERVICE_CON_QUEUE_DEPTH;
else
- q_size = sess->queue_depth;
+ q_size = clt_path->queue_depth;
/*
* x2 for RDMA read responses + FR key invalidations,
@@ -727,9 +730,10 @@ static int post_recv_sess(struct rtrs_clt_sess *sess)
*/
q_size *= 2;
- err = post_recv_io(to_clt_con(sess->s.con[cid]), q_size);
+ err = post_recv_io(to_clt_con(clt_path->s.con[cid]), q_size);
if (err) {
- rtrs_err(sess->clt, "post_recv_io(), err: %d\n", err);
+ rtrs_err(clt_path->clt, "post_recv_io(), err: %d\n",
+ err);
return err;
}
}
@@ -740,8 +744,8 @@ static int post_recv_sess(struct rtrs_clt_sess *sess)
struct path_it {
int i;
struct list_head skip_list;
- struct rtrs_clt *clt;
- struct rtrs_clt_sess *(*next_path)(struct path_it *it);
+ struct rtrs_clt_sess *clt;
+ struct rtrs_clt_path *(*next_path)(struct path_it *it);
};
/**
@@ -773,11 +777,11 @@ struct path_it {
* Locks:
* rcu_read_lock() must be held.
*/
-static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it)
+static struct rtrs_clt_path *get_next_path_rr(struct path_it *it)
{
- struct rtrs_clt_sess __rcu **ppcpu_path;
- struct rtrs_clt_sess *path;
- struct rtrs_clt *clt;
+ struct rtrs_clt_path __rcu **ppcpu_path;
+ struct rtrs_clt_path *path;
+ struct rtrs_clt_sess *clt;
clt = it->clt;
@@ -811,26 +815,26 @@ static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it)
* Locks:
* rcu_read_lock() must be held.
*/
-static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
+static struct rtrs_clt_path *get_next_path_min_inflight(struct path_it *it)
{
- struct rtrs_clt_sess *min_path = NULL;
- struct rtrs_clt *clt = it->clt;
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *min_path = NULL;
+ struct rtrs_clt_sess *clt = it->clt;
+ struct rtrs_clt_path *clt_path;
int min_inflight = INT_MAX;
int inflight;
- list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
- if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)
+ list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
continue;
- if (!list_empty(raw_cpu_ptr(sess->mp_skip_entry)))
+ if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
continue;
- inflight = atomic_read(&sess->stats->inflight);
+ inflight = atomic_read(&clt_path->stats->inflight);
if (inflight < min_inflight) {
min_inflight = inflight;
- min_path = sess;
+ min_path = clt_path;
}
}
@@ -862,26 +866,26 @@ static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
* Therefore the caller MUST check the returned
* path is NULL and trigger the IO error.
*/
-static struct rtrs_clt_sess *get_next_path_min_latency(struct path_it *it)
+static struct rtrs_clt_path *get_next_path_min_latency(struct path_it *it)
{
- struct rtrs_clt_sess *min_path = NULL;
- struct rtrs_clt *clt = it->clt;
- struct rtrs_clt_sess *sess;
- ktime_t min_latency = INT_MAX;
+ struct rtrs_clt_path *min_path = NULL;
+ struct rtrs_clt_sess *clt = it->clt;
+ struct rtrs_clt_path *clt_path;
+ ktime_t min_latency = KTIME_MAX;
ktime_t latency;
- list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
- if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)
+ list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
continue;
- if (!list_empty(raw_cpu_ptr(sess->mp_skip_entry)))
+ if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
continue;
- latency = sess->s.hb_cur_latency;
+ latency = clt_path->s.hb_cur_latency;
if (latency < min_latency) {
min_latency = latency;
- min_path = sess;
+ min_path = clt_path;
}
}
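Aside from the sess-to-path rename, the functional change in the hunk above is the seed of the minimum search: hb_cur_latency is a ktime_t, i.e. signed 64-bit nanoseconds, so initializing min_latency to INT_MAX (roughly 2.1 seconds) means a connected path whose measured latency exceeds that can never win a comparison, and the iterator may return no path at all. A self-contained sketch with made-up latency values, only to show why the sentinel must match the width of the values being compared:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Numerically equal to the kernel's KTIME_MAX (largest signed 64-bit value). */
#define KTIME_MAX INT64_MAX

int main(void)
{
	/* Hypothetical per-path heartbeat latencies in nanoseconds: 3.2 s and 2.5 s. */
	int64_t latency_ns[] = { 3200000000LL, 2500000000LL };
	int64_t min_bad = INT_MAX;	/* old seed: ~2.147 s, already below every sample */
	int64_t min_good = KTIME_MAX;	/* new seed: nothing can be above it */
	int picked_bad = -1, picked_good = -1;

	for (int i = 0; i < 2; i++) {
		if (latency_ns[i] < min_bad) {
			min_bad = latency_ns[i];
			picked_bad = i;
		}
		if (latency_ns[i] < min_good) {
			min_good = latency_ns[i];
			picked_good = i;
		}
	}
	/* Prints: INT_MAX seed picks -1, KTIME_MAX seed picks 1 */
	printf("INT_MAX seed picks %d, KTIME_MAX seed picks %d\n",
	       picked_bad, picked_good);
	return 0;
}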
@@ -895,7 +899,7 @@ static struct rtrs_clt_sess *get_next_path_min_latency(struct path_it *it)
return min_path;
}
-static inline void path_it_init(struct path_it *it, struct rtrs_clt *clt)
+static inline void path_it_init(struct path_it *it, struct rtrs_clt_sess *clt)
{
INIT_LIST_HEAD(&it->skip_list);
it->clt = clt;
@@ -928,7 +932,7 @@ static inline void path_it_deinit(struct path_it *it)
* the corresponding buffer of rtrs_iu (req->iu->buf), which later on will
* also hold the control message of rtrs.
* @req: an io request holding information about IO.
- * @sess: client session
+ * @clt_path: client path
* @conf: confirmation callback function to notify upper layer.
* @permit: permit for allocation of RDMA remote buffer
* @priv: private pointer
@@ -940,7 +944,7 @@ static inline void path_it_deinit(struct path_it *it)
* @dir: direction of the IO.
*/
static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
- struct rtrs_clt_sess *sess,
+ struct rtrs_clt_path *clt_path,
void (*conf)(void *priv, int errno),
struct rtrs_permit *permit, void *priv,
const struct kvec *vec, size_t usr_len,
@@ -958,13 +962,13 @@ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
req->sg_cnt = sg_cnt;
req->priv = priv;
req->dir = dir;
- req->con = rtrs_permit_to_clt_con(sess, permit);
+ req->con = rtrs_permit_to_clt_con(clt_path, permit);
req->conf = conf;
req->need_inv = false;
req->need_inv_comp = false;
req->inv_errno = 0;
refcount_set(&req->ref, 1);
- req->mp_policy = sess->clt->mp_policy;
+ req->mp_policy = clt_path->clt->mp_policy;
iov_iter_kvec(&iter, READ, vec, 1, usr_len);
len = _copy_from_iter(req->iu->buf, usr_len, &iter);
@@ -974,7 +978,7 @@ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
}
static struct rtrs_clt_io_req *
-rtrs_clt_get_req(struct rtrs_clt_sess *sess,
+rtrs_clt_get_req(struct rtrs_clt_path *clt_path,
void (*conf)(void *priv, int errno),
struct rtrs_permit *permit, void *priv,
const struct kvec *vec, size_t usr_len,
@@ -983,14 +987,14 @@ rtrs_clt_get_req(struct rtrs_clt_sess *sess,
{
struct rtrs_clt_io_req *req;
- req = &sess->reqs[permit->mem_id];
- rtrs_clt_init_req(req, sess, conf, permit, priv, vec, usr_len,
+ req = &clt_path->reqs[permit->mem_id];
+ rtrs_clt_init_req(req, clt_path, conf, permit, priv, vec, usr_len,
sg, sg_cnt, data_len, dir);
return req;
}
static struct rtrs_clt_io_req *
-rtrs_clt_get_copy_req(struct rtrs_clt_sess *alive_sess,
+rtrs_clt_get_copy_req(struct rtrs_clt_path *alive_path,
struct rtrs_clt_io_req *fail_req)
{
struct rtrs_clt_io_req *req;
@@ -999,8 +1003,8 @@ rtrs_clt_get_copy_req(struct rtrs_clt_sess *alive_sess,
.iov_len = fail_req->usr_len
};
- req = &alive_sess->reqs[fail_req->permit->mem_id];
- rtrs_clt_init_req(req, alive_sess, fail_req->conf, fail_req->permit,
+ req = &alive_path->reqs[fail_req->permit->mem_id];
+ rtrs_clt_init_req(req, alive_path, fail_req->conf, fail_req->permit,
fail_req->priv, &vec, fail_req->usr_len,
fail_req->sglist, fail_req->sg_cnt,
fail_req->data_len, fail_req->dir);
@@ -1013,7 +1017,7 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
u32 size, u32 imm, struct ib_send_wr *wr,
struct ib_send_wr *tail)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
struct ib_sge *sge = req->sge;
enum ib_send_flags flags;
struct scatterlist *sg;
@@ -1033,22 +1037,23 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
for_each_sg(req->sglist, sg, req->sg_cnt, i) {
sge[i].addr = sg_dma_address(sg);
sge[i].length = sg_dma_len(sg);
- sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
}
num_sge = 1 + req->sg_cnt;
}
sge[i].addr = req->iu->dma_addr;
sge[i].length = size;
- sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
/*
* From time to time we have to post signalled sends,
* or the send queue will fill up and only a QP reset can help.
*/
- flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ?
+ flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
0 : IB_SEND_SIGNALED;
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
+ ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
+ req->iu->dma_addr,
size, DMA_TO_DEVICE);
return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge,
@@ -1074,8 +1079,8 @@ static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
{
struct rtrs_clt_con *con = req->con;
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_clt_path *clt_path = to_clt_path(s);
struct rtrs_msg_rdma_write *msg;
struct rtrs_rbuf *rbuf;
@@ -1088,13 +1093,13 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
- if (tsize > sess->chunk_size) {
+ if (tsize > clt_path->chunk_size) {
rtrs_wrn(s, "Write request failed, size too big %zu > %d\n",
- tsize, sess->chunk_size);
+ tsize, clt_path->chunk_size);
return -EMSGSIZE;
}
if (req->sg_cnt) {
- count = ib_dma_map_sg(sess->s.dev->ib_dev, req->sglist,
+ count = ib_dma_map_sg(clt_path->s.dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
if (!count) {
rtrs_wrn(s, "Write request failed, map failed\n");
@@ -1111,7 +1116,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
imm = rtrs_to_io_req_imm(imm);
buf_id = req->permit->mem_id;
req->sg_size = tsize;
- rbuf = &sess->rbufs[buf_id];
+ rbuf = &clt_path->rbufs[buf_id];
if (count) {
ret = rtrs_map_sg_fr(req, count);
@@ -1119,7 +1124,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
rtrs_err_rl(s,
"Write request failed, failed to map fast reg. data, err: %d\n",
ret);
- ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
+ ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
return ret;
}
@@ -1153,12 +1158,12 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
if (ret) {
rtrs_err_rl(s,
"Write request failed: error=%d path=%s [%s:%u]\n",
- ret, kobject_name(&sess->kobj), sess->hca_name,
- sess->hca_port);
+ ret, kobject_name(&clt_path->kobj), clt_path->hca_name,
+ clt_path->hca_port);
if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
- atomic_dec(&sess->stats->inflight);
+ atomic_dec(&clt_path->stats->inflight);
if (req->sg_cnt)
- ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
+ ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
}
@@ -1168,10 +1173,10 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
{
struct rtrs_clt_con *con = req->con;
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_clt_path *clt_path = to_clt_path(s);
struct rtrs_msg_rdma_read *msg;
- struct rtrs_ib_dev *dev = sess->s.dev;
+ struct rtrs_ib_dev *dev = clt_path->s.dev;
struct ib_reg_wr rwr;
struct ib_send_wr *wr = NULL;
@@ -1181,10 +1186,10 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
- if (tsize > sess->chunk_size) {
+ if (tsize > clt_path->chunk_size) {
rtrs_wrn(s,
"Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n",
- tsize, sess->chunk_size);
+ tsize, clt_path->chunk_size);
return -EMSGSIZE;
}
@@ -1254,15 +1259,15 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
*/
rtrs_clt_update_all_stats(req, READ);
- ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id],
+ ret = rtrs_post_send_rdma(req->con, req, &clt_path->rbufs[buf_id],
req->data_len, imm, wr);
if (ret) {
rtrs_err_rl(s,
"Read request failed: error=%d path=%s [%s:%u]\n",
- ret, kobject_name(&sess->kobj), sess->hca_name,
- sess->hca_port);
+ ret, kobject_name(&clt_path->kobj), clt_path->hca_name,
+ clt_path->hca_port);
if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
- atomic_dec(&sess->stats->inflight);
+ atomic_dec(&clt_path->stats->inflight);
req->need_inv = false;
if (req->sg_cnt)
ib_dma_unmap_sg(dev->ib_dev, req->sglist,
@@ -1277,21 +1282,21 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
* @clt: clt context
* @fail_req: a failed io request.
*/
-static int rtrs_clt_failover_req(struct rtrs_clt *clt,
+static int rtrs_clt_failover_req(struct rtrs_clt_sess *clt,
struct rtrs_clt_io_req *fail_req)
{
- struct rtrs_clt_sess *alive_sess;
+ struct rtrs_clt_path *alive_path;
struct rtrs_clt_io_req *req;
int err = -ECONNABORTED;
struct path_it it;
rcu_read_lock();
for (path_it_init(&it, clt);
- (alive_sess = it.next_path(&it)) && it.i < it.clt->paths_num;
+ (alive_path = it.next_path(&it)) && it.i < it.clt->paths_num;
it.i++) {
- if (READ_ONCE(alive_sess->state) != RTRS_CLT_CONNECTED)
+ if (READ_ONCE(alive_path->state) != RTRS_CLT_CONNECTED)
continue;
- req = rtrs_clt_get_copy_req(alive_sess, fail_req);
+ req = rtrs_clt_get_copy_req(alive_path, fail_req);
if (req->dir == DMA_TO_DEVICE)
err = rtrs_clt_write_req(req);
else
@@ -1301,7 +1306,7 @@ static int rtrs_clt_failover_req(struct rtrs_clt *clt,
continue;
}
/* Success path */
- rtrs_clt_inc_failover_cnt(alive_sess->stats);
+ rtrs_clt_inc_failover_cnt(alive_path->stats);
break;
}
path_it_deinit(&it);
@@ -1310,16 +1315,16 @@ static int rtrs_clt_failover_req(struct rtrs_clt *clt,
return err;
}
-static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess)
+static void fail_all_outstanding_reqs(struct rtrs_clt_path *clt_path)
{
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_sess *clt = clt_path->clt;
struct rtrs_clt_io_req *req;
int i, err;
- if (!sess->reqs)
+ if (!clt_path->reqs)
return;
- for (i = 0; i < sess->queue_depth; ++i) {
- req = &sess->reqs[i];
+ for (i = 0; i < clt_path->queue_depth; ++i) {
+ req = &clt_path->reqs[i];
if (!req->in_use)
continue;
@@ -1337,38 +1342,39 @@ static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess)
}
}
-static void free_sess_reqs(struct rtrs_clt_sess *sess)
+static void free_path_reqs(struct rtrs_clt_path *clt_path)
{
struct rtrs_clt_io_req *req;
int i;
- if (!sess->reqs)
+ if (!clt_path->reqs)
return;
- for (i = 0; i < sess->queue_depth; ++i) {
- req = &sess->reqs[i];
+ for (i = 0; i < clt_path->queue_depth; ++i) {
+ req = &clt_path->reqs[i];
if (req->mr)
ib_dereg_mr(req->mr);
kfree(req->sge);
- rtrs_iu_free(req->iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(req->iu, clt_path->s.dev->ib_dev, 1);
}
- kfree(sess->reqs);
- sess->reqs = NULL;
+ kfree(clt_path->reqs);
+ clt_path->reqs = NULL;
}
-static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
+static int alloc_path_reqs(struct rtrs_clt_path *clt_path)
{
struct rtrs_clt_io_req *req;
int i, err = -ENOMEM;
- sess->reqs = kcalloc(sess->queue_depth, sizeof(*sess->reqs),
- GFP_KERNEL);
- if (!sess->reqs)
+ clt_path->reqs = kcalloc(clt_path->queue_depth,
+ sizeof(*clt_path->reqs),
+ GFP_KERNEL);
+ if (!clt_path->reqs)
return -ENOMEM;
- for (i = 0; i < sess->queue_depth; ++i) {
- req = &sess->reqs[i];
- req->iu = rtrs_iu_alloc(1, sess->max_hdr_size, GFP_KERNEL,
- sess->s.dev->ib_dev,
+ for (i = 0; i < clt_path->queue_depth; ++i) {
+ req = &clt_path->reqs[i];
+ req->iu = rtrs_iu_alloc(1, clt_path->max_hdr_size, GFP_KERNEL,
+ clt_path->s.dev->ib_dev,
DMA_TO_DEVICE,
rtrs_clt_rdma_done);
if (!req->iu)
@@ -1378,13 +1384,14 @@ static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
if (!req->sge)
goto out;
- req->mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
- sess->max_pages_per_mr);
+ req->mr = ib_alloc_mr(clt_path->s.dev->ib_pd,
+ IB_MR_TYPE_MEM_REG,
+ clt_path->max_pages_per_mr);
if (IS_ERR(req->mr)) {
err = PTR_ERR(req->mr);
req->mr = NULL;
- pr_err("Failed to alloc sess->max_pages_per_mr %d\n",
- sess->max_pages_per_mr);
+ pr_err("Failed to alloc clt_path->max_pages_per_mr %d\n",
+ clt_path->max_pages_per_mr);
goto out;
}
@@ -1394,12 +1401,12 @@ static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
return 0;
out:
- free_sess_reqs(sess);
+ free_path_reqs(clt_path);
return err;
}
-static int alloc_permits(struct rtrs_clt *clt)
+static int alloc_permits(struct rtrs_clt_sess *clt)
{
unsigned int chunk_bits;
int err, i;
@@ -1433,7 +1440,7 @@ out_err:
return err;
}
-static void free_permits(struct rtrs_clt *clt)
+static void free_permits(struct rtrs_clt_sess *clt)
{
if (clt->permits_map) {
size_t sz = clt->queue_depth;
@@ -1447,13 +1454,13 @@ static void free_permits(struct rtrs_clt *clt)
clt->permits = NULL;
}
-static void query_fast_reg_mode(struct rtrs_clt_sess *sess)
+static void query_fast_reg_mode(struct rtrs_clt_path *clt_path)
{
struct ib_device *ib_dev;
u64 max_pages_per_mr;
int mr_page_shift;
- ib_dev = sess->s.dev->ib_dev;
+ ib_dev = clt_path->s.dev->ib_dev;
/*
* Use the smallest page size supported by the HCA, down to a
@@ -1463,24 +1470,24 @@ static void query_fast_reg_mode(struct rtrs_clt_sess *sess)
mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1);
max_pages_per_mr = ib_dev->attrs.max_mr_size;
do_div(max_pages_per_mr, (1ull << mr_page_shift));
- sess->max_pages_per_mr =
- min3(sess->max_pages_per_mr, (u32)max_pages_per_mr,
+ clt_path->max_pages_per_mr =
+ min3(clt_path->max_pages_per_mr, (u32)max_pages_per_mr,
ib_dev->attrs.max_fast_reg_page_list_len);
- sess->clt->max_segments =
- min(sess->max_pages_per_mr, sess->clt->max_segments);
+ clt_path->clt->max_segments =
+ min(clt_path->max_pages_per_mr, clt_path->clt->max_segments);
}
-static bool rtrs_clt_change_state_get_old(struct rtrs_clt_sess *sess,
+static bool rtrs_clt_change_state_get_old(struct rtrs_clt_path *clt_path,
enum rtrs_clt_state new_state,
enum rtrs_clt_state *old_state)
{
bool changed;
- spin_lock_irq(&sess->state_wq.lock);
+ spin_lock_irq(&clt_path->state_wq.lock);
if (old_state)
- *old_state = sess->state;
- changed = rtrs_clt_change_state(sess, new_state);
- spin_unlock_irq(&sess->state_wq.lock);
+ *old_state = clt_path->state;
+ changed = rtrs_clt_change_state(clt_path, new_state);
+ spin_unlock_irq(&clt_path->state_wq.lock);
return changed;
}
@@ -1492,9 +1499,9 @@ static void rtrs_clt_hb_err_handler(struct rtrs_con *c)
rtrs_rdma_error_recovery(con);
}
-static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess)
+static void rtrs_clt_init_hb(struct rtrs_clt_path *clt_path)
{
- rtrs_init_hb(&sess->s, &io_comp_cqe,
+ rtrs_init_hb(&clt_path->s, &io_comp_cqe,
RTRS_HB_INTERVAL_MS,
RTRS_HB_MISSED_MAX,
rtrs_clt_hb_err_handler,
@@ -1504,17 +1511,17 @@ static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess)
static void rtrs_clt_reconnect_work(struct work_struct *work);
static void rtrs_clt_close_work(struct work_struct *work);
-static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt,
+static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
const struct rtrs_addr *path,
size_t con_num, u32 nr_poll_queues)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
int err = -ENOMEM;
int cpu;
size_t total_con;
- sess = kzalloc(sizeof(*sess), GFP_KERNEL);
- if (!sess)
+ clt_path = kzalloc(sizeof(*clt_path), GFP_KERNEL);
+ if (!clt_path)
goto err;
/*
@@ -1522,20 +1529,21 @@ static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt,
* +1: Extra connection for user messages
*/
total_con = con_num + nr_poll_queues + 1;
- sess->s.con = kcalloc(total_con, sizeof(*sess->s.con), GFP_KERNEL);
- if (!sess->s.con)
- goto err_free_sess;
+ clt_path->s.con = kcalloc(total_con, sizeof(*clt_path->s.con),
+ GFP_KERNEL);
+ if (!clt_path->s.con)
+ goto err_free_path;
- sess->s.con_num = total_con;
- sess->s.irq_con_num = con_num + 1;
+ clt_path->s.con_num = total_con;
+ clt_path->s.irq_con_num = con_num + 1;
- sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL);
- if (!sess->stats)
+ clt_path->stats = kzalloc(sizeof(*clt_path->stats), GFP_KERNEL);
+ if (!clt_path->stats)
goto err_free_con;
- mutex_init(&sess->init_mutex);
- uuid_gen(&sess->s.uuid);
- memcpy(&sess->s.dst_addr, path->dst,
+ mutex_init(&clt_path->init_mutex);
+ uuid_gen(&clt_path->s.uuid);
+ memcpy(&clt_path->s.dst_addr, path->dst,
rdma_addr_size((struct sockaddr *)path->dst));
/*
@@ -1544,53 +1552,54 @@ static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt,
* the sess->src_addr will contain only zeros, which is then fine.
*/
if (path->src)
- memcpy(&sess->s.src_addr, path->src,
+ memcpy(&clt_path->s.src_addr, path->src,
rdma_addr_size((struct sockaddr *)path->src));
- strscpy(sess->s.sessname, clt->sessname, sizeof(sess->s.sessname));
- sess->clt = clt;
- sess->max_pages_per_mr = RTRS_MAX_SEGMENTS;
- init_waitqueue_head(&sess->state_wq);
- sess->state = RTRS_CLT_CONNECTING;
- atomic_set(&sess->connected_cnt, 0);
- INIT_WORK(&sess->close_work, rtrs_clt_close_work);
- INIT_DELAYED_WORK(&sess->reconnect_dwork, rtrs_clt_reconnect_work);
- rtrs_clt_init_hb(sess);
-
- sess->mp_skip_entry = alloc_percpu(typeof(*sess->mp_skip_entry));
- if (!sess->mp_skip_entry)
+ strscpy(clt_path->s.sessname, clt->sessname,
+ sizeof(clt_path->s.sessname));
+ clt_path->clt = clt;
+ clt_path->max_pages_per_mr = RTRS_MAX_SEGMENTS;
+ init_waitqueue_head(&clt_path->state_wq);
+ clt_path->state = RTRS_CLT_CONNECTING;
+ atomic_set(&clt_path->connected_cnt, 0);
+ INIT_WORK(&clt_path->close_work, rtrs_clt_close_work);
+ INIT_DELAYED_WORK(&clt_path->reconnect_dwork, rtrs_clt_reconnect_work);
+ rtrs_clt_init_hb(clt_path);
+
+ clt_path->mp_skip_entry = alloc_percpu(typeof(*clt_path->mp_skip_entry));
+ if (!clt_path->mp_skip_entry)
goto err_free_stats;
for_each_possible_cpu(cpu)
- INIT_LIST_HEAD(per_cpu_ptr(sess->mp_skip_entry, cpu));
+ INIT_LIST_HEAD(per_cpu_ptr(clt_path->mp_skip_entry, cpu));
- err = rtrs_clt_init_stats(sess->stats);
+ err = rtrs_clt_init_stats(clt_path->stats);
if (err)
goto err_free_percpu;
- return sess;
+ return clt_path;
err_free_percpu:
- free_percpu(sess->mp_skip_entry);
+ free_percpu(clt_path->mp_skip_entry);
err_free_stats:
- kfree(sess->stats);
+ kfree(clt_path->stats);
err_free_con:
- kfree(sess->s.con);
-err_free_sess:
- kfree(sess);
+ kfree(clt_path->s.con);
+err_free_path:
+ kfree(clt_path);
err:
return ERR_PTR(err);
}
-void free_sess(struct rtrs_clt_sess *sess)
+void free_path(struct rtrs_clt_path *clt_path)
{
- free_percpu(sess->mp_skip_entry);
- mutex_destroy(&sess->init_mutex);
- kfree(sess->s.con);
- kfree(sess->rbufs);
- kfree(sess);
+ free_percpu(clt_path->mp_skip_entry);
+ mutex_destroy(&clt_path->init_mutex);
+ kfree(clt_path->s.con);
+ kfree(clt_path->rbufs);
+ kfree(clt_path);
}
-static int create_con(struct rtrs_clt_sess *sess, unsigned int cid)
+static int create_con(struct rtrs_clt_path *clt_path, unsigned int cid)
{
struct rtrs_clt_con *con;
@@ -1601,28 +1610,28 @@ static int create_con(struct rtrs_clt_sess *sess, unsigned int cid)
/* Map first two connections to the first CPU */
con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
con->c.cid = cid;
- con->c.sess = &sess->s;
+ con->c.path = &clt_path->s;
/* Align with srv, init as 1 */
atomic_set(&con->c.wr_cnt, 1);
mutex_init(&con->con_mutex);
- sess->s.con[cid] = &con->c;
+ clt_path->s.con[cid] = &con->c;
return 0;
}
static void destroy_con(struct rtrs_clt_con *con)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
- sess->s.con[con->c.cid] = NULL;
+ clt_path->s.con[con->c.cid] = NULL;
mutex_destroy(&con->con_mutex);
kfree(con);
}
static int create_con_cq_qp(struct rtrs_clt_con *con)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
u32 max_send_wr, max_recv_wr, cq_num, max_send_sge, wr_limit;
int err, cq_vector;
struct rtrs_msg_rkey_rsp *rsp;
@@ -1631,7 +1640,7 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
if (con->c.cid == 0) {
max_send_sge = 1;
/* We must be the first here */
- if (WARN_ON(sess->s.dev))
+ if (WARN_ON(clt_path->s.dev))
return -EINVAL;
/*
@@ -1639,16 +1648,16 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
* Be careful not to close user connection before ib dev
* is gracefully put.
*/
- sess->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
+ clt_path->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
&dev_pd);
- if (!sess->s.dev) {
- rtrs_wrn(sess->clt,
+ if (!clt_path->s.dev) {
+ rtrs_wrn(clt_path->clt,
"rtrs_ib_dev_find_get_or_add(): no memory\n");
return -ENOMEM;
}
- sess->s.dev_ref = 1;
- query_fast_reg_mode(sess);
- wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
+ clt_path->s.dev_ref = 1;
+ query_fast_reg_mode(clt_path);
+ wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr;
/*
* Two (request + registration) completions for send
* Two for recv if always_invalidate is set on server
@@ -1665,27 +1674,28 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
* This is always true if user connection (cid == 0) is
* established first.
*/
- if (WARN_ON(!sess->s.dev))
+ if (WARN_ON(!clt_path->s.dev))
return -EINVAL;
- if (WARN_ON(!sess->queue_depth))
+ if (WARN_ON(!clt_path->queue_depth))
return -EINVAL;
- wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
+ wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr;
/* Shared between connections */
- sess->s.dev_ref++;
+ clt_path->s.dev_ref++;
max_send_wr = min_t(int, wr_limit,
/* QD * (REQ + RSP + FR REGS or INVS) + drain */
- sess->queue_depth * 3 + 1);
+ clt_path->queue_depth * 3 + 1);
max_recv_wr = min_t(int, wr_limit,
- sess->queue_depth * 3 + 1);
+ clt_path->queue_depth * 3 + 1);
max_send_sge = 2;
}
atomic_set(&con->c.sq_wr_avail, max_send_wr);
cq_num = max_send_wr + max_recv_wr;
/* alloc iu to recv new rkey reply when server reports flags set */
- if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
con->rsp_ius = rtrs_iu_alloc(cq_num, sizeof(*rsp),
- GFP_KERNEL, sess->s.dev->ib_dev,
+ GFP_KERNEL,
+ clt_path->s.dev->ib_dev,
DMA_FROM_DEVICE,
rtrs_clt_rdma_done);
if (!con->rsp_ius)
@@ -1693,13 +1703,13 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
con->queue_num = cq_num;
}
cq_num = max_send_wr + max_recv_wr;
- cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
- if (con->c.cid >= sess->s.irq_con_num)
- err = rtrs_cq_qp_create(&sess->s, &con->c, max_send_sge,
+ cq_vector = con->cpu % clt_path->s.dev->ib_dev->num_comp_vectors;
+ if (con->c.cid >= clt_path->s.irq_con_num)
+ err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
cq_vector, cq_num, max_send_wr,
max_recv_wr, IB_POLL_DIRECT);
else
- err = rtrs_cq_qp_create(&sess->s, &con->c, max_send_sge,
+ err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
cq_vector, cq_num, max_send_wr,
max_recv_wr, IB_POLL_SOFTIRQ);
/*
@@ -1711,7 +1721,7 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
static void destroy_con_cq_qp(struct rtrs_clt_con *con)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
/*
* Be careful here: destroy_con_cq_qp() can be called even
@@ -1720,13 +1730,14 @@ static void destroy_con_cq_qp(struct rtrs_clt_con *con)
lockdep_assert_held(&con->con_mutex);
rtrs_cq_qp_destroy(&con->c);
if (con->rsp_ius) {
- rtrs_iu_free(con->rsp_ius, sess->s.dev->ib_dev, con->queue_num);
+ rtrs_iu_free(con->rsp_ius, clt_path->s.dev->ib_dev,
+ con->queue_num);
con->rsp_ius = NULL;
con->queue_num = 0;
}
- if (sess->s.dev_ref && !--sess->s.dev_ref) {
- rtrs_ib_dev_put(sess->s.dev);
- sess->s.dev = NULL;
+ if (clt_path->s.dev_ref && !--clt_path->s.dev_ref) {
+ rtrs_ib_dev_put(clt_path->s.dev);
+ clt_path->s.dev = NULL;
}
}
@@ -1745,7 +1756,7 @@ static void destroy_cm(struct rtrs_clt_con *con)
static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
{
- struct rtrs_sess *s = con->c.sess;
+ struct rtrs_path *s = con->c.path;
int err;
mutex_lock(&con->con_mutex);
@@ -1764,8 +1775,8 @@ static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_clt_sess *clt = clt_path->clt;
struct rtrs_msg_conn_req msg;
struct rdma_conn_param param;
@@ -1782,11 +1793,11 @@ static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
.magic = cpu_to_le16(RTRS_MAGIC),
.version = cpu_to_le16(RTRS_PROTO_VER),
.cid = cpu_to_le16(con->c.cid),
- .cid_num = cpu_to_le16(sess->s.con_num),
- .recon_cnt = cpu_to_le16(sess->s.recon_cnt),
+ .cid_num = cpu_to_le16(clt_path->s.con_num),
+ .recon_cnt = cpu_to_le16(clt_path->s.recon_cnt),
};
- msg.first_conn = sess->for_new_clt ? FIRST_CONN : 0;
- uuid_copy(&msg.sess_uuid, &sess->s.uuid);
+ msg.first_conn = clt_path->for_new_clt ? FIRST_CONN : 0;
+ uuid_copy(&msg.sess_uuid, &clt_path->s.uuid);
uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
err = rdma_connect_locked(con->c.cm_id, &param);
@@ -1799,8 +1810,8 @@ static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
struct rdma_cm_event *ev)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_clt_sess *clt = clt_path->clt;
const struct rtrs_msg_conn_rsp *msg;
u16 version, queue_depth;
int errno;
@@ -1831,31 +1842,32 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
if (con->c.cid == 0) {
queue_depth = le16_to_cpu(msg->queue_depth);
- if (sess->queue_depth > 0 && queue_depth != sess->queue_depth) {
+ if (clt_path->queue_depth > 0 && queue_depth != clt_path->queue_depth) {
rtrs_err(clt, "Error: queue depth changed\n");
/*
* Stop any more reconnection attempts
*/
- sess->reconnect_attempts = -1;
+ clt_path->reconnect_attempts = -1;
rtrs_err(clt,
"Disabling auto-reconnect. Trigger a manual reconnect after issue is resolved\n");
return -ECONNRESET;
}
- if (!sess->rbufs) {
- sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs),
- GFP_KERNEL);
- if (!sess->rbufs)
+ if (!clt_path->rbufs) {
+ clt_path->rbufs = kcalloc(queue_depth,
+ sizeof(*clt_path->rbufs),
+ GFP_KERNEL);
+ if (!clt_path->rbufs)
return -ENOMEM;
}
- sess->queue_depth = queue_depth;
- sess->s.signal_interval = min_not_zero(queue_depth,
+ clt_path->queue_depth = queue_depth;
+ clt_path->s.signal_interval = min_not_zero(queue_depth,
(unsigned short) SERVICE_CON_QUEUE_DEPTH);
- sess->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
- sess->max_io_size = le32_to_cpu(msg->max_io_size);
- sess->flags = le32_to_cpu(msg->flags);
- sess->chunk_size = sess->max_io_size + sess->max_hdr_size;
+ clt_path->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
+ clt_path->max_io_size = le32_to_cpu(msg->max_io_size);
+ clt_path->flags = le32_to_cpu(msg->flags);
+ clt_path->chunk_size = clt_path->max_io_size + clt_path->max_hdr_size;
/*
* Global IO size is always a minimum.
@@ -1866,20 +1878,20 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
* connections in parallel, use lock.
*/
mutex_lock(&clt->paths_mutex);
- clt->queue_depth = sess->queue_depth;
- clt->max_io_size = min_not_zero(sess->max_io_size,
+ clt->queue_depth = clt_path->queue_depth;
+ clt->max_io_size = min_not_zero(clt_path->max_io_size,
clt->max_io_size);
mutex_unlock(&clt->paths_mutex);
/*
* Cache the hca_port and hca_name for sysfs
*/
- sess->hca_port = con->c.cm_id->port_num;
- scnprintf(sess->hca_name, sizeof(sess->hca_name),
- sess->s.dev->ib_dev->name);
- sess->s.src_addr = con->c.cm_id->route.addr.src_addr;
+ clt_path->hca_port = con->c.cm_id->port_num;
+ scnprintf(clt_path->hca_name, sizeof(clt_path->hca_name),
+ clt_path->s.dev->ib_dev->name);
+ clt_path->s.src_addr = con->c.cm_id->route.addr.src_addr;
/* set for_new_clt, to allow future reconnect on any path */
- sess->for_new_clt = 1;
+ clt_path->for_new_clt = 1;
}
return 0;
@@ -1887,16 +1899,16 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
static inline void flag_success_on_conn(struct rtrs_clt_con *con)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
- atomic_inc(&sess->connected_cnt);
+ atomic_inc(&clt_path->connected_cnt);
con->cm_err = 1;
}
static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
struct rdma_cm_event *ev)
{
- struct rtrs_sess *s = con->c.sess;
+ struct rtrs_path *s = con->c.path;
const struct rtrs_msg_conn_rsp *msg;
const char *rej_msg;
int status, errno;
@@ -1924,23 +1936,23 @@ static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
return -ECONNRESET;
}
-void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait)
+void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait)
{
- if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSING, NULL))
- queue_work(rtrs_wq, &sess->close_work);
+ if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSING, NULL))
+ queue_work(rtrs_wq, &clt_path->close_work);
if (wait)
- flush_work(&sess->close_work);
+ flush_work(&clt_path->close_work);
}
static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err)
{
if (con->cm_err == 1) {
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = to_clt_sess(con->c.sess);
- if (atomic_dec_and_test(&sess->connected_cnt))
+ clt_path = to_clt_path(con->c.path);
+ if (atomic_dec_and_test(&clt_path->connected_cnt))
- wake_up(&sess->state_wq);
+ wake_up(&clt_path->state_wq);
}
con->cm_err = cm_err;
}
@@ -1949,8 +1961,8 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *ev)
{
struct rtrs_clt_con *con = cm_id->context;
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_clt_path *clt_path = to_clt_path(s);
int cm_err = 0;
switch (ev->event) {
@@ -1968,7 +1980,7 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
* i.e. wake up without state change, but we set cm_err.
*/
flag_success_on_conn(con);
- wake_up(&sess->state_wq);
+ wake_up(&clt_path->state_wq);
return 0;
}
break;
@@ -1997,7 +2009,7 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
/*
* Device removal is a special case. Queue close and return 0.
*/
- rtrs_clt_close_conns(sess, false);
+ rtrs_clt_close_conns(clt_path, false);
return 0;
default:
rtrs_err(s, "Unexpected RDMA CM error (CM event: %s, err: %d)\n",
@@ -2020,13 +2032,13 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
static int create_cm(struct rtrs_clt_con *con)
{
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_clt_path *clt_path = to_clt_path(s);
struct rdma_cm_id *cm_id;
int err;
cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
- sess->s.dst_addr.ss_family == AF_IB ?
+ clt_path->s.dst_addr.ss_family == AF_IB ?
RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cm_id)) {
err = PTR_ERR(cm_id);
@@ -2042,8 +2054,8 @@ static int create_cm(struct rtrs_clt_con *con)
rtrs_err(s, "Set address reuse failed, err: %d\n", err);
goto destroy_cm;
}
- err = rdma_resolve_addr(cm_id, (struct sockaddr *)&sess->s.src_addr,
- (struct sockaddr *)&sess->s.dst_addr,
+ err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr,
+ (struct sockaddr *)&clt_path->s.dst_addr,
RTRS_CONNECT_TIMEOUT_MS);
if (err) {
rtrs_err(s, "Failed to resolve address, err: %d\n", err);
@@ -2055,8 +2067,8 @@ static int create_cm(struct rtrs_clt_con *con)
* or session state was really changed to error by device removal.
*/
err = wait_event_interruptible_timeout(
- sess->state_wq,
- con->cm_err || sess->state != RTRS_CLT_CONNECTING,
+ clt_path->state_wq,
+ con->cm_err || clt_path->state != RTRS_CLT_CONNECTING,
msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
if (err == 0 || err == -ERESTARTSYS) {
if (err == 0)
@@ -2068,7 +2080,7 @@ static int create_cm(struct rtrs_clt_con *con)
err = con->cm_err;
goto errr;
}
- if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTING) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) {
/* Device removal */
err = -ECONNABORTED;
goto errr;
@@ -2087,9 +2099,9 @@ destroy_cm:
return err;
}
-static void rtrs_clt_sess_up(struct rtrs_clt_sess *sess)
+static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path)
{
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_sess *clt = clt_path->clt;
int up;
/*
@@ -2113,19 +2125,19 @@ static void rtrs_clt_sess_up(struct rtrs_clt_sess *sess)
mutex_unlock(&clt->paths_ev_mutex);
/* Mark session as established */
- sess->established = true;
- sess->reconnect_attempts = 0;
- sess->stats->reconnects.successful_cnt++;
+ clt_path->established = true;
+ clt_path->reconnect_attempts = 0;
+ clt_path->stats->reconnects.successful_cnt++;
}
-static void rtrs_clt_sess_down(struct rtrs_clt_sess *sess)
+static void rtrs_clt_path_down(struct rtrs_clt_path *clt_path)
{
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_sess *clt = clt_path->clt;
- if (!sess->established)
+ if (!clt_path->established)
return;
- sess->established = false;
+ clt_path->established = false;
mutex_lock(&clt->paths_ev_mutex);
WARN_ON(!clt->paths_up);
if (--clt->paths_up == 0)
@@ -2133,19 +2145,19 @@ static void rtrs_clt_sess_down(struct rtrs_clt_sess *sess)
mutex_unlock(&clt->paths_ev_mutex);
}
-static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
+static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path)
{
struct rtrs_clt_con *con;
unsigned int cid;
- WARN_ON(READ_ONCE(sess->state) == RTRS_CLT_CONNECTED);
+ WARN_ON(READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED);
/*
* Possible race with rtrs_clt_open(), when DEVICE_REMOVAL comes
* exactly in between. Start destroying after it finishes.
*/
- mutex_lock(&sess->init_mutex);
- mutex_unlock(&sess->init_mutex);
+ mutex_lock(&clt_path->init_mutex);
+ mutex_unlock(&clt_path->init_mutex);
/*
* All IO paths must observe !CONNECTED state before we
@@ -2153,7 +2165,7 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
*/
synchronize_rcu();
- rtrs_stop_hb(&sess->s);
+ rtrs_stop_hb(&clt_path->s);
/*
* The order is utterly crucial: firstly disconnect and complete all
@@ -2162,15 +2174,15 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
* eventually notify upper layer about session disconnection.
*/
- for (cid = 0; cid < sess->s.con_num; cid++) {
- if (!sess->s.con[cid])
+ for (cid = 0; cid < clt_path->s.con_num; cid++) {
+ if (!clt_path->s.con[cid])
break;
- con = to_clt_con(sess->s.con[cid]);
+ con = to_clt_con(clt_path->s.con[cid]);
stop_cm(con);
}
- fail_all_outstanding_reqs(sess);
- free_sess_reqs(sess);
- rtrs_clt_sess_down(sess);
+ fail_all_outstanding_reqs(clt_path);
+ free_path_reqs(clt_path);
+ rtrs_clt_path_down(clt_path);
/*
* Wait for graceful shutdown, namely when peer side invokes
@@ -2180,13 +2192,14 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
* since CM does not fire anything. That is fine, we are not in
* a hurry.
*/
- wait_event_timeout(sess->state_wq, !atomic_read(&sess->connected_cnt),
+ wait_event_timeout(clt_path->state_wq,
+ !atomic_read(&clt_path->connected_cnt),
msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
- for (cid = 0; cid < sess->s.con_num; cid++) {
- if (!sess->s.con[cid])
+ for (cid = 0; cid < clt_path->s.con_num; cid++) {
+ if (!clt_path->s.con[cid])
break;
- con = to_clt_con(sess->s.con[cid]);
+ con = to_clt_con(clt_path->s.con[cid]);
mutex_lock(&con->con_mutex);
destroy_con_cq_qp(con);
mutex_unlock(&con->con_mutex);
@@ -2195,26 +2208,26 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
}
}
-static inline bool xchg_sessions(struct rtrs_clt_sess __rcu **rcu_ppcpu_path,
- struct rtrs_clt_sess *sess,
- struct rtrs_clt_sess *next)
+static inline bool xchg_paths(struct rtrs_clt_path __rcu **rcu_ppcpu_path,
+ struct rtrs_clt_path *clt_path,
+ struct rtrs_clt_path *next)
{
- struct rtrs_clt_sess **ppcpu_path;
+ struct rtrs_clt_path **ppcpu_path;
/* Call cmpxchg() without sparse warnings */
ppcpu_path = (typeof(ppcpu_path))rcu_ppcpu_path;
- return sess == cmpxchg(ppcpu_path, sess, next);
+ return clt_path == cmpxchg(ppcpu_path, clt_path, next);
}
-static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
+static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path)
{
- struct rtrs_clt *clt = sess->clt;
- struct rtrs_clt_sess *next;
+ struct rtrs_clt_sess *clt = clt_path->clt;
+ struct rtrs_clt_path *next;
bool wait_for_grace = false;
int cpu;
mutex_lock(&clt->paths_mutex);
- list_del_rcu(&sess->s.entry);
+ list_del_rcu(&clt_path->s.entry);
/* Make sure everybody observes path removal. */
synchronize_rcu();
@@ -2255,7 +2268,7 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
* removed. If @sess is the last element, then @next is NULL.
*/
rcu_read_lock();
- next = list_next_or_null_rr_rcu(&clt->paths_list, &sess->s.entry,
+ next = list_next_or_null_rr_rcu(&clt->paths_list, &clt_path->s.entry,
typeof(*next), s.entry);
rcu_read_unlock();
@@ -2264,11 +2277,11 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
* removed, so change the pointer manually.
*/
for_each_possible_cpu(cpu) {
- struct rtrs_clt_sess __rcu **ppcpu_path;
+ struct rtrs_clt_path __rcu **ppcpu_path;
ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu);
if (rcu_dereference_protected(*ppcpu_path,
- lockdep_is_held(&clt->paths_mutex)) != sess)
+ lockdep_is_held(&clt->paths_mutex)) != clt_path)
/*
* synchronize_rcu() was called just after deleting
* entry from the list, thus IO code path cannot
@@ -2281,7 +2294,7 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
* We race with IO code path, which also changes pointer,
* thus we have to be careful not to overwrite it.
*/
- if (xchg_sessions(ppcpu_path, sess, next))
+ if (xchg_paths(ppcpu_path, clt_path, next))
/*
* @ppcpu_path was successfully replaced with @next,
* that means that someone could also pick up the
@@ -2296,29 +2309,29 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
mutex_unlock(&clt->paths_mutex);
}
-static void rtrs_clt_add_path_to_arr(struct rtrs_clt_sess *sess)
+static void rtrs_clt_add_path_to_arr(struct rtrs_clt_path *clt_path)
{
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_sess *clt = clt_path->clt;
mutex_lock(&clt->paths_mutex);
clt->paths_num++;
- list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
+ list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list);
mutex_unlock(&clt->paths_mutex);
}
static void rtrs_clt_close_work(struct work_struct *work)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = container_of(work, struct rtrs_clt_sess, close_work);
+ clt_path = container_of(work, struct rtrs_clt_path, close_work);
- cancel_delayed_work_sync(&sess->reconnect_dwork);
- rtrs_clt_stop_and_destroy_conns(sess);
- rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSED, NULL);
+ cancel_delayed_work_sync(&clt_path->reconnect_dwork);
+ rtrs_clt_stop_and_destroy_conns(clt_path);
+ rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSED, NULL);
}
-static int init_conns(struct rtrs_clt_sess *sess)
+static int init_conns(struct rtrs_clt_path *clt_path)
{
unsigned int cid;
int err;
@@ -2328,31 +2341,31 @@ static int init_conns(struct rtrs_clt_sess *sess)
* to avoid clashes with previous, not yet closed sessions
* on the server side.
*/
- sess->s.recon_cnt++;
+ clt_path->s.recon_cnt++;
/* Establish all RDMA connections */
- for (cid = 0; cid < sess->s.con_num; cid++) {
- err = create_con(sess, cid);
+ for (cid = 0; cid < clt_path->s.con_num; cid++) {
+ err = create_con(clt_path, cid);
if (err)
goto destroy;
- err = create_cm(to_clt_con(sess->s.con[cid]));
+ err = create_cm(to_clt_con(clt_path->s.con[cid]));
if (err) {
- destroy_con(to_clt_con(sess->s.con[cid]));
+ destroy_con(to_clt_con(clt_path->s.con[cid]));
goto destroy;
}
}
- err = alloc_sess_reqs(sess);
+ err = alloc_path_reqs(clt_path);
if (err)
goto destroy;
- rtrs_start_hb(&sess->s);
+ rtrs_start_hb(&clt_path->s);
return 0;
destroy:
while (cid--) {
- struct rtrs_clt_con *con = to_clt_con(sess->s.con[cid]);
+ struct rtrs_clt_con *con = to_clt_con(clt_path->s.con[cid]);
stop_cm(con);
@@ -2367,7 +2380,7 @@ destroy:
* doing rdma_resolve_addr(), switch to CONNECTION_ERR state
* manually to keep reconnecting.
*/
- rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
+ rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL);
return err;
}
@@ -2375,31 +2388,32 @@ destroy:
static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
struct rtrs_iu *iu;
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
- rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1);
if (wc->status != IB_WC_SUCCESS) {
- rtrs_err(sess->clt, "Sess info request send failed: %s\n",
+ rtrs_err(clt_path->clt, "Path info request send failed: %s\n",
ib_wc_status_msg(wc->status));
- rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
+ rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL);
return;
}
rtrs_clt_update_wc_stats(con);
}
-static int process_info_rsp(struct rtrs_clt_sess *sess,
+static int process_info_rsp(struct rtrs_clt_path *clt_path,
const struct rtrs_msg_info_rsp *msg)
{
unsigned int sg_cnt, total_len;
int i, sgi;
sg_cnt = le16_to_cpu(msg->sg_cnt);
- if (!sg_cnt || (sess->queue_depth % sg_cnt)) {
- rtrs_err(sess->clt, "Incorrect sg_cnt %d, is not multiple\n",
+ if (!sg_cnt || (clt_path->queue_depth % sg_cnt)) {
+ rtrs_err(clt_path->clt,
+ "Incorrect sg_cnt %d, is not multiple\n",
sg_cnt);
return -EINVAL;
}
@@ -2408,15 +2422,15 @@ static int process_info_rsp(struct rtrs_clt_sess *sess,
* Check if IB immediate data size is enough to hold the mem_id and
* the offset inside the memory chunk.
*/
- if ((ilog2(sg_cnt - 1) + 1) + (ilog2(sess->chunk_size - 1) + 1) >
+ if ((ilog2(sg_cnt - 1) + 1) + (ilog2(clt_path->chunk_size - 1) + 1) >
MAX_IMM_PAYL_BITS) {
- rtrs_err(sess->clt,
+ rtrs_err(clt_path->clt,
"RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n",
- MAX_IMM_PAYL_BITS, sg_cnt, sess->chunk_size);
+ MAX_IMM_PAYL_BITS, sg_cnt, clt_path->chunk_size);
return -EINVAL;
}
total_len = 0;
- for (sgi = 0, i = 0; sgi < sg_cnt && i < sess->queue_depth; sgi++) {
+ for (sgi = 0, i = 0; sgi < sg_cnt && i < clt_path->queue_depth; sgi++) {
const struct rtrs_sg_desc *desc = &msg->desc[sgi];
u32 len, rkey;
u64 addr;
@@ -2427,26 +2441,28 @@ static int process_info_rsp(struct rtrs_clt_sess *sess,
total_len += len;
- if (!len || (len % sess->chunk_size)) {
- rtrs_err(sess->clt, "Incorrect [%d].len %d\n", sgi,
+ if (!len || (len % clt_path->chunk_size)) {
+ rtrs_err(clt_path->clt, "Incorrect [%d].len %d\n",
+ sgi,
len);
return -EINVAL;
}
- for ( ; len && i < sess->queue_depth; i++) {
- sess->rbufs[i].addr = addr;
- sess->rbufs[i].rkey = rkey;
+ for ( ; len && i < clt_path->queue_depth; i++) {
+ clt_path->rbufs[i].addr = addr;
+ clt_path->rbufs[i].rkey = rkey;
- len -= sess->chunk_size;
- addr += sess->chunk_size;
+ len -= clt_path->chunk_size;
+ addr += clt_path->chunk_size;
}
}
/* Sanity check */
- if (sgi != sg_cnt || i != sess->queue_depth) {
- rtrs_err(sess->clt, "Incorrect sg vector, not fully mapped\n");
+ if (sgi != sg_cnt || i != clt_path->queue_depth) {
+ rtrs_err(clt_path->clt,
+ "Incorrect sg vector, not fully mapped\n");
return -EINVAL;
}
- if (total_len != sess->chunk_size * sess->queue_depth) {
- rtrs_err(sess->clt, "Incorrect total_len %d\n", total_len);
+ if (total_len != clt_path->chunk_size * clt_path->queue_depth) {
+ rtrs_err(clt_path->clt, "Incorrect total_len %d\n", total_len);
return -EINVAL;
}
@@ -2456,7 +2472,7 @@ static int process_info_rsp(struct rtrs_clt_sess *sess,
static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
struct rtrs_msg_info_rsp *msg;
enum rtrs_clt_state state;
struct rtrs_iu *iu;
@@ -2468,37 +2484,37 @@ static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
WARN_ON(con->c.cid);
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
if (wc->status != IB_WC_SUCCESS) {
- rtrs_err(sess->clt, "Sess info response recv failed: %s\n",
+ rtrs_err(clt_path->clt, "Path info response recv failed: %s\n",
ib_wc_status_msg(wc->status));
goto out;
}
WARN_ON(wc->opcode != IB_WC_RECV);
if (wc->byte_len < sizeof(*msg)) {
- rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
+ rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n",
wc->byte_len);
goto out;
}
- ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
+ ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr,
iu->size, DMA_FROM_DEVICE);
msg = iu->buf;
if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP) {
- rtrs_err(sess->clt, "Sess info response is malformed: type %d\n",
+ rtrs_err(clt_path->clt, "Path info response is malformed: type %d\n",
le16_to_cpu(msg->type));
goto out;
}
rx_sz = sizeof(*msg);
rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt);
if (wc->byte_len < rx_sz) {
- rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
+ rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n",
wc->byte_len);
goto out;
}
- err = process_info_rsp(sess, msg);
+ err = process_info_rsp(clt_path, msg);
if (err)
goto out;
- err = post_recv_sess(sess);
+ err = post_recv_path(clt_path);
if (err)
goto out;
@@ -2506,25 +2522,25 @@ static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
out:
rtrs_clt_update_wc_stats(con);
- rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
- rtrs_clt_change_state_get_old(sess, state, NULL);
+ rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1);
+ rtrs_clt_change_state_get_old(clt_path, state, NULL);
}
-static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
+static int rtrs_send_path_info(struct rtrs_clt_path *clt_path)
{
- struct rtrs_clt_con *usr_con = to_clt_con(sess->s.con[0]);
+ struct rtrs_clt_con *usr_con = to_clt_con(clt_path->s.con[0]);
struct rtrs_msg_info_req *msg;
struct rtrs_iu *tx_iu, *rx_iu;
size_t rx_sz;
int err;
rx_sz = sizeof(struct rtrs_msg_info_rsp);
- rx_sz += sizeof(struct rtrs_sg_desc) * sess->queue_depth;
+ rx_sz += sizeof(struct rtrs_sg_desc) * clt_path->queue_depth;
tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL,
- sess->s.dev->ib_dev, DMA_TO_DEVICE,
+ clt_path->s.dev->ib_dev, DMA_TO_DEVICE,
rtrs_clt_info_req_done);
- rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
+ rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, clt_path->s.dev->ib_dev,
DMA_FROM_DEVICE, rtrs_clt_info_rsp_done);
if (!tx_iu || !rx_iu) {
err = -ENOMEM;
@@ -2533,33 +2549,34 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
/* Prepare for getting info response */
err = rtrs_iu_post_recv(&usr_con->c, rx_iu);
if (err) {
- rtrs_err(sess->clt, "rtrs_iu_post_recv(), err: %d\n", err);
+ rtrs_err(clt_path->clt, "rtrs_iu_post_recv(), err: %d\n", err);
goto out;
}
rx_iu = NULL;
msg = tx_iu->buf;
msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ);
- memcpy(msg->sessname, sess->s.sessname, sizeof(msg->sessname));
+ memcpy(msg->pathname, clt_path->s.sessname, sizeof(msg->pathname));
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr,
+ ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
+ tx_iu->dma_addr,
tx_iu->size, DMA_TO_DEVICE);
/* Send info request */
err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL);
if (err) {
- rtrs_err(sess->clt, "rtrs_iu_post_send(), err: %d\n", err);
+ rtrs_err(clt_path->clt, "rtrs_iu_post_send(), err: %d\n", err);
goto out;
}
tx_iu = NULL;
/* Wait for state change */
- wait_event_interruptible_timeout(sess->state_wq,
- sess->state != RTRS_CLT_CONNECTING,
+ wait_event_interruptible_timeout(clt_path->state_wq,
+ clt_path->state != RTRS_CLT_CONNECTING,
msecs_to_jiffies(
RTRS_CONNECT_TIMEOUT_MS));
- if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) {
- if (READ_ONCE(sess->state) == RTRS_CLT_CONNECTING_ERR)
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) {
+ if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTING_ERR)
err = -ECONNRESET;
else
err = -ETIMEDOUT;
@@ -2567,82 +2584,82 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
out:
if (tx_iu)
- rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(tx_iu, clt_path->s.dev->ib_dev, 1);
if (rx_iu)
- rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(rx_iu, clt_path->s.dev->ib_dev, 1);
if (err)
/* If we've never taken async path because of malloc problems */
- rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
+ rtrs_clt_change_state_get_old(clt_path,
+ RTRS_CLT_CONNECTING_ERR, NULL);
return err;
}
/**
- * init_sess() - establishes all session connections and does handshake
- * @sess: client session.
+ * init_path() - establishes all path connections and does handshake
+ * @clt_path: client path.
* In case of error, a full close or reconnect procedure should be taken,
* because reconnect or close async works can be started.
*/
-static int init_sess(struct rtrs_clt_sess *sess)
+static int init_path(struct rtrs_clt_path *clt_path)
{
int err;
char str[NAME_MAX];
struct rtrs_addr path = {
- .src = &sess->s.src_addr,
- .dst = &sess->s.dst_addr,
+ .src = &clt_path->s.src_addr,
+ .dst = &clt_path->s.dst_addr,
};
rtrs_addr_to_str(&path, str, sizeof(str));
- mutex_lock(&sess->init_mutex);
- err = init_conns(sess);
+ mutex_lock(&clt_path->init_mutex);
+ err = init_conns(clt_path);
if (err) {
- rtrs_err(sess->clt,
+ rtrs_err(clt_path->clt,
"init_conns() failed: err=%d path=%s [%s:%u]\n", err,
- str, sess->hca_name, sess->hca_port);
+ str, clt_path->hca_name, clt_path->hca_port);
goto out;
}
- err = rtrs_send_sess_info(sess);
+ err = rtrs_send_path_info(clt_path);
if (err) {
- rtrs_err(
- sess->clt,
- "rtrs_send_sess_info() failed: err=%d path=%s [%s:%u]\n",
- err, str, sess->hca_name, sess->hca_port);
+ rtrs_err(clt_path->clt,
+ "rtrs_send_path_info() failed: err=%d path=%s [%s:%u]\n",
+ err, str, clt_path->hca_name, clt_path->hca_port);
goto out;
}
- rtrs_clt_sess_up(sess);
+ rtrs_clt_path_up(clt_path);
out:
- mutex_unlock(&sess->init_mutex);
+ mutex_unlock(&clt_path->init_mutex);
return err;
}
static void rtrs_clt_reconnect_work(struct work_struct *work)
{
- struct rtrs_clt_sess *sess;
- struct rtrs_clt *clt;
+ struct rtrs_clt_path *clt_path;
+ struct rtrs_clt_sess *clt;
unsigned int delay_ms;
int err;
- sess = container_of(to_delayed_work(work), struct rtrs_clt_sess,
- reconnect_dwork);
- clt = sess->clt;
+ clt_path = container_of(to_delayed_work(work), struct rtrs_clt_path,
+ reconnect_dwork);
+ clt = clt_path->clt;
- if (READ_ONCE(sess->state) != RTRS_CLT_RECONNECTING)
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_RECONNECTING)
return;
- if (sess->reconnect_attempts >= clt->max_reconnect_attempts) {
- /* Close a session completely if max attempts is reached */
- rtrs_clt_close_conns(sess, false);
+ if (clt_path->reconnect_attempts >= clt->max_reconnect_attempts) {
+ /* Close a path completely if max attempts is reached */
+ rtrs_clt_close_conns(clt_path, false);
return;
}
- sess->reconnect_attempts++;
+ clt_path->reconnect_attempts++;
/* Stop everything */
- rtrs_clt_stop_and_destroy_conns(sess);
+ rtrs_clt_stop_and_destroy_conns(clt_path);
msleep(RTRS_RECONNECT_BACKOFF);
- if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING, NULL)) {
- err = init_sess(sess);
+ if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING, NULL)) {
+ err = init_path(clt_path);
if (err)
goto reconnect_again;
}
@@ -2650,10 +2667,10 @@ static void rtrs_clt_reconnect_work(struct work_struct *work)
return;
reconnect_again:
- if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING, NULL)) {
- sess->stats->reconnects.fail_cnt++;
+ if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_RECONNECTING, NULL)) {
+ clt_path->stats->reconnects.fail_cnt++;
delay_ms = clt->reconnect_delay_sec * 1000;
- queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
+ queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
msecs_to_jiffies(delay_ms +
prandom_u32() %
RTRS_RECONNECT_SEED));
@@ -2662,19 +2679,20 @@ reconnect_again:
static void rtrs_clt_dev_release(struct device *dev)
{
- struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
+ struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
+ dev);
kfree(clt);
}
-static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
+static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num,
u16 port, size_t pdu_sz, void *priv,
void (*link_ev)(void *priv,
enum rtrs_clt_link_ev ev),
unsigned int reconnect_delay_sec,
unsigned int max_reconnect_attempts)
{
- struct rtrs_clt *clt;
+ struct rtrs_clt_sess *clt;
int err;
if (!paths_num || paths_num > MAX_PATHS_NUM)
@@ -2749,7 +2767,7 @@ err:
return ERR_PTR(err);
}
-static void free_clt(struct rtrs_clt *clt)
+static void free_clt(struct rtrs_clt_sess *clt)
{
free_permits(clt);
free_percpu(clt->pcpu_path);
@@ -2760,7 +2778,7 @@ static void free_clt(struct rtrs_clt *clt)
}
/**
- * rtrs_clt_open() - Open a session to an RTRS server
+ * rtrs_clt_open() - Open a path to an RTRS server
* @ops: holds the link event callback and the private pointer.
* @sessname: name of the session
* @paths: Paths to be established defined by their src and dst addresses
@@ -2777,24 +2795,24 @@ static void free_clt(struct rtrs_clt *clt)
*
* Return a valid pointer on success otherwise PTR_ERR.
*/
-struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
- const char *sessname,
+struct rtrs_clt_sess *rtrs_clt_open(struct rtrs_clt_ops *ops,
+ const char *pathname,
const struct rtrs_addr *paths,
size_t paths_num, u16 port,
size_t pdu_sz, u8 reconnect_delay_sec,
s16 max_reconnect_attempts, u32 nr_poll_queues)
{
- struct rtrs_clt_sess *sess, *tmp;
- struct rtrs_clt *clt;
+ struct rtrs_clt_path *clt_path, *tmp;
+ struct rtrs_clt_sess *clt;
int err, i;
- if (strchr(sessname, '/') || strchr(sessname, '.')) {
- pr_err("sessname cannot contain / and .\n");
+ if (strchr(pathname, '/') || strchr(pathname, '.')) {
+ pr_err("pathname cannot contain / and .\n");
err = -EINVAL;
goto out;
}
- clt = alloc_clt(sessname, paths_num, port, pdu_sz, ops->priv,
+ clt = alloc_clt(pathname, paths_num, port, pdu_sz, ops->priv,
ops->link_ev,
reconnect_delay_sec,
max_reconnect_attempts);
@@ -2803,49 +2821,49 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
goto out;
}
for (i = 0; i < paths_num; i++) {
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = alloc_sess(clt, &paths[i], nr_cpu_ids,
+ clt_path = alloc_path(clt, &paths[i], nr_cpu_ids,
nr_poll_queues);
- if (IS_ERR(sess)) {
- err = PTR_ERR(sess);
- goto close_all_sess;
+ if (IS_ERR(clt_path)) {
+ err = PTR_ERR(clt_path);
+ goto close_all_path;
}
if (!i)
- sess->for_new_clt = 1;
- list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
+ clt_path->for_new_clt = 1;
+ list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list);
- err = init_sess(sess);
+ err = init_path(clt_path);
if (err) {
- list_del_rcu(&sess->s.entry);
- rtrs_clt_close_conns(sess, true);
- free_percpu(sess->stats->pcpu_stats);
- kfree(sess->stats);
- free_sess(sess);
- goto close_all_sess;
+ list_del_rcu(&clt_path->s.entry);
+ rtrs_clt_close_conns(clt_path, true);
+ free_percpu(clt_path->stats->pcpu_stats);
+ kfree(clt_path->stats);
+ free_path(clt_path);
+ goto close_all_path;
}
- err = rtrs_clt_create_sess_files(sess);
+ err = rtrs_clt_create_path_files(clt_path);
if (err) {
- list_del_rcu(&sess->s.entry);
- rtrs_clt_close_conns(sess, true);
- free_percpu(sess->stats->pcpu_stats);
- kfree(sess->stats);
- free_sess(sess);
- goto close_all_sess;
+ list_del_rcu(&clt_path->s.entry);
+ rtrs_clt_close_conns(clt_path, true);
+ free_percpu(clt_path->stats->pcpu_stats);
+ kfree(clt_path->stats);
+ free_path(clt_path);
+ goto close_all_path;
}
}
err = alloc_permits(clt);
if (err)
- goto close_all_sess;
+ goto close_all_path;
return clt;
-close_all_sess:
- list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
- rtrs_clt_destroy_sess_files(sess, NULL);
- rtrs_clt_close_conns(sess, true);
- kobject_put(&sess->kobj);
+close_all_path:
+ list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) {
+ rtrs_clt_destroy_path_files(clt_path, NULL);
+ rtrs_clt_close_conns(clt_path, true);
+ kobject_put(&clt_path->kobj);
}
rtrs_clt_destroy_sysfs_root(clt);
free_clt(clt);
@@ -2856,37 +2874,38 @@ out:
EXPORT_SYMBOL(rtrs_clt_open);
/**
- * rtrs_clt_close() - Close a session
+ * rtrs_clt_close() - Close a path
* @clt: Session handle. Session is freed upon return.
*/
-void rtrs_clt_close(struct rtrs_clt *clt)
+void rtrs_clt_close(struct rtrs_clt_sess *clt)
{
- struct rtrs_clt_sess *sess, *tmp;
+ struct rtrs_clt_path *clt_path, *tmp;
/* Firstly forbid sysfs access */
rtrs_clt_destroy_sysfs_root(clt);
/* Now it is safe to iterate over all paths without locks */
- list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
- rtrs_clt_close_conns(sess, true);
- rtrs_clt_destroy_sess_files(sess, NULL);
- kobject_put(&sess->kobj);
+ list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) {
+ rtrs_clt_close_conns(clt_path, true);
+ rtrs_clt_destroy_path_files(clt_path, NULL);
+ kobject_put(&clt_path->kobj);
}
free_clt(clt);
}
EXPORT_SYMBOL(rtrs_clt_close);
-int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess)
+int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *clt_path)
{
enum rtrs_clt_state old_state;
int err = -EBUSY;
bool changed;
- changed = rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING,
+ changed = rtrs_clt_change_state_get_old(clt_path,
+ RTRS_CLT_RECONNECTING,
&old_state);
if (changed) {
- sess->reconnect_attempts = 0;
- queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, 0);
+ clt_path->reconnect_attempts = 0;
+ queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 0);
}
if (changed || old_state == RTRS_CLT_RECONNECTING) {
/*
@@ -2894,15 +2913,15 @@ int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess)
* execution, so do the flush if we have queued something
* right now or work is pending.
*/
- flush_delayed_work(&sess->reconnect_dwork);
- err = (READ_ONCE(sess->state) ==
+ flush_delayed_work(&clt_path->reconnect_dwork);
+ err = (READ_ONCE(clt_path->state) ==
RTRS_CLT_CONNECTED ? 0 : -ENOTCONN);
}
return err;
}
-int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
+int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *clt_path,
const struct attribute *sysfs_self)
{
enum rtrs_clt_state old_state;
@@ -2918,27 +2937,27 @@ int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
* removing the path.
*/
do {
- rtrs_clt_close_conns(sess, true);
- changed = rtrs_clt_change_state_get_old(sess,
+ rtrs_clt_close_conns(clt_path, true);
+ changed = rtrs_clt_change_state_get_old(clt_path,
RTRS_CLT_DEAD,
&old_state);
} while (!changed && old_state != RTRS_CLT_DEAD);
if (changed) {
- rtrs_clt_remove_path_from_arr(sess);
- rtrs_clt_destroy_sess_files(sess, sysfs_self);
- kobject_put(&sess->kobj);
+ rtrs_clt_remove_path_from_arr(clt_path);
+ rtrs_clt_destroy_path_files(clt_path, sysfs_self);
+ kobject_put(&clt_path->kobj);
}
return 0;
}
-void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value)
+void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value)
{
clt->max_reconnect_attempts = (unsigned int)value;
}
-int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt)
+int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt)
{
return (int)clt->max_reconnect_attempts;
}
@@ -2968,12 +2987,12 @@ int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt)
* On dir=WRITE rtrs client will rdma write data in sg to server side.
*/
int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
- struct rtrs_clt *clt, struct rtrs_permit *permit,
- const struct kvec *vec, size_t nr, size_t data_len,
- struct scatterlist *sg, unsigned int sg_cnt)
+ struct rtrs_clt_sess *clt, struct rtrs_permit *permit,
+ const struct kvec *vec, size_t nr, size_t data_len,
+ struct scatterlist *sg, unsigned int sg_cnt)
{
struct rtrs_clt_io_req *req;
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
enum dma_data_direction dma_dir;
int err = -ECONNABORTED, i;
@@ -2995,19 +3014,19 @@ int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
rcu_read_lock();
for (path_it_init(&it, clt);
- (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
- if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)
+ (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
continue;
- if (usr_len + hdr_len > sess->max_hdr_size) {
- rtrs_wrn_rl(sess->clt,
+ if (usr_len + hdr_len > clt_path->max_hdr_size) {
+ rtrs_wrn_rl(clt_path->clt,
"%s request failed, user message size is %zu and header length %zu, but max size is %u\n",
dir == READ ? "Read" : "Write",
- usr_len, hdr_len, sess->max_hdr_size);
+ usr_len, hdr_len, clt_path->max_hdr_size);
err = -EMSGSIZE;
break;
}
- req = rtrs_clt_get_req(sess, ops->conf_fn, permit, ops->priv,
+ req = rtrs_clt_get_req(clt_path, ops->conf_fn, permit, ops->priv,
vec, usr_len, sg, sg_cnt, data_len,
dma_dir);
if (dir == READ)
@@ -3028,21 +3047,21 @@ int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
}
EXPORT_SYMBOL(rtrs_clt_request);
-int rtrs_clt_rdma_cq_direct(struct rtrs_clt *clt, unsigned int index)
+int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index)
{
/* If no path, return -1 for block layer not to try again */
int cnt = -1;
struct rtrs_con *con;
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
struct path_it it;
rcu_read_lock();
for (path_it_init(&it, clt);
- (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
- if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)
+ (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
continue;
- con = sess->s.con[index + 1];
+ con = clt_path->s.con[index + 1];
cnt = ib_process_cq_direct(con->cq, -1);
if (cnt)
break;
@@ -3062,7 +3081,7 @@ EXPORT_SYMBOL(rtrs_clt_rdma_cq_direct);
* 0 on success
* -ECOMM no connection to the server
*/
-int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr)
+int rtrs_clt_query(struct rtrs_clt_sess *clt, struct rtrs_attrs *attr)
{
if (!rtrs_clt_is_connected(clt))
return -ECOMM;
@@ -3077,15 +3096,15 @@ int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr)
}
EXPORT_SYMBOL(rtrs_clt_query);
-int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
+int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt,
struct rtrs_addr *addr)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
int err;
- sess = alloc_sess(clt, addr, nr_cpu_ids, 0);
- if (IS_ERR(sess))
- return PTR_ERR(sess);
+ clt_path = alloc_path(clt, addr, nr_cpu_ids, 0);
+ if (IS_ERR(clt_path))
+ return PTR_ERR(clt_path);
mutex_lock(&clt->paths_mutex);
if (clt->paths_num == 0) {
@@ -3094,7 +3113,7 @@ int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
* the addition of the first path is like a new session for
* the storage server
*/
- sess->for_new_clt = 1;
+ clt_path->for_new_clt = 1;
}
mutex_unlock(&clt->paths_mutex);
@@ -3104,24 +3123,24 @@ int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
* IO will never grab it. Also it is very important to add
* path before init, since init fires LINK_CONNECTED event.
*/
- rtrs_clt_add_path_to_arr(sess);
+ rtrs_clt_add_path_to_arr(clt_path);
- err = init_sess(sess);
+ err = init_path(clt_path);
if (err)
- goto close_sess;
+ goto close_path;
- err = rtrs_clt_create_sess_files(sess);
+ err = rtrs_clt_create_path_files(clt_path);
if (err)
- goto close_sess;
+ goto close_path;
return 0;
-close_sess:
- rtrs_clt_remove_path_from_arr(sess);
- rtrs_clt_close_conns(sess, true);
- free_percpu(sess->stats->pcpu_stats);
- kfree(sess->stats);
- free_sess(sess);
+close_path:
+ rtrs_clt_remove_path_from_arr(clt_path);
+ rtrs_clt_close_conns(clt_path, true);
+ free_percpu(clt_path->stats->pcpu_stats);
+ kfree(clt_path->stats);
+ free_path(clt_path);
return err;
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
index 9afffccff973..d1b18a154ae0 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -124,9 +124,9 @@ struct rtrs_rbuf {
u32 rkey;
};
-struct rtrs_clt_sess {
- struct rtrs_sess s;
- struct rtrs_clt *clt;
+struct rtrs_clt_path {
+ struct rtrs_path s;
+ struct rtrs_clt_sess *clt;
wait_queue_head_t state_wq;
enum rtrs_clt_state state;
atomic_t connected_cnt;
@@ -153,10 +153,10 @@ struct rtrs_clt_sess {
*mp_skip_entry;
};
-struct rtrs_clt {
+struct rtrs_clt_sess {
struct list_head paths_list; /* rcu protected list */
size_t paths_num;
- struct rtrs_clt_sess
+ struct rtrs_clt_path
__rcu * __percpu *pcpu_path;
uuid_t paths_uuid;
int paths_up;
@@ -186,31 +186,32 @@ static inline struct rtrs_clt_con *to_clt_con(struct rtrs_con *c)
return container_of(c, struct rtrs_clt_con, c);
}
-static inline struct rtrs_clt_sess *to_clt_sess(struct rtrs_sess *s)
+static inline struct rtrs_clt_path *to_clt_path(struct rtrs_path *s)
{
- return container_of(s, struct rtrs_clt_sess, s);
+ return container_of(s, struct rtrs_clt_path, s);
}
-static inline int permit_size(struct rtrs_clt *clt)
+static inline int permit_size(struct rtrs_clt_sess *clt)
{
return sizeof(struct rtrs_permit) + clt->pdu_sz;
}
-static inline struct rtrs_permit *get_permit(struct rtrs_clt *clt, int idx)
+static inline struct rtrs_permit *get_permit(struct rtrs_clt_sess *clt,
+ int idx)
{
return (struct rtrs_permit *)(clt->permits + permit_size(clt) * idx);
}
-int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess);
-void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait);
-int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
+int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *path);
+void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait);
+int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt,
struct rtrs_addr *addr);
-int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
+int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *path,
const struct attribute *sysfs_self);
-void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value);
-int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt);
-void free_sess(struct rtrs_clt_sess *sess);
+void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value);
+int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt);
+void free_path(struct rtrs_clt_path *clt_path);
/* rtrs-clt-stats.c */
@@ -239,11 +240,11 @@ ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *stats,
/* rtrs-clt-sysfs.c */
-int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt);
-void rtrs_clt_destroy_sysfs_root(struct rtrs_clt *clt);
+int rtrs_clt_create_sysfs_root_files(struct rtrs_clt_sess *clt);
+void rtrs_clt_destroy_sysfs_root(struct rtrs_clt_sess *clt);
-int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess);
-void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess,
+int rtrs_clt_create_path_files(struct rtrs_clt_path *clt_path);
+void rtrs_clt_destroy_path_files(struct rtrs_clt_path *clt_path,
const struct attribute *sysfs_self);
#endif /* RTRS_CLT_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index 78eac9a4f703..9a1e5c2ae55c 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -90,7 +90,7 @@ struct rtrs_ib_dev {
};
struct rtrs_con {
- struct rtrs_sess *sess;
+ struct rtrs_path *path;
struct ib_qp *qp;
struct ib_cq *cq;
struct rdma_cm_id *cm_id;
@@ -100,7 +100,7 @@ struct rtrs_con {
atomic_t sq_wr_avail;
};
-struct rtrs_sess {
+struct rtrs_path {
struct list_head entry;
struct sockaddr_storage dst_addr;
struct sockaddr_storage src_addr;
@@ -229,11 +229,11 @@ struct rtrs_msg_conn_rsp {
/**
* struct rtrs_msg_info_req
* @type: @RTRS_MSG_INFO_REQ
- * @sessname: Session name chosen by client
+ * @pathname: Path name chosen by client
*/
struct rtrs_msg_info_req {
__le16 type;
- u8 sessname[NAME_MAX];
+ u8 pathname[NAME_MAX];
u8 reserved[15];
};
@@ -313,19 +313,19 @@ int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe);
-int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
+int rtrs_cq_qp_create(struct rtrs_path *path, struct rtrs_con *con,
u32 max_send_sge, int cq_vector, int nr_cqe,
u32 max_send_wr, u32 max_recv_wr,
enum ib_poll_context poll_ctx);
void rtrs_cq_qp_destroy(struct rtrs_con *con);
-void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe,
+void rtrs_init_hb(struct rtrs_path *path, struct ib_cqe *cqe,
unsigned int interval_ms, unsigned int missed_max,
void (*err_handler)(struct rtrs_con *con),
struct workqueue_struct *wq);
-void rtrs_start_hb(struct rtrs_sess *sess);
-void rtrs_stop_hb(struct rtrs_sess *sess);
-void rtrs_send_hb_ack(struct rtrs_sess *sess);
+void rtrs_start_hb(struct rtrs_path *path);
+void rtrs_stop_hb(struct rtrs_path *path);
+void rtrs_send_hb_ack(struct rtrs_path *path);
void rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags,
struct rtrs_rdma_dev_pd *pool);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index 9c43ce5ba1c1..b94ae12c2795 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -15,10 +15,10 @@
static void rtrs_srv_release(struct kobject *kobj)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
- sess = container_of(kobj, struct rtrs_srv_sess, kobj);
- kfree(sess);
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+ kfree(srv_path);
}
static struct kobj_type ktype = {
@@ -36,24 +36,25 @@ static ssize_t rtrs_srv_disconnect_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
- struct rtrs_srv_sess *sess;
- struct rtrs_sess *s;
+ struct rtrs_srv_path *srv_path;
+ struct rtrs_path *s;
char str[MAXHOSTNAMELEN];
- sess = container_of(kobj, struct rtrs_srv_sess, kobj);
- s = &sess->s;
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+ s = &srv_path->s;
if (!sysfs_streq(buf, "1")) {
rtrs_err(s, "%s: invalid value: '%s'\n",
attr->attr.name, buf);
return -EINVAL;
}
- sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, str, sizeof(str));
+ sockaddr_to_str((struct sockaddr *)&srv_path->s.dst_addr, str,
+ sizeof(str));
rtrs_info(s, "disconnect for path %s requested\n", str);
/* first remove sysfs itself to avoid deadlock */
- sysfs_remove_file_self(&sess->kobj, &attr->attr);
- close_sess(sess);
+ sysfs_remove_file_self(&srv_path->kobj, &attr->attr);
+ close_path(srv_path);
return count;
}
@@ -66,11 +67,11 @@ static ssize_t rtrs_srv_hca_port_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
struct rtrs_con *usr_con;
- sess = container_of(kobj, typeof(*sess), kobj);
- usr_con = sess->s.con[0];
+ srv_path = container_of(kobj, typeof(*srv_path), kobj);
+ usr_con = srv_path->s.con[0];
return sysfs_emit(page, "%u\n", usr_con->cm_id->port_num);
}
@@ -82,11 +83,11 @@ static ssize_t rtrs_srv_hca_name_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
- sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
- return sysfs_emit(page, "%s\n", sess->s.dev->ib_dev->name);
+ return sysfs_emit(page, "%s\n", srv_path->s.dev->ib_dev->name);
}
static struct kobj_attribute rtrs_srv_hca_name_attr =
@@ -96,11 +97,11 @@ static ssize_t rtrs_srv_src_addr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
int cnt;
- sess = container_of(kobj, struct rtrs_srv_sess, kobj);
- cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+ cnt = sockaddr_to_str((struct sockaddr *)&srv_path->s.dst_addr,
page, PAGE_SIZE);
return cnt + sysfs_emit_at(page, cnt, "\n");
}
@@ -112,11 +113,11 @@ static ssize_t rtrs_srv_dst_addr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
int len;
- sess = container_of(kobj, struct rtrs_srv_sess, kobj);
- len = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr, page,
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+ len = sockaddr_to_str((struct sockaddr *)&srv_path->s.src_addr, page,
PAGE_SIZE);
len += sysfs_emit_at(page, len, "\n");
return len;
@@ -125,7 +126,7 @@ static ssize_t rtrs_srv_dst_addr_show(struct kobject *kobj,
static struct kobj_attribute rtrs_srv_dst_addr_attr =
__ATTR(dst_addr, 0444, rtrs_srv_dst_addr_show, NULL);
-static struct attribute *rtrs_srv_sess_attrs[] = {
+static struct attribute *rtrs_srv_path_attrs[] = {
&rtrs_srv_hca_name_attr.attr,
&rtrs_srv_hca_port_attr.attr,
&rtrs_srv_src_addr_attr.attr,
@@ -134,8 +135,8 @@ static struct attribute *rtrs_srv_sess_attrs[] = {
NULL,
};
-static const struct attribute_group rtrs_srv_sess_attr_group = {
- .attrs = rtrs_srv_sess_attrs,
+static const struct attribute_group rtrs_srv_path_attr_group = {
+ .attrs = rtrs_srv_path_attrs,
};
STAT_ATTR(struct rtrs_srv_stats, rdma,
@@ -151,9 +152,9 @@ static const struct attribute_group rtrs_srv_stats_attr_group = {
.attrs = rtrs_srv_stats_attrs,
};
-static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
+static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
int err = 0;
mutex_lock(&srv->paths_mutex);
@@ -164,7 +165,7 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
goto unlock;
}
srv->dev.class = rtrs_dev_class;
- err = dev_set_name(&srv->dev, "%s", sess->s.sessname);
+ err = dev_set_name(&srv->dev, "%s", srv_path->s.sessname);
if (err)
goto unlock;
@@ -196,9 +197,9 @@ unlock:
}
static void
-rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
+rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
mutex_lock(&srv->paths_mutex);
if (!--srv->dev_ref) {
@@ -213,7 +214,7 @@ rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
}
}
-static void rtrs_srv_sess_stats_release(struct kobject *kobj)
+static void rtrs_srv_path_stats_release(struct kobject *kobj)
{
struct rtrs_srv_stats *stats;
@@ -224,22 +225,22 @@ static void rtrs_srv_sess_stats_release(struct kobject *kobj)
static struct kobj_type ktype_stats = {
.sysfs_ops = &kobj_sysfs_ops,
- .release = rtrs_srv_sess_stats_release,
+ .release = rtrs_srv_path_stats_release,
};
-static int rtrs_srv_create_stats_files(struct rtrs_srv_sess *sess)
+static int rtrs_srv_create_stats_files(struct rtrs_srv_path *srv_path)
{
int err;
- struct rtrs_sess *s = &sess->s;
+ struct rtrs_path *s = &srv_path->s;
- err = kobject_init_and_add(&sess->stats->kobj_stats, &ktype_stats,
- &sess->kobj, "stats");
+ err = kobject_init_and_add(&srv_path->stats->kobj_stats, &ktype_stats,
+ &srv_path->kobj, "stats");
if (err) {
rtrs_err(s, "kobject_init_and_add(): %d\n", err);
- kobject_put(&sess->stats->kobj_stats);
+ kobject_put(&srv_path->stats->kobj_stats);
return err;
}
- err = sysfs_create_group(&sess->stats->kobj_stats,
+ err = sysfs_create_group(&srv_path->stats->kobj_stats,
&rtrs_srv_stats_attr_group);
if (err) {
rtrs_err(s, "sysfs_create_group(): %d\n", err);
@@ -249,64 +250,64 @@ static int rtrs_srv_create_stats_files(struct rtrs_srv_sess *sess)
return 0;
err:
- kobject_del(&sess->stats->kobj_stats);
- kobject_put(&sess->stats->kobj_stats);
+ kobject_del(&srv_path->stats->kobj_stats);
+ kobject_put(&srv_path->stats->kobj_stats);
return err;
}
-int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess)
+int rtrs_srv_create_path_files(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
- struct rtrs_sess *s = &sess->s;
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_path *s = &srv_path->s;
char str[NAME_MAX];
int err;
struct rtrs_addr path = {
- .src = &sess->s.dst_addr,
- .dst = &sess->s.src_addr,
+ .src = &srv_path->s.dst_addr,
+ .dst = &srv_path->s.src_addr,
};
rtrs_addr_to_str(&path, str, sizeof(str));
- err = rtrs_srv_create_once_sysfs_root_folders(sess);
+ err = rtrs_srv_create_once_sysfs_root_folders(srv_path);
if (err)
return err;
- err = kobject_init_and_add(&sess->kobj, &ktype, srv->kobj_paths,
+ err = kobject_init_and_add(&srv_path->kobj, &ktype, srv->kobj_paths,
"%s", str);
if (err) {
rtrs_err(s, "kobject_init_and_add(): %d\n", err);
goto destroy_root;
}
- err = sysfs_create_group(&sess->kobj, &rtrs_srv_sess_attr_group);
+ err = sysfs_create_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
if (err) {
rtrs_err(s, "sysfs_create_group(): %d\n", err);
goto put_kobj;
}
- err = rtrs_srv_create_stats_files(sess);
+ err = rtrs_srv_create_stats_files(srv_path);
if (err)
goto remove_group;
return 0;
remove_group:
- sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group);
+ sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
put_kobj:
- kobject_del(&sess->kobj);
+ kobject_del(&srv_path->kobj);
destroy_root:
- kobject_put(&sess->kobj);
- rtrs_srv_destroy_once_sysfs_root_folders(sess);
+ kobject_put(&srv_path->kobj);
+ rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
return err;
}
-void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess)
+void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path)
{
- if (sess->kobj.state_in_sysfs) {
- kobject_del(&sess->stats->kobj_stats);
- kobject_put(&sess->stats->kobj_stats);
- sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group);
- kobject_put(&sess->kobj);
+ if (srv_path->kobj.state_in_sysfs) {
+ kobject_del(&srv_path->stats->kobj_stats);
+ kobject_put(&srv_path->stats->kobj_stats);
+ sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
+ kobject_put(&srv_path->kobj);
- rtrs_srv_destroy_once_sysfs_root_folders(sess);
+ rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
}
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 7df71f8cf149..24024bce2566 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -62,19 +62,19 @@ static inline struct rtrs_srv_con *to_srv_con(struct rtrs_con *c)
return container_of(c, struct rtrs_srv_con, c);
}
-static inline struct rtrs_srv_sess *to_srv_sess(struct rtrs_sess *s)
+static inline struct rtrs_srv_path *to_srv_path(struct rtrs_path *s)
{
- return container_of(s, struct rtrs_srv_sess, s);
+ return container_of(s, struct rtrs_srv_path, s);
}
-static bool rtrs_srv_change_state(struct rtrs_srv_sess *sess,
+static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
enum rtrs_srv_state new_state)
{
enum rtrs_srv_state old_state;
bool changed = false;
- spin_lock_irq(&sess->state_lock);
- old_state = sess->state;
+ spin_lock_irq(&srv_path->state_lock);
+ old_state = srv_path->state;
switch (new_state) {
case RTRS_SRV_CONNECTED:
if (old_state == RTRS_SRV_CONNECTING)
@@ -93,8 +93,8 @@ static bool rtrs_srv_change_state(struct rtrs_srv_sess *sess,
break;
}
if (changed)
- sess->state = new_state;
- spin_unlock_irq(&sess->state_lock);
+ srv_path->state = new_state;
+ spin_unlock_irq(&srv_path->state_lock);
return changed;
}
@@ -106,16 +106,16 @@ static void free_id(struct rtrs_srv_op *id)
kfree(id);
}
-static void rtrs_srv_free_ops_ids(struct rtrs_srv_sess *sess)
+static void rtrs_srv_free_ops_ids(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
int i;
- if (sess->ops_ids) {
+ if (srv_path->ops_ids) {
for (i = 0; i < srv->queue_depth; i++)
- free_id(sess->ops_ids[i]);
- kfree(sess->ops_ids);
- sess->ops_ids = NULL;
+ free_id(srv_path->ops_ids[i]);
+ kfree(srv_path->ops_ids);
+ srv_path->ops_ids = NULL;
}
}
@@ -127,21 +127,24 @@ static struct ib_cqe io_comp_cqe = {
static inline void rtrs_srv_inflight_ref_release(struct percpu_ref *ref)
{
- struct rtrs_srv_sess *sess = container_of(ref, struct rtrs_srv_sess, ids_inflight_ref);
+ struct rtrs_srv_path *srv_path = container_of(ref,
+ struct rtrs_srv_path,
+ ids_inflight_ref);
- percpu_ref_exit(&sess->ids_inflight_ref);
- complete(&sess->complete_done);
+ percpu_ref_exit(&srv_path->ids_inflight_ref);
+ complete(&srv_path->complete_done);
}
-static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_sess *sess)
+static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_srv_op *id;
int i, ret;
- sess->ops_ids = kcalloc(srv->queue_depth, sizeof(*sess->ops_ids),
- GFP_KERNEL);
- if (!sess->ops_ids)
+ srv_path->ops_ids = kcalloc(srv->queue_depth,
+ sizeof(*srv_path->ops_ids),
+ GFP_KERNEL);
+ if (!srv_path->ops_ids)
goto err;
for (i = 0; i < srv->queue_depth; ++i) {
@@ -149,44 +152,44 @@ static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_sess *sess)
if (!id)
goto err;
- sess->ops_ids[i] = id;
+ srv_path->ops_ids[i] = id;
}
- ret = percpu_ref_init(&sess->ids_inflight_ref,
+ ret = percpu_ref_init(&srv_path->ids_inflight_ref,
rtrs_srv_inflight_ref_release, 0, GFP_KERNEL);
if (ret) {
pr_err("Percpu reference init failed\n");
goto err;
}
- init_completion(&sess->complete_done);
+ init_completion(&srv_path->complete_done);
return 0;
err:
- rtrs_srv_free_ops_ids(sess);
+ rtrs_srv_free_ops_ids(srv_path);
return -ENOMEM;
}
-static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_sess *sess)
+static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_path *srv_path)
{
- percpu_ref_get(&sess->ids_inflight_ref);
+ percpu_ref_get(&srv_path->ids_inflight_ref);
}
-static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_sess *sess)
+static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_path *srv_path)
{
- percpu_ref_put(&sess->ids_inflight_ref);
+ percpu_ref_put(&srv_path->ids_inflight_ref);
}
static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
if (wc->status != IB_WC_SUCCESS) {
rtrs_err(s, "REG MR failed: %s\n",
ib_wc_status_msg(wc->status));
- close_sess(sess);
+ close_path(srv_path);
return;
}
}
@@ -197,9 +200,9 @@ static struct ib_cqe local_reg_cqe = {
static int rdma_write_sg(struct rtrs_srv_op *id)
{
- struct rtrs_sess *s = id->con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
- dma_addr_t dma_addr = sess->dma_addr[id->msg_id];
+ struct rtrs_path *s = id->con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ dma_addr_t dma_addr = srv_path->dma_addr[id->msg_id];
struct rtrs_srv_mr *srv_mr;
struct ib_send_wr inv_wr;
struct ib_rdma_wr imm_wr;
@@ -233,7 +236,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
return -EINVAL;
}
- plist->lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ plist->lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
offset += plist->length;
wr->wr.sg_list = plist;
@@ -284,7 +287,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
if (always_invalidate) {
struct rtrs_msg_rkey_rsp *msg;
- srv_mr = &sess->mrs[id->msg_id];
+ srv_mr = &srv_path->mrs[id->msg_id];
rwr.wr.opcode = IB_WR_REG_MR;
rwr.wr.wr_cqe = &local_reg_cqe;
rwr.wr.num_sge = 0;
@@ -300,11 +303,11 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
list.addr = srv_mr->iu->dma_addr;
list.length = sizeof(*msg);
- list.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
imm_wr.wr.sg_list = &list;
imm_wr.wr.num_sge = 1;
imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
+ ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
srv_mr->iu->dma_addr,
srv_mr->iu->size, DMA_TO_DEVICE);
} else {
@@ -317,7 +320,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
0, need_inval));
imm_wr.wr.wr_cqe = &io_comp_cqe;
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev, dma_addr,
+ ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, dma_addr,
offset, DMA_BIDIRECTIONAL);
err = ib_post_send(id->con->c.qp, &id->tx_wr.wr, NULL);
@@ -341,8 +344,8 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
int errno)
{
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
struct ib_send_wr inv_wr, *wr = NULL;
struct ib_rdma_wr imm_wr;
struct ib_reg_wr rwr;
@@ -402,7 +405,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
struct ib_sge list;
struct rtrs_msg_rkey_rsp *msg;
- srv_mr = &sess->mrs[id->msg_id];
+ srv_mr = &srv_path->mrs[id->msg_id];
rwr.wr.next = &imm_wr.wr;
rwr.wr.opcode = IB_WR_REG_MR;
rwr.wr.wr_cqe = &local_reg_cqe;
@@ -419,11 +422,11 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
list.addr = srv_mr->iu->dma_addr;
list.length = sizeof(*msg);
- list.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
imm_wr.wr.sg_list = &list;
imm_wr.wr.num_sge = 1;
imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
+ ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
srv_mr->iu->dma_addr,
srv_mr->iu->size, DMA_TO_DEVICE);
} else {
@@ -444,11 +447,11 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
return err;
}
-void close_sess(struct rtrs_srv_sess *sess)
+void close_path(struct rtrs_srv_path *srv_path)
{
- if (rtrs_srv_change_state(sess, RTRS_SRV_CLOSING))
- queue_work(rtrs_wq, &sess->close_work);
- WARN_ON(sess->state != RTRS_SRV_CLOSING);
+ if (rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSING))
+ queue_work(rtrs_wq, &srv_path->close_work);
+ WARN_ON(srv_path->state != RTRS_SRV_CLOSING);
}
static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state)
@@ -480,35 +483,35 @@ static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state)
*/
bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
struct rtrs_srv_con *con;
- struct rtrs_sess *s;
+ struct rtrs_path *s;
int err;
if (WARN_ON(!id))
return true;
con = id->con;
- s = con->c.sess;
- sess = to_srv_sess(s);
+ s = con->c.path;
+ srv_path = to_srv_path(s);
id->status = status;
- if (sess->state != RTRS_SRV_CONNECTED) {
+ if (srv_path->state != RTRS_SRV_CONNECTED) {
rtrs_err_rl(s,
- "Sending I/O response failed, session %s is disconnected, sess state %s\n",
- kobject_name(&sess->kobj),
- rtrs_srv_state_str(sess->state));
+ "Sending I/O response failed, server path %s is disconnected, path state %s\n",
+ kobject_name(&srv_path->kobj),
+ rtrs_srv_state_str(srv_path->state));
goto out;
}
if (always_invalidate) {
- struct rtrs_srv_mr *mr = &sess->mrs[id->msg_id];
+ struct rtrs_srv_mr *mr = &srv_path->mrs[id->msg_id];
ib_update_fast_reg_key(mr->mr, ib_inc_rkey(mr->mr->rkey));
}
if (atomic_sub_return(1, &con->c.sq_wr_avail) < 0) {
- rtrs_err(s, "IB send queue full: sess=%s cid=%d\n",
- kobject_name(&sess->kobj),
+ rtrs_err(s, "IB send queue full: srv_path=%s cid=%d\n",
+ kobject_name(&srv_path->kobj),
con->c.cid);
atomic_add(1, &con->c.sq_wr_avail);
spin_lock(&con->rsp_wr_wait_lock);
@@ -523,12 +526,12 @@ bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
err = rdma_write_sg(id);
if (err) {
- rtrs_err_rl(s, "IO response failed: %d: sess=%s\n", err,
- kobject_name(&sess->kobj));
- close_sess(sess);
+ rtrs_err_rl(s, "IO response failed: %d: srv_path=%s\n", err,
+ kobject_name(&srv_path->kobj));
+ close_path(srv_path);
}
out:
- rtrs_srv_put_ops_ids(sess);
+ rtrs_srv_put_ops_ids(srv_path);
return true;
}
EXPORT_SYMBOL(rtrs_srv_resp_rdma);
@@ -538,33 +541,33 @@ EXPORT_SYMBOL(rtrs_srv_resp_rdma);
* @srv: Session pointer
* @priv: The private pointer that is associated with the session.
*/
-void rtrs_srv_set_sess_priv(struct rtrs_srv *srv, void *priv)
+void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *srv, void *priv)
{
srv->priv = priv;
}
EXPORT_SYMBOL(rtrs_srv_set_sess_priv);
-static void unmap_cont_bufs(struct rtrs_srv_sess *sess)
+static void unmap_cont_bufs(struct rtrs_srv_path *srv_path)
{
int i;
- for (i = 0; i < sess->mrs_num; i++) {
+ for (i = 0; i < srv_path->mrs_num; i++) {
struct rtrs_srv_mr *srv_mr;
- srv_mr = &sess->mrs[i];
- rtrs_iu_free(srv_mr->iu, sess->s.dev->ib_dev, 1);
+ srv_mr = &srv_path->mrs[i];
+ rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
ib_dereg_mr(srv_mr->mr);
- ib_dma_unmap_sg(sess->s.dev->ib_dev, srv_mr->sgt.sgl,
+ ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl,
srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
sg_free_table(&srv_mr->sgt);
}
- kfree(sess->mrs);
+ kfree(srv_path->mrs);
}
-static int map_cont_bufs(struct rtrs_srv_sess *sess)
+static int map_cont_bufs(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
- struct rtrs_sess *ss = &sess->s;
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_path *ss = &srv_path->s;
int i, mri, err, mrs_num;
unsigned int chunk_bits;
int chunks_per_mr = 1;
@@ -581,19 +584,19 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess)
mrs_num = srv->queue_depth;
} else {
chunks_per_mr =
- sess->s.dev->ib_dev->attrs.max_fast_reg_page_list_len;
+ srv_path->s.dev->ib_dev->attrs.max_fast_reg_page_list_len;
mrs_num = DIV_ROUND_UP(srv->queue_depth, chunks_per_mr);
chunks_per_mr = DIV_ROUND_UP(srv->queue_depth, mrs_num);
}
- sess->mrs = kcalloc(mrs_num, sizeof(*sess->mrs), GFP_KERNEL);
- if (!sess->mrs)
+ srv_path->mrs = kcalloc(mrs_num, sizeof(*srv_path->mrs), GFP_KERNEL);
+ if (!srv_path->mrs)
return -ENOMEM;
- sess->mrs_num = mrs_num;
+ srv_path->mrs_num = mrs_num;
for (mri = 0; mri < mrs_num; mri++) {
- struct rtrs_srv_mr *srv_mr = &sess->mrs[mri];
+ struct rtrs_srv_mr *srv_mr = &srv_path->mrs[mri];
struct sg_table *sgt = &srv_mr->sgt;
struct scatterlist *s;
struct ib_mr *mr;
@@ -612,13 +615,13 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess)
sg_set_page(s, srv->chunks[chunks + i],
max_chunk_size, 0);
- nr = ib_dma_map_sg(sess->s.dev->ib_dev, sgt->sgl,
+ nr = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
sgt->nents, DMA_BIDIRECTIONAL);
if (nr < sgt->nents) {
err = nr < 0 ? nr : -EINVAL;
goto free_sg;
}
- mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
+ mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
sgt->nents);
if (IS_ERR(mr)) {
err = PTR_ERR(mr);
@@ -634,7 +637,7 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess)
if (always_invalidate) {
srv_mr->iu = rtrs_iu_alloc(1,
sizeof(struct rtrs_msg_rkey_rsp),
- GFP_KERNEL, sess->s.dev->ib_dev,
+ GFP_KERNEL, srv_path->s.dev->ib_dev,
DMA_TO_DEVICE, rtrs_srv_rdma_done);
if (!srv_mr->iu) {
err = -ENOMEM;
@@ -644,7 +647,7 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess)
}
/* Eventually dma addr for each chunk can be cached */
for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
- sess->dma_addr[chunks + i] = sg_dma_address(s);
+ srv_path->dma_addr[chunks + i] = sg_dma_address(s);
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
srv_mr->mr = mr;
@@ -652,75 +655,75 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess)
continue;
err:
while (mri--) {
- srv_mr = &sess->mrs[mri];
+ srv_mr = &srv_path->mrs[mri];
sgt = &srv_mr->sgt;
mr = srv_mr->mr;
- rtrs_iu_free(srv_mr->iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
dereg_mr:
ib_dereg_mr(mr);
unmap_sg:
- ib_dma_unmap_sg(sess->s.dev->ib_dev, sgt->sgl,
+ ib_dma_unmap_sg(srv_path->s.dev->ib_dev, sgt->sgl,
sgt->nents, DMA_BIDIRECTIONAL);
free_sg:
sg_free_table(sgt);
}
- kfree(sess->mrs);
+ kfree(srv_path->mrs);
return err;
}
chunk_bits = ilog2(srv->queue_depth - 1) + 1;
- sess->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits);
+ srv_path->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits);
return 0;
}
static void rtrs_srv_hb_err_handler(struct rtrs_con *c)
{
- close_sess(to_srv_sess(c->sess));
+ close_path(to_srv_path(c->path));
}
-static void rtrs_srv_init_hb(struct rtrs_srv_sess *sess)
+static void rtrs_srv_init_hb(struct rtrs_srv_path *srv_path)
{
- rtrs_init_hb(&sess->s, &io_comp_cqe,
+ rtrs_init_hb(&srv_path->s, &io_comp_cqe,
RTRS_HB_INTERVAL_MS,
RTRS_HB_MISSED_MAX,
rtrs_srv_hb_err_handler,
rtrs_wq);
}
-static void rtrs_srv_start_hb(struct rtrs_srv_sess *sess)
+static void rtrs_srv_start_hb(struct rtrs_srv_path *srv_path)
{
- rtrs_start_hb(&sess->s);
+ rtrs_start_hb(&srv_path->s);
}
-static void rtrs_srv_stop_hb(struct rtrs_srv_sess *sess)
+static void rtrs_srv_stop_hb(struct rtrs_srv_path *srv_path)
{
- rtrs_stop_hb(&sess->s);
+ rtrs_stop_hb(&srv_path->s);
}
static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
struct rtrs_iu *iu;
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
- rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
if (wc->status != IB_WC_SUCCESS) {
rtrs_err(s, "Sess info response send failed: %s\n",
ib_wc_status_msg(wc->status));
- close_sess(sess);
+ close_path(srv_path);
return;
}
WARN_ON(wc->opcode != IB_WC_SEND);
}
-static void rtrs_srv_sess_up(struct rtrs_srv_sess *sess)
+static void rtrs_srv_path_up(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_srv_ctx *ctx = srv->ctx;
int up;
@@ -731,18 +734,18 @@ static void rtrs_srv_sess_up(struct rtrs_srv_sess *sess)
mutex_unlock(&srv->paths_ev_mutex);
/* Mark session as established */
- sess->established = true;
+ srv_path->established = true;
}
-static void rtrs_srv_sess_down(struct rtrs_srv_sess *sess)
+static void rtrs_srv_path_down(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_srv_ctx *ctx = srv->ctx;
- if (!sess->established)
+ if (!srv_path->established)
return;
- sess->established = false;
+ srv_path->established = false;
mutex_lock(&srv->paths_ev_mutex);
WARN_ON(!srv->paths_up);
if (--srv->paths_up == 0)
@@ -750,11 +753,11 @@ static void rtrs_srv_sess_down(struct rtrs_srv_sess *sess)
mutex_unlock(&srv->paths_ev_mutex);
}
-static bool exist_sessname(struct rtrs_srv_ctx *ctx,
- const char *sessname, const uuid_t *path_uuid)
+static bool exist_pathname(struct rtrs_srv_ctx *ctx,
+ const char *pathname, const uuid_t *path_uuid)
{
- struct rtrs_srv *srv;
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_sess *srv;
+ struct rtrs_srv_path *srv_path;
bool found = false;
mutex_lock(&ctx->srv_mutex);
@@ -767,9 +770,9 @@ static bool exist_sessname(struct rtrs_srv_ctx *ctx,
continue;
}
- list_for_each_entry(sess, &srv->paths_list, s.entry) {
- if (strlen(sess->s.sessname) == strlen(sessname) &&
- !strcmp(sess->s.sessname, sessname)) {
+ list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
+ if (strlen(srv_path->s.sessname) == strlen(pathname) &&
+ !strcmp(srv_path->s.sessname, pathname)) {
found = true;
break;
}
@@ -782,14 +785,14 @@ static bool exist_sessname(struct rtrs_srv_ctx *ctx,
return found;
}
-static int post_recv_sess(struct rtrs_srv_sess *sess);
+static int post_recv_path(struct rtrs_srv_path *srv_path);
static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno);
static int process_info_req(struct rtrs_srv_con *con,
struct rtrs_msg_info_req *msg)
{
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
struct ib_send_wr *reg_wr = NULL;
struct rtrs_msg_info_rsp *rsp;
struct rtrs_iu *tx_iu;
@@ -797,31 +800,32 @@ static int process_info_req(struct rtrs_srv_con *con,
int mri, err;
size_t tx_sz;
- err = post_recv_sess(sess);
+ err = post_recv_path(srv_path);
if (err) {
- rtrs_err(s, "post_recv_sess(), err: %d\n", err);
+ rtrs_err(s, "post_recv_path(), err: %d\n", err);
return err;
}
- if (strchr(msg->sessname, '/') || strchr(msg->sessname, '.')) {
- rtrs_err(s, "sessname cannot contain / and .\n");
+ if (strchr(msg->pathname, '/') || strchr(msg->pathname, '.')) {
+ rtrs_err(s, "pathname cannot contain / and .\n");
return -EINVAL;
}
- if (exist_sessname(sess->srv->ctx,
- msg->sessname, &sess->srv->paths_uuid)) {
- rtrs_err(s, "sessname is duplicated: %s\n", msg->sessname);
+ if (exist_pathname(srv_path->srv->ctx,
+ msg->pathname, &srv_path->srv->paths_uuid)) {
+ rtrs_err(s, "pathname is duplicated: %s\n", msg->pathname);
return -EPERM;
}
- strscpy(sess->s.sessname, msg->sessname, sizeof(sess->s.sessname));
+ strscpy(srv_path->s.sessname, msg->pathname,
+ sizeof(srv_path->s.sessname));
- rwr = kcalloc(sess->mrs_num, sizeof(*rwr), GFP_KERNEL);
+ rwr = kcalloc(srv_path->mrs_num, sizeof(*rwr), GFP_KERNEL);
if (!rwr)
return -ENOMEM;
tx_sz = sizeof(*rsp);
- tx_sz += sizeof(rsp->desc[0]) * sess->mrs_num;
- tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
+ tx_sz += sizeof(rsp->desc[0]) * srv_path->mrs_num;
+ tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, srv_path->s.dev->ib_dev,
DMA_TO_DEVICE, rtrs_srv_info_rsp_done);
if (!tx_iu) {
err = -ENOMEM;
@@ -830,10 +834,10 @@ static int process_info_req(struct rtrs_srv_con *con,
rsp = tx_iu->buf;
rsp->type = cpu_to_le16(RTRS_MSG_INFO_RSP);
- rsp->sg_cnt = cpu_to_le16(sess->mrs_num);
+ rsp->sg_cnt = cpu_to_le16(srv_path->mrs_num);
- for (mri = 0; mri < sess->mrs_num; mri++) {
- struct ib_mr *mr = sess->mrs[mri].mr;
+ for (mri = 0; mri < srv_path->mrs_num; mri++) {
+ struct ib_mr *mr = srv_path->mrs[mri].mr;
rsp->desc[mri].addr = cpu_to_le64(mr->iova);
rsp->desc[mri].key = cpu_to_le32(mr->rkey);
@@ -854,13 +858,13 @@ static int process_info_req(struct rtrs_srv_con *con,
reg_wr = &rwr[mri].wr;
}
- err = rtrs_srv_create_sess_files(sess);
+ err = rtrs_srv_create_path_files(srv_path);
if (err)
goto iu_free;
- kobject_get(&sess->kobj);
- get_device(&sess->srv->dev);
- rtrs_srv_change_state(sess, RTRS_SRV_CONNECTED);
- rtrs_srv_start_hb(sess);
+ kobject_get(&srv_path->kobj);
+ get_device(&srv_path->srv->dev);
+ rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED);
+ rtrs_srv_start_hb(srv_path);
/*
* We do not account number of established connections at the current
@@ -868,9 +872,10 @@ static int process_info_req(struct rtrs_srv_con *con,
* all connections are successfully established. Thus, simply notify
* listener with a proper event if we are the first path.
*/
- rtrs_srv_sess_up(sess);
+ rtrs_srv_path_up(srv_path);
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr,
+ ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
+ tx_iu->dma_addr,
tx_iu->size, DMA_TO_DEVICE);
/* Send info response */
@@ -878,7 +883,7 @@ static int process_info_req(struct rtrs_srv_con *con,
if (err) {
rtrs_err(s, "rtrs_iu_post_send(), err: %d\n", err);
iu_free:
- rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(tx_iu, srv_path->s.dev->ib_dev, 1);
}
rwr_free:
kfree(rwr);
@@ -889,8 +894,8 @@ rwr_free:
static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
struct rtrs_msg_info_req *msg;
struct rtrs_iu *iu;
int err;
@@ -910,7 +915,7 @@ static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
wc->byte_len);
goto close;
}
- ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
+ ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev, iu->dma_addr,
iu->size, DMA_FROM_DEVICE);
msg = iu->buf;
if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_REQ) {
@@ -923,22 +928,22 @@ static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
goto close;
out:
- rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
return;
close:
- close_sess(sess);
+ close_path(srv_path);
goto out;
}
static int post_recv_info_req(struct rtrs_srv_con *con)
{
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
struct rtrs_iu *rx_iu;
int err;
rx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req),
- GFP_KERNEL, sess->s.dev->ib_dev,
+ GFP_KERNEL, srv_path->s.dev->ib_dev,
DMA_FROM_DEVICE, rtrs_srv_info_req_done);
if (!rx_iu)
return -ENOMEM;
@@ -946,7 +951,7 @@ static int post_recv_info_req(struct rtrs_srv_con *con)
err = rtrs_iu_post_recv(&con->c, rx_iu);
if (err) {
rtrs_err(s, "rtrs_iu_post_recv(), err: %d\n", err);
- rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(rx_iu, srv_path->s.dev->ib_dev, 1);
return err;
}
@@ -966,20 +971,20 @@ static int post_recv_io(struct rtrs_srv_con *con, size_t q_size)
return 0;
}
-static int post_recv_sess(struct rtrs_srv_sess *sess)
+static int post_recv_path(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
- struct rtrs_sess *s = &sess->s;
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_path *s = &srv_path->s;
size_t q_size;
int err, cid;
- for (cid = 0; cid < sess->s.con_num; cid++) {
+ for (cid = 0; cid < srv_path->s.con_num; cid++) {
if (cid == 0)
q_size = SERVICE_CON_QUEUE_DEPTH;
else
q_size = srv->queue_depth;
- err = post_recv_io(to_srv_con(sess->s.con[cid]), q_size);
+ err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size);
if (err) {
rtrs_err(s, "post_recv_io(), err: %d\n", err);
return err;
@@ -993,9 +998,9 @@ static void process_read(struct rtrs_srv_con *con,
struct rtrs_msg_rdma_read *msg,
u32 buf_id, u32 off)
{
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_srv_ctx *ctx = srv->ctx;
struct rtrs_srv_op *id;
@@ -1003,10 +1008,10 @@ static void process_read(struct rtrs_srv_con *con,
void *data;
int ret;
- if (sess->state != RTRS_SRV_CONNECTED) {
+ if (srv_path->state != RTRS_SRV_CONNECTED) {
rtrs_err_rl(s,
"Processing read request failed, session is disconnected, sess state %s\n",
- rtrs_srv_state_str(sess->state));
+ rtrs_srv_state_str(srv_path->state));
return;
}
if (msg->sg_cnt != 1 && msg->sg_cnt != 0) {
@@ -1014,9 +1019,9 @@ static void process_read(struct rtrs_srv_con *con,
"Processing read request failed, invalid message\n");
return;
}
- rtrs_srv_get_ops_ids(sess);
- rtrs_srv_update_rdma_stats(sess->stats, off, READ);
- id = sess->ops_ids[buf_id];
+ rtrs_srv_get_ops_ids(srv_path);
+ rtrs_srv_update_rdma_stats(srv_path->stats, off, READ);
+ id = srv_path->ops_ids[buf_id];
id->con = con;
id->dir = READ;
id->msg_id = buf_id;
@@ -1042,18 +1047,18 @@ send_err_msg:
rtrs_err_rl(s,
"Sending err msg for failed RDMA-Write-Req failed, msg_id %d, err: %d\n",
buf_id, ret);
- close_sess(sess);
+ close_path(srv_path);
}
- rtrs_srv_put_ops_ids(sess);
+ rtrs_srv_put_ops_ids(srv_path);
}
static void process_write(struct rtrs_srv_con *con,
struct rtrs_msg_rdma_write *req,
u32 buf_id, u32 off)
{
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_srv_ctx *ctx = srv->ctx;
struct rtrs_srv_op *id;
@@ -1061,15 +1066,15 @@ static void process_write(struct rtrs_srv_con *con,
void *data;
int ret;
- if (sess->state != RTRS_SRV_CONNECTED) {
+ if (srv_path->state != RTRS_SRV_CONNECTED) {
rtrs_err_rl(s,
"Processing write request failed, session is disconnected, sess state %s\n",
- rtrs_srv_state_str(sess->state));
+ rtrs_srv_state_str(srv_path->state));
return;
}
- rtrs_srv_get_ops_ids(sess);
- rtrs_srv_update_rdma_stats(sess->stats, off, WRITE);
- id = sess->ops_ids[buf_id];
+ rtrs_srv_get_ops_ids(srv_path);
+ rtrs_srv_update_rdma_stats(srv_path->stats, off, WRITE);
+ id = srv_path->ops_ids[buf_id];
id->con = con;
id->dir = WRITE;
id->msg_id = buf_id;
@@ -1094,20 +1099,21 @@ send_err_msg:
rtrs_err_rl(s,
"Processing write request failed, sending I/O response failed, msg_id %d, err: %d\n",
buf_id, ret);
- close_sess(sess);
+ close_path(srv_path);
}
- rtrs_srv_put_ops_ids(sess);
+ rtrs_srv_put_ops_ids(srv_path);
}
static void process_io_req(struct rtrs_srv_con *con, void *msg,
u32 id, u32 off)
{
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
struct rtrs_msg_rdma_hdr *hdr;
unsigned int type;
- ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, sess->dma_addr[id],
+ ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev,
+ srv_path->dma_addr[id],
max_chunk_size, DMA_BIDIRECTIONAL);
hdr = msg;
type = le16_to_cpu(hdr->type);
@@ -1129,7 +1135,7 @@ static void process_io_req(struct rtrs_srv_con *con, void *msg,
return;
err:
- close_sess(sess);
+ close_path(srv_path);
}
static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -1137,16 +1143,16 @@ static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
struct rtrs_srv_mr *mr =
container_of(wc->wr_cqe, typeof(*mr), inv_cqe);
struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ struct rtrs_srv_sess *srv = srv_path->srv;
u32 msg_id, off;
void *data;
if (wc->status != IB_WC_SUCCESS) {
rtrs_err(s, "Failed IB_WR_LOCAL_INV: %s\n",
ib_wc_status_msg(wc->status));
- close_sess(sess);
+ close_path(srv_path);
}
msg_id = mr->msg_id;
off = mr->msg_off;
@@ -1194,9 +1200,9 @@ static void rtrs_rdma_process_wr_wait_list(struct rtrs_srv_con *con)
static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ struct rtrs_srv_sess *srv = srv_path->srv;
u32 imm_type, imm_payload;
int err;
@@ -1206,7 +1212,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
"%s (wr_cqe: %p, type: %d, vendor_err: 0x%x, len: %u)\n",
ib_wc_status_msg(wc->status), wc->wr_cqe,
wc->opcode, wc->vendor_err, wc->byte_len);
- close_sess(sess);
+ close_path(srv_path);
}
return;
}
@@ -1222,7 +1228,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
if (err) {
rtrs_err(s, "rtrs_post_recv(), err: %d\n", err);
- close_sess(sess);
+ close_path(srv_path);
break;
}
rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
@@ -1231,16 +1237,16 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
u32 msg_id, off;
void *data;
- msg_id = imm_payload >> sess->mem_bits;
- off = imm_payload & ((1 << sess->mem_bits) - 1);
+ msg_id = imm_payload >> srv_path->mem_bits;
+ off = imm_payload & ((1 << srv_path->mem_bits) - 1);
if (msg_id >= srv->queue_depth || off >= max_chunk_size) {
rtrs_err(s, "Wrong msg_id %u, off %u\n",
msg_id, off);
- close_sess(sess);
+ close_path(srv_path);
return;
}
if (always_invalidate) {
- struct rtrs_srv_mr *mr = &sess->mrs[msg_id];
+ struct rtrs_srv_mr *mr = &srv_path->mrs[msg_id];
mr->msg_off = off;
mr->msg_id = msg_id;
@@ -1248,7 +1254,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
if (err) {
rtrs_err(s, "rtrs_post_recv(), err: %d\n",
err);
- close_sess(sess);
+ close_path(srv_path);
break;
}
} else {
@@ -1257,10 +1263,10 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
}
} else if (imm_type == RTRS_HB_MSG_IMM) {
WARN_ON(con->c.cid);
- rtrs_send_hb_ack(&sess->s);
+ rtrs_send_hb_ack(&srv_path->s);
} else if (imm_type == RTRS_HB_ACK_IMM) {
WARN_ON(con->c.cid);
- sess->s.hb_missed_cnt = 0;
+ srv_path->s.hb_missed_cnt = 0;
} else {
rtrs_wrn(s, "Unknown IMM type %u\n", imm_type);
}
@@ -1284,22 +1290,23 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
}
/**
- * rtrs_srv_get_sess_name() - Get rtrs_srv peer hostname.
+ * rtrs_srv_get_path_name() - Get rtrs_srv peer hostname.
* @srv: Session
- * @sessname: Sessname buffer
+ * @pathname: Pathname buffer
* @len: Length of sessname buffer
*/
-int rtrs_srv_get_sess_name(struct rtrs_srv *srv, char *sessname, size_t len)
+int rtrs_srv_get_path_name(struct rtrs_srv_sess *srv, char *pathname,
+ size_t len)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
int err = -ENOTCONN;
mutex_lock(&srv->paths_mutex);
- list_for_each_entry(sess, &srv->paths_list, s.entry) {
- if (sess->state != RTRS_SRV_CONNECTED)
+ list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
+ if (srv_path->state != RTRS_SRV_CONNECTED)
continue;
- strscpy(sessname, sess->s.sessname,
- min_t(size_t, sizeof(sess->s.sessname), len));
+ strscpy(pathname, srv_path->s.sessname,
+ min_t(size_t, sizeof(srv_path->s.sessname), len));
err = 0;
break;
}
@@ -1307,44 +1314,45 @@ int rtrs_srv_get_sess_name(struct rtrs_srv *srv, char *sessname, size_t len)
return err;
}
-EXPORT_SYMBOL(rtrs_srv_get_sess_name);
+EXPORT_SYMBOL(rtrs_srv_get_path_name);
/**
* rtrs_srv_get_queue_depth() - Get rtrs_srv qdepth.
* @srv: Session
*/
-int rtrs_srv_get_queue_depth(struct rtrs_srv *srv)
+int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *srv)
{
return srv->queue_depth;
}
EXPORT_SYMBOL(rtrs_srv_get_queue_depth);
-static int find_next_bit_ring(struct rtrs_srv_sess *sess)
+static int find_next_bit_ring(struct rtrs_srv_path *srv_path)
{
- struct ib_device *ib_dev = sess->s.dev->ib_dev;
+ struct ib_device *ib_dev = srv_path->s.dev->ib_dev;
int v;
- v = cpumask_next(sess->cur_cq_vector, &cq_affinity_mask);
+ v = cpumask_next(srv_path->cur_cq_vector, &cq_affinity_mask);
if (v >= nr_cpu_ids || v >= ib_dev->num_comp_vectors)
v = cpumask_first(&cq_affinity_mask);
return v;
}
-static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_sess *sess)
+static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_path *srv_path)
{
- sess->cur_cq_vector = find_next_bit_ring(sess);
+ srv_path->cur_cq_vector = find_next_bit_ring(srv_path);
- return sess->cur_cq_vector;
+ return srv_path->cur_cq_vector;
}
static void rtrs_srv_dev_release(struct device *dev)
{
- struct rtrs_srv *srv = container_of(dev, struct rtrs_srv, dev);
+ struct rtrs_srv_sess *srv = container_of(dev, struct rtrs_srv_sess,
+ dev);
kfree(srv);
}
-static void free_srv(struct rtrs_srv *srv)
+static void free_srv(struct rtrs_srv_sess *srv)
{
int i;
@@ -1358,11 +1366,11 @@ static void free_srv(struct rtrs_srv *srv)
put_device(&srv->dev);
}
-static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
+static struct rtrs_srv_sess *get_or_create_srv(struct rtrs_srv_ctx *ctx,
const uuid_t *paths_uuid,
bool first_conn)
{
- struct rtrs_srv *srv;
+ struct rtrs_srv_sess *srv;
int i;
mutex_lock(&ctx->srv_mutex);
@@ -1424,7 +1432,7 @@ err_free_srv:
return ERR_PTR(-ENOMEM);
}
-static void put_srv(struct rtrs_srv *srv)
+static void put_srv(struct rtrs_srv_sess *srv)
{
if (refcount_dec_and_test(&srv->refcount)) {
struct rtrs_srv_ctx *ctx = srv->ctx;
@@ -1438,23 +1446,23 @@ static void put_srv(struct rtrs_srv *srv)
}
}
-static void __add_path_to_srv(struct rtrs_srv *srv,
- struct rtrs_srv_sess *sess)
+static void __add_path_to_srv(struct rtrs_srv_sess *srv,
+ struct rtrs_srv_path *srv_path)
{
- list_add_tail(&sess->s.entry, &srv->paths_list);
+ list_add_tail(&srv_path->s.entry, &srv->paths_list);
srv->paths_num++;
WARN_ON(srv->paths_num >= MAX_PATHS_NUM);
}
-static void del_path_from_srv(struct rtrs_srv_sess *sess)
+static void del_path_from_srv(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
if (WARN_ON(!srv))
return;
mutex_lock(&srv->paths_mutex);
- list_del(&sess->s.entry);
+ list_del(&srv_path->s.entry);
WARN_ON(!srv->paths_num);
srv->paths_num--;
mutex_unlock(&srv->paths_mutex);
@@ -1484,47 +1492,47 @@ static int sockaddr_cmp(const struct sockaddr *a, const struct sockaddr *b)
}
}
-static bool __is_path_w_addr_exists(struct rtrs_srv *srv,
+static bool __is_path_w_addr_exists(struct rtrs_srv_sess *srv,
struct rdma_addr *addr)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
- list_for_each_entry(sess, &srv->paths_list, s.entry)
- if (!sockaddr_cmp((struct sockaddr *)&sess->s.dst_addr,
+ list_for_each_entry(srv_path, &srv->paths_list, s.entry)
+ if (!sockaddr_cmp((struct sockaddr *)&srv_path->s.dst_addr,
(struct sockaddr *)&addr->dst_addr) &&
- !sockaddr_cmp((struct sockaddr *)&sess->s.src_addr,
+ !sockaddr_cmp((struct sockaddr *)&srv_path->s.src_addr,
(struct sockaddr *)&addr->src_addr))
return true;
return false;
}
-static void free_sess(struct rtrs_srv_sess *sess)
+static void free_path(struct rtrs_srv_path *srv_path)
{
- if (sess->kobj.state_in_sysfs) {
- kobject_del(&sess->kobj);
- kobject_put(&sess->kobj);
+ if (srv_path->kobj.state_in_sysfs) {
+ kobject_del(&srv_path->kobj);
+ kobject_put(&srv_path->kobj);
} else {
- kfree(sess->stats);
- kfree(sess);
+ kfree(srv_path->stats);
+ kfree(srv_path);
}
}
static void rtrs_srv_close_work(struct work_struct *work)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
struct rtrs_srv_con *con;
int i;
- sess = container_of(work, typeof(*sess), close_work);
+ srv_path = container_of(work, typeof(*srv_path), close_work);
- rtrs_srv_destroy_sess_files(sess);
- rtrs_srv_stop_hb(sess);
+ rtrs_srv_destroy_path_files(srv_path);
+ rtrs_srv_stop_hb(srv_path);
- for (i = 0; i < sess->s.con_num; i++) {
- if (!sess->s.con[i])
+ for (i = 0; i < srv_path->s.con_num; i++) {
+ if (!srv_path->s.con[i])
continue;
- con = to_srv_con(sess->s.con[i]);
+ con = to_srv_con(srv_path->s.con[i]);
rdma_disconnect(con->c.cm_id);
ib_drain_qp(con->c.qp);
}
@@ -1533,41 +1541,41 @@ static void rtrs_srv_close_work(struct work_struct *work)
* Degrade ref count to the usual model with a single shared
* atomic_t counter
*/
- percpu_ref_kill(&sess->ids_inflight_ref);
+ percpu_ref_kill(&srv_path->ids_inflight_ref);
/* Wait for all completion */
- wait_for_completion(&sess->complete_done);
+ wait_for_completion(&srv_path->complete_done);
/* Notify upper layer if we are the last path */
- rtrs_srv_sess_down(sess);
+ rtrs_srv_path_down(srv_path);
- unmap_cont_bufs(sess);
- rtrs_srv_free_ops_ids(sess);
+ unmap_cont_bufs(srv_path);
+ rtrs_srv_free_ops_ids(srv_path);
- for (i = 0; i < sess->s.con_num; i++) {
- if (!sess->s.con[i])
+ for (i = 0; i < srv_path->s.con_num; i++) {
+ if (!srv_path->s.con[i])
continue;
- con = to_srv_con(sess->s.con[i]);
+ con = to_srv_con(srv_path->s.con[i]);
rtrs_cq_qp_destroy(&con->c);
rdma_destroy_id(con->c.cm_id);
kfree(con);
}
- rtrs_ib_dev_put(sess->s.dev);
+ rtrs_ib_dev_put(srv_path->s.dev);
- del_path_from_srv(sess);
- put_srv(sess->srv);
- sess->srv = NULL;
- rtrs_srv_change_state(sess, RTRS_SRV_CLOSED);
+ del_path_from_srv(srv_path);
+ put_srv(srv_path->srv);
+ srv_path->srv = NULL;
+ rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSED);
- kfree(sess->dma_addr);
- kfree(sess->s.con);
- free_sess(sess);
+ kfree(srv_path->dma_addr);
+ kfree(srv_path->s.con);
+ free_path(srv_path);
}
-static int rtrs_rdma_do_accept(struct rtrs_srv_sess *sess,
+static int rtrs_rdma_do_accept(struct rtrs_srv_path *srv_path,
struct rdma_cm_id *cm_id)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_msg_conn_rsp msg;
struct rdma_conn_param param;
int err;
@@ -1615,25 +1623,25 @@ static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno)
return errno;
}
-static struct rtrs_srv_sess *
-__find_sess(struct rtrs_srv *srv, const uuid_t *sess_uuid)
+static struct rtrs_srv_path *
+__find_path(struct rtrs_srv_sess *srv, const uuid_t *sess_uuid)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
- list_for_each_entry(sess, &srv->paths_list, s.entry) {
- if (uuid_equal(&sess->s.uuid, sess_uuid))
- return sess;
+ list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
+ if (uuid_equal(&srv_path->s.uuid, sess_uuid))
+ return srv_path;
}
return NULL;
}
-static int create_con(struct rtrs_srv_sess *sess,
+static int create_con(struct rtrs_srv_path *srv_path,
struct rdma_cm_id *cm_id,
unsigned int cid)
{
- struct rtrs_srv *srv = sess->srv;
- struct rtrs_sess *s = &sess->s;
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_path *s = &srv_path->s;
struct rtrs_srv_con *con;
u32 cq_num, max_send_wr, max_recv_wr, wr_limit;
@@ -1648,10 +1656,10 @@ static int create_con(struct rtrs_srv_sess *sess,
spin_lock_init(&con->rsp_wr_wait_lock);
INIT_LIST_HEAD(&con->rsp_wr_wait_list);
con->c.cm_id = cm_id;
- con->c.sess = &sess->s;
+ con->c.path = &srv_path->s;
con->c.cid = cid;
atomic_set(&con->c.wr_cnt, 1);
- wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
+ wr_limit = srv_path->s.dev->ib_dev->attrs.max_qp_wr;
if (con->c.cid == 0) {
/*
@@ -1684,10 +1692,10 @@ static int create_con(struct rtrs_srv_sess *sess,
}
cq_num = max_send_wr + max_recv_wr;
atomic_set(&con->c.sq_wr_avail, max_send_wr);
- cq_vector = rtrs_srv_get_next_cq_vector(sess);
+ cq_vector = rtrs_srv_get_next_cq_vector(srv_path);
/* TODO: SOFTIRQ can be faster, but be careful with softirq context */
- err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_num,
+ err = rtrs_cq_qp_create(&srv_path->s, &con->c, 1, cq_vector, cq_num,
max_send_wr, max_recv_wr,
IB_POLL_WORKQUEUE);
if (err) {
@@ -1699,8 +1707,8 @@ static int create_con(struct rtrs_srv_sess *sess,
if (err)
goto free_cqqp;
}
- WARN_ON(sess->s.con[cid]);
- sess->s.con[cid] = &con->c;
+ WARN_ON(srv_path->s.con[cid]);
+ srv_path->s.con[cid] = &con->c;
/*
* Change context from server to current connection. The other
@@ -1719,13 +1727,13 @@ err:
return err;
}
-static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv,
+static struct rtrs_srv_path *__alloc_path(struct rtrs_srv_sess *srv,
struct rdma_cm_id *cm_id,
unsigned int con_num,
unsigned int recon_cnt,
const uuid_t *uuid)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
int err = -ENOMEM;
char str[NAME_MAX];
struct rtrs_addr path;
@@ -1739,74 +1747,76 @@ static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv,
pr_err("Path with same addr exists\n");
goto err;
}
- sess = kzalloc(sizeof(*sess), GFP_KERNEL);
- if (!sess)
+ srv_path = kzalloc(sizeof(*srv_path), GFP_KERNEL);
+ if (!srv_path)
goto err;
- sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL);
- if (!sess->stats)
+ srv_path->stats = kzalloc(sizeof(*srv_path->stats), GFP_KERNEL);
+ if (!srv_path->stats)
goto err_free_sess;
- sess->stats->sess = sess;
+ srv_path->stats->srv_path = srv_path;
- sess->dma_addr = kcalloc(srv->queue_depth, sizeof(*sess->dma_addr),
- GFP_KERNEL);
- if (!sess->dma_addr)
+ srv_path->dma_addr = kcalloc(srv->queue_depth,
+ sizeof(*srv_path->dma_addr),
+ GFP_KERNEL);
+ if (!srv_path->dma_addr)
goto err_free_stats;
- sess->s.con = kcalloc(con_num, sizeof(*sess->s.con), GFP_KERNEL);
- if (!sess->s.con)
+ srv_path->s.con = kcalloc(con_num, sizeof(*srv_path->s.con),
+ GFP_KERNEL);
+ if (!srv_path->s.con)
goto err_free_dma_addr;
- sess->state = RTRS_SRV_CONNECTING;
- sess->srv = srv;
- sess->cur_cq_vector = -1;
- sess->s.dst_addr = cm_id->route.addr.dst_addr;
- sess->s.src_addr = cm_id->route.addr.src_addr;
+ srv_path->state = RTRS_SRV_CONNECTING;
+ srv_path->srv = srv;
+ srv_path->cur_cq_vector = -1;
+ srv_path->s.dst_addr = cm_id->route.addr.dst_addr;
+ srv_path->s.src_addr = cm_id->route.addr.src_addr;
/* temporary until receiving session-name from client */
- path.src = &sess->s.src_addr;
- path.dst = &sess->s.dst_addr;
+ path.src = &srv_path->s.src_addr;
+ path.dst = &srv_path->s.dst_addr;
rtrs_addr_to_str(&path, str, sizeof(str));
- strscpy(sess->s.sessname, str, sizeof(sess->s.sessname));
-
- sess->s.con_num = con_num;
- sess->s.irq_con_num = con_num;
- sess->s.recon_cnt = recon_cnt;
- uuid_copy(&sess->s.uuid, uuid);
- spin_lock_init(&sess->state_lock);
- INIT_WORK(&sess->close_work, rtrs_srv_close_work);
- rtrs_srv_init_hb(sess);
-
- sess->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd);
- if (!sess->s.dev) {
+ strscpy(srv_path->s.sessname, str, sizeof(srv_path->s.sessname));
+
+ srv_path->s.con_num = con_num;
+ srv_path->s.irq_con_num = con_num;
+ srv_path->s.recon_cnt = recon_cnt;
+ uuid_copy(&srv_path->s.uuid, uuid);
+ spin_lock_init(&srv_path->state_lock);
+ INIT_WORK(&srv_path->close_work, rtrs_srv_close_work);
+ rtrs_srv_init_hb(srv_path);
+
+ srv_path->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd);
+ if (!srv_path->s.dev) {
err = -ENOMEM;
goto err_free_con;
}
- err = map_cont_bufs(sess);
+ err = map_cont_bufs(srv_path);
if (err)
goto err_put_dev;
- err = rtrs_srv_alloc_ops_ids(sess);
+ err = rtrs_srv_alloc_ops_ids(srv_path);
if (err)
goto err_unmap_bufs;
- __add_path_to_srv(srv, sess);
+ __add_path_to_srv(srv, srv_path);
- return sess;
+ return srv_path;
err_unmap_bufs:
- unmap_cont_bufs(sess);
+ unmap_cont_bufs(srv_path);
err_put_dev:
- rtrs_ib_dev_put(sess->s.dev);
+ rtrs_ib_dev_put(srv_path->s.dev);
err_free_con:
- kfree(sess->s.con);
+ kfree(srv_path->s.con);
err_free_dma_addr:
- kfree(sess->dma_addr);
+ kfree(srv_path->dma_addr);
err_free_stats:
- kfree(sess->stats);
+ kfree(srv_path->stats);
err_free_sess:
- kfree(sess);
+ kfree(srv_path);
err:
return ERR_PTR(err);
}
@@ -1816,8 +1826,8 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
size_t len)
{
struct rtrs_srv_ctx *ctx = cm_id->context;
- struct rtrs_srv_sess *sess;
- struct rtrs_srv *srv;
+ struct rtrs_srv_path *srv_path;
+ struct rtrs_srv_sess *srv;
u16 version, con_num, cid;
u16 recon_cnt;
@@ -1857,16 +1867,16 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
goto reject_w_err;
}
mutex_lock(&srv->paths_mutex);
- sess = __find_sess(srv, &msg->sess_uuid);
- if (sess) {
- struct rtrs_sess *s = &sess->s;
+ srv_path = __find_path(srv, &msg->sess_uuid);
+ if (srv_path) {
+ struct rtrs_path *s = &srv_path->s;
/* Session already holds a reference */
put_srv(srv);
- if (sess->state != RTRS_SRV_CONNECTING) {
+ if (srv_path->state != RTRS_SRV_CONNECTING) {
rtrs_err(s, "Session in wrong state: %s\n",
- rtrs_srv_state_str(sess->state));
+ rtrs_srv_state_str(srv_path->state));
mutex_unlock(&srv->paths_mutex);
goto reject_w_err;
}
@@ -1886,19 +1896,19 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
goto reject_w_err;
}
} else {
- sess = __alloc_sess(srv, cm_id, con_num, recon_cnt,
+ srv_path = __alloc_path(srv, cm_id, con_num, recon_cnt,
&msg->sess_uuid);
- if (IS_ERR(sess)) {
+ if (IS_ERR(srv_path)) {
mutex_unlock(&srv->paths_mutex);
put_srv(srv);
- err = PTR_ERR(sess);
+ err = PTR_ERR(srv_path);
pr_err("RTRS server session allocation failed: %d\n", err);
goto reject_w_err;
}
}
- err = create_con(sess, cm_id, cid);
+ err = create_con(srv_path, cm_id, cid);
if (err) {
- rtrs_err((&sess->s), "create_con(), error %d\n", err);
+ rtrs_err((&srv_path->s), "create_con(), error %d\n", err);
rtrs_rdma_do_reject(cm_id, err);
/*
* Since session has other connections we follow normal way
@@ -1907,9 +1917,9 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
*/
goto close_and_return_err;
}
- err = rtrs_rdma_do_accept(sess, cm_id);
+ err = rtrs_rdma_do_accept(srv_path, cm_id);
if (err) {
- rtrs_err((&sess->s), "rtrs_rdma_do_accept(), error %d\n", err);
+ rtrs_err((&srv_path->s), "rtrs_rdma_do_accept(), error %d\n", err);
rtrs_rdma_do_reject(cm_id, err);
/*
* Since current connection was successfully added to the
@@ -1929,7 +1939,7 @@ reject_w_err:
close_and_return_err:
mutex_unlock(&srv->paths_mutex);
- close_sess(sess);
+ close_path(srv_path);
return err;
}
@@ -1937,14 +1947,14 @@ close_and_return_err:
static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *ev)
{
- struct rtrs_srv_sess *sess = NULL;
- struct rtrs_sess *s = NULL;
+ struct rtrs_srv_path *srv_path = NULL;
+ struct rtrs_path *s = NULL;
if (ev->event != RDMA_CM_EVENT_CONNECT_REQUEST) {
struct rtrs_con *c = cm_id->context;
- s = c->sess;
- sess = to_srv_sess(s);
+ s = c->path;
+ srv_path = to_srv_path(s);
}
switch (ev->event) {
@@ -1968,7 +1978,7 @@ static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id,
case RDMA_CM_EVENT_ADDR_CHANGE:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
case RDMA_CM_EVENT_DEVICE_REMOVAL:
- close_sess(sess);
+ close_path(srv_path);
break;
default:
pr_err("Ignoring unexpected CM event %s, err %d\n",
@@ -2176,23 +2186,23 @@ struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port)
}
EXPORT_SYMBOL(rtrs_srv_open);
-static void close_sessions(struct rtrs_srv *srv)
+static void close_paths(struct rtrs_srv_sess *srv)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
mutex_lock(&srv->paths_mutex);
- list_for_each_entry(sess, &srv->paths_list, s.entry)
- close_sess(sess);
+ list_for_each_entry(srv_path, &srv->paths_list, s.entry)
+ close_path(srv_path);
mutex_unlock(&srv->paths_mutex);
}
static void close_ctx(struct rtrs_srv_ctx *ctx)
{
- struct rtrs_srv *srv;
+ struct rtrs_srv_sess *srv;
mutex_lock(&ctx->srv_mutex);
list_for_each_entry(srv, &ctx->srv_list, ctx_list)
- close_sessions(srv);
+ close_paths(srv);
mutex_unlock(&ctx->srv_mutex);
flush_workqueue(rtrs_wq);
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
index 7d403c12faf3..6292e87f6afd 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -37,7 +37,7 @@ struct rtrs_srv_stats_rdma_stats {
struct rtrs_srv_stats {
struct kobject kobj_stats;
struct rtrs_srv_stats_rdma_stats rdma_stats;
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
};
struct rtrs_srv_con {
@@ -71,9 +71,9 @@ struct rtrs_srv_mr {
struct rtrs_iu *iu; /* send buffer for new rkey msg */
};
-struct rtrs_srv_sess {
- struct rtrs_sess s;
- struct rtrs_srv *srv;
+struct rtrs_srv_path {
+ struct rtrs_path s;
+ struct rtrs_srv_sess *srv;
struct work_struct close_work;
enum rtrs_srv_state state;
spinlock_t state_lock;
@@ -90,7 +90,7 @@ struct rtrs_srv_sess {
struct rtrs_srv_stats *stats;
};
-struct rtrs_srv {
+struct rtrs_srv_sess {
struct list_head paths_list;
int paths_up;
struct mutex paths_ev_mutex;
@@ -125,7 +125,7 @@ struct rtrs_srv_ib_ctx {
extern struct class *rtrs_dev_class;
-void close_sess(struct rtrs_srv_sess *sess);
+void close_path(struct rtrs_srv_path *srv_path);
static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s,
size_t size, int d)
@@ -142,7 +142,7 @@ ssize_t rtrs_srv_reset_all_help(struct rtrs_srv_stats *stats,
char *page, size_t len);
/* functions which are implemented in rtrs-srv-sysfs.c */
-int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess);
-void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess);
+int rtrs_srv_create_path_files(struct rtrs_srv_path *srv_path);
+void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path);
#endif /* RTRS_SRV_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
index 37952c8e768c..4da889103a5f 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -69,16 +69,16 @@ EXPORT_SYMBOL_GPL(rtrs_iu_free);
int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu)
{
- struct rtrs_sess *sess = con->sess;
+ struct rtrs_path *path = con->path;
struct ib_recv_wr wr;
struct ib_sge list;
list.addr = iu->dma_addr;
list.length = iu->size;
- list.lkey = sess->dev->ib_pd->local_dma_lkey;
+ list.lkey = path->dev->ib_pd->local_dma_lkey;
if (list.length == 0) {
- rtrs_wrn(con->sess,
+ rtrs_wrn(con->path,
"Posting receive work request failed, sg list is empty\n");
return -EINVAL;
}
@@ -126,7 +126,7 @@ static int rtrs_post_send(struct ib_qp *qp, struct ib_send_wr *head,
int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
struct ib_send_wr *head)
{
- struct rtrs_sess *sess = con->sess;
+ struct rtrs_path *path = con->path;
struct ib_send_wr wr;
struct ib_sge list;
@@ -135,7 +135,7 @@ int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
list.addr = iu->dma_addr;
list.length = size;
- list.lkey = sess->dev->ib_pd->local_dma_lkey;
+ list.lkey = path->dev->ib_pd->local_dma_lkey;
wr = (struct ib_send_wr) {
.wr_cqe = &iu->cqe,
@@ -188,11 +188,11 @@ static int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con,
struct ib_send_wr *head)
{
struct ib_rdma_wr wr;
- struct rtrs_sess *sess = con->sess;
+ struct rtrs_path *path = con->path;
enum ib_send_flags sflags;
atomic_dec_if_positive(&con->sq_wr_avail);
- sflags = (atomic_inc_return(&con->wr_cnt) % sess->signal_interval) ?
+ sflags = (atomic_inc_return(&con->wr_cnt) % path->signal_interval) ?
0 : IB_SEND_SIGNALED;
wr = (struct ib_rdma_wr) {
@@ -211,12 +211,12 @@ static void qp_event_handler(struct ib_event *ev, void *ctx)
switch (ev->event) {
case IB_EVENT_COMM_EST:
- rtrs_info(con->sess, "QP event %s (%d) received\n",
+ rtrs_info(con->path, "QP event %s (%d) received\n",
ib_event_msg(ev->event), ev->event);
rdma_notify(con->cm_id, IB_EVENT_COMM_EST);
break;
default:
- rtrs_info(con->sess, "Unhandled QP event %s (%d) received\n",
+ rtrs_info(con->path, "Unhandled QP event %s (%d) received\n",
ib_event_msg(ev->event), ev->event);
break;
}
@@ -224,7 +224,7 @@ static void qp_event_handler(struct ib_event *ev, void *ctx)
static bool is_pollqueue(struct rtrs_con *con)
{
- return con->cid >= con->sess->irq_con_num;
+ return con->cid >= con->path->irq_con_num;
}
static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,
@@ -240,7 +240,7 @@ static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,
cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx);
if (IS_ERR(cq)) {
- rtrs_err(con->sess, "Creating completion queue failed, errno: %ld\n",
+ rtrs_err(con->path, "Creating completion queue failed, errno: %ld\n",
PTR_ERR(cq));
return PTR_ERR(cq);
}
@@ -271,7 +271,7 @@ static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
ret = rdma_create_qp(cm_id, pd, &init_attr);
if (ret) {
- rtrs_err(con->sess, "Creating QP failed, err: %d\n", ret);
+ rtrs_err(con->path, "Creating QP failed, err: %d\n", ret);
return ret;
}
con->qp = cm_id->qp;
@@ -290,7 +290,7 @@ static void destroy_cq(struct rtrs_con *con)
con->cq = NULL;
}
-int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
+int rtrs_cq_qp_create(struct rtrs_path *path, struct rtrs_con *con,
u32 max_send_sge, int cq_vector, int nr_cqe,
u32 max_send_wr, u32 max_recv_wr,
enum ib_poll_context poll_ctx)
@@ -301,13 +301,13 @@ int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
if (err)
return err;
- err = create_qp(con, sess->dev->ib_pd, max_send_wr, max_recv_wr,
+ err = create_qp(con, path->dev->ib_pd, max_send_wr, max_recv_wr,
max_send_sge);
if (err) {
destroy_cq(con);
return err;
}
- con->sess = sess;
+ con->path = path;
return 0;
}
@@ -323,24 +323,24 @@ void rtrs_cq_qp_destroy(struct rtrs_con *con)
}
EXPORT_SYMBOL_GPL(rtrs_cq_qp_destroy);
-static void schedule_hb(struct rtrs_sess *sess)
+static void schedule_hb(struct rtrs_path *path)
{
- queue_delayed_work(sess->hb_wq, &sess->hb_dwork,
- msecs_to_jiffies(sess->hb_interval_ms));
+ queue_delayed_work(path->hb_wq, &path->hb_dwork,
+ msecs_to_jiffies(path->hb_interval_ms));
}
-void rtrs_send_hb_ack(struct rtrs_sess *sess)
+void rtrs_send_hb_ack(struct rtrs_path *path)
{
- struct rtrs_con *usr_con = sess->con[0];
+ struct rtrs_con *usr_con = path->con[0];
u32 imm;
int err;
imm = rtrs_to_imm(RTRS_HB_ACK_IMM, 0);
- err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
+ err = rtrs_post_rdma_write_imm_empty(usr_con, path->hb_cqe, imm,
NULL);
if (err) {
- rtrs_err(sess, "send HB ACK failed, errno: %d\n", err);
- sess->hb_err_handler(usr_con);
+ rtrs_err(path, "send HB ACK failed, errno: %d\n", err);
+ path->hb_err_handler(usr_con);
return;
}
}
@@ -349,63 +349,63 @@ EXPORT_SYMBOL_GPL(rtrs_send_hb_ack);
static void hb_work(struct work_struct *work)
{
struct rtrs_con *usr_con;
- struct rtrs_sess *sess;
+ struct rtrs_path *path;
u32 imm;
int err;
- sess = container_of(to_delayed_work(work), typeof(*sess), hb_dwork);
- usr_con = sess->con[0];
+ path = container_of(to_delayed_work(work), typeof(*path), hb_dwork);
+ usr_con = path->con[0];
- if (sess->hb_missed_cnt > sess->hb_missed_max) {
- rtrs_err(sess, "HB missed max reached.\n");
- sess->hb_err_handler(usr_con);
+ if (path->hb_missed_cnt > path->hb_missed_max) {
+ rtrs_err(path, "HB missed max reached.\n");
+ path->hb_err_handler(usr_con);
return;
}
- if (sess->hb_missed_cnt++) {
+ if (path->hb_missed_cnt++) {
/* Reschedule work without sending hb */
- schedule_hb(sess);
+ schedule_hb(path);
return;
}
- sess->hb_last_sent = ktime_get();
+ path->hb_last_sent = ktime_get();
imm = rtrs_to_imm(RTRS_HB_MSG_IMM, 0);
- err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
+ err = rtrs_post_rdma_write_imm_empty(usr_con, path->hb_cqe, imm,
NULL);
if (err) {
- rtrs_err(sess, "HB send failed, errno: %d\n", err);
- sess->hb_err_handler(usr_con);
+ rtrs_err(path, "HB send failed, errno: %d\n", err);
+ path->hb_err_handler(usr_con);
return;
}
- schedule_hb(sess);
+ schedule_hb(path);
}
-void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe,
+void rtrs_init_hb(struct rtrs_path *path, struct ib_cqe *cqe,
unsigned int interval_ms, unsigned int missed_max,
void (*err_handler)(struct rtrs_con *con),
struct workqueue_struct *wq)
{
- sess->hb_cqe = cqe;
- sess->hb_interval_ms = interval_ms;
- sess->hb_err_handler = err_handler;
- sess->hb_wq = wq;
- sess->hb_missed_max = missed_max;
- sess->hb_missed_cnt = 0;
- INIT_DELAYED_WORK(&sess->hb_dwork, hb_work);
+ path->hb_cqe = cqe;
+ path->hb_interval_ms = interval_ms;
+ path->hb_err_handler = err_handler;
+ path->hb_wq = wq;
+ path->hb_missed_max = missed_max;
+ path->hb_missed_cnt = 0;
+ INIT_DELAYED_WORK(&path->hb_dwork, hb_work);
}
EXPORT_SYMBOL_GPL(rtrs_init_hb);
-void rtrs_start_hb(struct rtrs_sess *sess)
+void rtrs_start_hb(struct rtrs_path *path)
{
- schedule_hb(sess);
+ schedule_hb(path);
}
EXPORT_SYMBOL_GPL(rtrs_start_hb);
-void rtrs_stop_hb(struct rtrs_sess *sess)
+void rtrs_stop_hb(struct rtrs_path *path)
{
- cancel_delayed_work_sync(&sess->hb_dwork);
- sess->hb_missed_cnt = 0;
+ cancel_delayed_work_sync(&path->hb_dwork);
+ path->hb_missed_cnt = 0;
}
EXPORT_SYMBOL_GPL(rtrs_stop_hb);
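
The heartbeat helpers keep their behaviour across this rename; only the object they act on changes from rtrs_sess to rtrs_path. Below is a minimal sketch of how a ULP might wire up one path's heartbeat with the renamed API; my_hb_done, MY_HB_INTERVAL_MS, MY_HB_MISSED_MAX, my_hb_err_handler and my_wq are illustrative placeholders, not names from this series.

	#define MY_HB_INTERVAL_MS	5000	/* placeholder values */
	#define MY_HB_MISSED_MAX	5

	static struct workqueue_struct *my_wq;	/* allocated elsewhere */

	static void my_hb_done(struct ib_cq *cq, struct ib_wc *wc)
	{
		/* completion of the heartbeat RDMA write with immediate */
	}

	static struct ib_cqe my_hb_cqe = { .done = my_hb_done };

	static void my_hb_err_handler(struct rtrs_con *con)
	{
		/* too many missed heartbeats: tear down or reconnect the path */
	}

	static void start_path_hb(struct rtrs_path *path)
	{
		rtrs_init_hb(path, &my_hb_cqe, MY_HB_INTERVAL_MS,
			     MY_HB_MISSED_MAX, my_hb_err_handler, my_wq);
		rtrs_start_hb(path);	/* rtrs_stop_hb(path) undoes this */
	}
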
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.h b/drivers/infiniband/ulp/rtrs/rtrs.h
index 859c79685daf..5e57a7ccc7fb 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs.h
@@ -13,9 +13,9 @@
#include <linux/scatterlist.h>
struct rtrs_permit;
-struct rtrs_clt;
+struct rtrs_clt_sess;
struct rtrs_srv_ctx;
-struct rtrs_srv;
+struct rtrs_srv_sess;
struct rtrs_srv_op;
/*
@@ -52,14 +52,14 @@ struct rtrs_clt_ops {
void (*link_ev)(void *priv, enum rtrs_clt_link_ev ev);
};
-struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
- const char *sessname,
+struct rtrs_clt_sess *rtrs_clt_open(struct rtrs_clt_ops *ops,
+ const char *pathname,
const struct rtrs_addr *paths,
size_t path_cnt, u16 port,
size_t pdu_sz, u8 reconnect_delay_sec,
s16 max_reconnect_attempts, u32 nr_poll_queues);
-void rtrs_clt_close(struct rtrs_clt *sess);
+void rtrs_clt_close(struct rtrs_clt_sess *clt);
enum wait_type {
RTRS_PERMIT_NOWAIT = 0,
@@ -77,11 +77,12 @@ enum rtrs_clt_con_type {
RTRS_IO_CON
};
-struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *sess,
- enum rtrs_clt_con_type con_type,
- enum wait_type wait);
+struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *sess,
+ enum rtrs_clt_con_type con_type,
+ enum wait_type wait);
-void rtrs_clt_put_permit(struct rtrs_clt *sess, struct rtrs_permit *permit);
+void rtrs_clt_put_permit(struct rtrs_clt_sess *sess,
+ struct rtrs_permit *permit);
/**
* rtrs_clt_req_ops - it holds the request confirmation callback
@@ -98,10 +99,10 @@ struct rtrs_clt_req_ops {
};
int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
- struct rtrs_clt *sess, struct rtrs_permit *permit,
+ struct rtrs_clt_sess *sess, struct rtrs_permit *permit,
const struct kvec *vec, size_t nr, size_t len,
struct scatterlist *sg, unsigned int sg_cnt);
-int rtrs_clt_rdma_cq_direct(struct rtrs_clt *clt, unsigned int index);
+int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index);
/**
* rtrs_attrs - RTRS session attributes
@@ -112,7 +113,7 @@ struct rtrs_attrs {
u32 max_segments;
};
-int rtrs_clt_query(struct rtrs_clt *sess, struct rtrs_attrs *attr);
+int rtrs_clt_query(struct rtrs_clt_sess *sess, struct rtrs_attrs *attr);
/*
* Here goes RTRS server API
@@ -163,7 +164,7 @@ struct rtrs_srv_ops {
* @priv: Private data from user if previously set with
* rtrs_srv_set_sess_priv()
*/
- int (*link_ev)(struct rtrs_srv *sess, enum rtrs_srv_link_ev ev,
+ int (*link_ev)(struct rtrs_srv_sess *sess, enum rtrs_srv_link_ev ev,
void *priv);
};
@@ -173,11 +174,12 @@ void rtrs_srv_close(struct rtrs_srv_ctx *ctx);
bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int errno);
-void rtrs_srv_set_sess_priv(struct rtrs_srv *sess, void *priv);
+void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *sess, void *priv);
-int rtrs_srv_get_sess_name(struct rtrs_srv *sess, char *sessname, size_t len);
+int rtrs_srv_get_path_name(struct rtrs_srv_sess *sess, char *pathname,
+ size_t len);
-int rtrs_srv_get_queue_depth(struct rtrs_srv *sess);
+int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *sess);
int rtrs_addr_to_sockaddr(const char *str, size_t len, u16 port,
struct rtrs_addr *addr);
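
For ULPs the visible change in this header is that the opaque client handle returned by rtrs_clt_open() is now a struct rtrs_clt_sess rather than struct rtrs_clt, and the name argument is a path name. A hedged sketch of a caller against the renamed API follows; rnbd-clt is the in-tree user, and the ops callbacks, addresses and size constants below are placeholders rather than values from the patch.

	struct rtrs_clt_ops ops = {
		.priv	 = my_priv,
		.link_ev = my_link_ev,
	};
	struct rtrs_clt_sess *clt;

	clt = rtrs_clt_open(&ops, "my_path", paths, path_cnt, port,
			    sizeof(struct my_iu), RECONNECT_DELAY_SEC,
			    MAX_RECONNECT_ATTEMPTS, nr_poll_queues);
	if (IS_ERR(clt))
		return PTR_ERR(clt);

	/* issue I/O via rtrs_clt_get_permit() + rtrs_clt_request() */

	rtrs_clt_close(clt);
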
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index fed5f93bf620..26433a62d7f0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -497,7 +497,7 @@ struct cpl_t5_pass_accept_rpl {
__be32 opt2;
__be64 opt0;
__be32 iss;
- __be32 rsvd;
+ __be32 rsvd[3];
};
struct cpl_act_open_req {
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 465b0d0bdaf8..2e3843b761e8 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -276,6 +276,7 @@ enum ib_port_capability_mask2_bits {
IB_PORT_SWITCH_PORT_STATE_TABLE_SUP = 1 << 3,
IB_PORT_LINK_WIDTH_2X_SUP = 1 << 4,
IB_PORT_LINK_SPEED_HDR_SUP = 1 << 5,
+ IB_PORT_LINK_SPEED_NDR_SUP = 1 << 10,
};
#define OPA_CLASS_PORT_INFO_PR_SUPPORT BIT(26)
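
The new capability_mask2 bit lets MAD-based port queries report NDR links. A rough, illustrative sketch of how a query_port() path might use it is shown below; the helper name is a placeholder and the real consumer also consults the extended speed fields, so this is only the gating idea, not the actual driver logic.

	/* Illustrative only: report NDR as the active speed when the port
	 * advertises the new capability bit. */
	static void maybe_report_ndr(struct ib_port_attr *props)
	{
		if (props->port_cap_flags2 & IB_PORT_LINK_SPEED_NDR_SUP)
			props->active_speed = IB_SPEED_NDR;
	}
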
diff --git a/include/rdma/ib_smi.h b/include/rdma/ib_smi.h
index fdb8633cbaff..fc16b826b2c1 100644
--- a/include/rdma/ib_smi.h
+++ b/include/rdma/ib_smi.h
@@ -144,5 +144,15 @@ ib_get_smp_direction(struct ib_smp *smp)
#define IB_NOTICE_TRAP_DR_NOTICE 0x80
#define IB_NOTICE_TRAP_DR_TRUNC 0x40
-
+/**
+ * ib_init_query_mad - Initialize query MAD.
+ * @mad: MAD to initialize.
+ */
+static inline void ib_init_query_mad(struct ib_smp *mad)
+{
+ mad->base_version = IB_MGMT_BASE_VERSION;
+ mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+ mad->class_version = 1;
+ mad->method = IB_MGMT_METHOD_GET;
+}
#endif /* IB_SMI_H */
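
ib_init_query_mad() centralizes the query-MAD setup that several drivers previously carried as local init_query_mad() copies; a query then only needs to fill in the attribute. A minimal hedged sketch follows (the attribute and port are placeholders, and the actual MAD call plus error handling are omitted).

	struct ib_smp *in_mad;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	if (!in_mad)
		return -ENOMEM;

	ib_init_query_mad(in_mad);	/* GET, SUBN_LID_ROUTED, class v1 */
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);
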
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 6e9ad656ecb7..69d883f7fb41 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -4749,6 +4749,23 @@ static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
return (u32)(v & IB_GRH_FLOWLABEL_MASK);
}
+/**
+ * rdma_get_udp_sport - Calculate and set UDP source port based on the flow
+ * label. If flow label is not defined in GRH then
+ * calculate it based on lqpn/rqpn.
+ *
+ * @fl: flow label from GRH
+ * @lqpn: local qp number
+ * @rqpn: remote qp number
+ */
+static inline u16 rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
+{
+ if (!fl)
+ fl = rdma_calc_flow_label(lqpn, rqpn);
+
+ return rdma_flow_label_to_udp_sport(fl);
+}
+
const struct ib_port_immutable*
ib_port_immutable_read(struct ib_device *dev, unsigned int port);
#endif /* IB_VERBS_H */
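
rdma_get_udp_sport() is the consolidation point for RoCEv2 UDP source-port selection used elsewhere in this series: take the flow label when the GRH carries one, otherwise derive one from the QP numbers via rdma_calc_flow_label(). A small hedged sketch of a driver-side caller follows; pick_udp_sport() itself is an illustrative wrapper, not part of the API.

	static u16 pick_udp_sport(const struct rdma_ah_attr *ah_attr,
				  u32 lqpn, u32 rqpn)
	{
		/* Use the GRH flow label when present; otherwise the helper
		 * hashes lqpn/rqpn into a flow label first. */
		return rdma_get_udp_sport(rdma_ah_read_grh(ah_attr)->flow_label,
					  lqpn, rqpn);
	}
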
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index 42b177655560..f6fde06db4b4 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -77,10 +77,12 @@ enum hns_roce_qp_cap_flags {
HNS_ROCE_QP_CAP_RQ_RECORD_DB = 1 << 0,
HNS_ROCE_QP_CAP_SQ_RECORD_DB = 1 << 1,
HNS_ROCE_QP_CAP_OWNER_DB = 1 << 2,
+ HNS_ROCE_QP_CAP_DIRECT_WQE = 1 << 5,
};
struct hns_roce_ib_create_qp_resp {
__aligned_u64 cap_flags;
+ __aligned_u64 dwqe_mmap_key;
};
struct hns_roce_ib_alloc_ucontext_resp {