path: root/drivers/infiniband/hw
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/Makefile | 1
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 10
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.h | 2
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c | 28
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.h | 3
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.c | 2
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_sp.h | 1
-rw-r--r--  drivers/infiniband/hw/bnxt_re/roce_hsi.h | 4
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c | 3
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 1
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c | 6
-rw-r--r--  drivers/infiniband/hw/efa/efa.h | 5
-rw-r--r--  drivers/infiniband/hw/efa/efa_admin_cmds_defs.h | 17
-rw-r--r--  drivers/infiniband/hw/efa/efa_com_cmd.c | 53
-rw-r--r--  drivers/infiniband/hw/efa/efa_com_cmd.h | 11
-rw-r--r--  drivers/infiniband/hw/efa/efa_main.c | 1
-rw-r--r--  drivers/infiniband/hw/efa/efa_verbs.c | 91
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_verbs.c | 9
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_verbs.h | 3
-rw-r--r--  drivers/infiniband/hw/hfi1/affinity.c | 96
-rw-r--r--  drivers/infiniband/hw/hfi1/debugfs.c | 28
-rw-r--r--  drivers/infiniband/hw/hfi1/debugfs.h | 9
-rw-r--r--  drivers/infiniband/hw/hfi1/fault.c | 9
-rw-r--r--  drivers/infiniband/hw/hfi1/sysfs.c | 2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h | 21
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hem.c | 18
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 134
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 16
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c | 32
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_mr.c | 120
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c | 4
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.c | 9
-rw-r--r--  drivers/infiniband/hw/mana/counters.c | 78
-rw-r--r--  drivers/infiniband/hw/mana/counters.h | 18
-rw-r--r--  drivers/infiniband/hw/mana/device.c | 120
-rw-r--r--  drivers/infiniband/hw/mana/main.c | 13
-rw-r--r--  drivers/infiniband/hw/mana/mana_ib.h | 30
-rw-r--r--  drivers/infiniband/hw/mana/mr.c | 8
-rw-r--r--  drivers/infiniband/hw/mana/qp.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 1
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx5/Makefile | 1
-rw-r--r--  drivers/infiniband/hw/mlx5/counters.c | 34
-rw-r--r--  drivers/infiniband/hw/mlx5/counters.h | 13
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c | 19
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c | 16
-rw-r--r--  drivers/infiniband/hw/mlx5/dm.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/dmah.c | 54
-rw-r--r--  drivers/infiniband/hw/mlx5/dmah.h | 23
-rw-r--r--  drivers/infiniband/hw/mlx5/fs.c | 121
-rw-r--r--  drivers/infiniband/hw/mlx5/fs.h | 8
-rw-r--r--  drivers/infiniband/hw/mlx5/ib_rep.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 46
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 99
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 177
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 40
-rw-r--r--  drivers/infiniband/hw/mlx5/umr.c | 307
-rw-r--r--  drivers/infiniband/hw/mlx5/umr.h | 13
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 6
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 6
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.h | 3
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 6
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.h | 3
-rw-r--r--  drivers/infiniband/hw/qib/Kconfig | 17
-rw-r--r--  drivers/infiniband/hw/qib/Makefile | 17
-rw-r--r--  drivers/infiniband/hw/qib/qib.h | 1492
-rw-r--r--  drivers/infiniband/hw/qib/qib_6120_regs.h | 977
-rw-r--r--  drivers/infiniband/hw/qib/qib_7220.h | 149
-rw-r--r--  drivers/infiniband/hw/qib/qib_7220_regs.h | 1496
-rw-r--r--  drivers/infiniband/hw/qib/qib_7322_regs.h | 3163
-rw-r--r--  drivers/infiniband/hw/qib/qib_common.h | 798
-rw-r--r--  drivers/infiniband/hw/qib/qib_debugfs.c | 274
-rw-r--r--  drivers/infiniband/hw/qib/qib_debugfs.h | 45
-rw-r--r--  drivers/infiniband/hw/qib/qib_diag.c | 906
-rw-r--r--  drivers/infiniband/hw/qib/qib_driver.c | 798
-rw-r--r--  drivers/infiniband/hw/qib/qib_eeprom.c | 271
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c | 2401
-rw-r--r--  drivers/infiniband/hw/qib/qib_fs.c | 549
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba6120.c | 3533
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7220.c | 4596
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c | 8475
-rw-r--r--  drivers/infiniband/hw/qib/qib_init.c | 1782
-rw-r--r--  drivers/infiniband/hw/qib/qib_intr.c | 241
-rw-r--r--  drivers/infiniband/hw/qib/qib_mad.c | 2450
-rw-r--r--  drivers/infiniband/hw/qib/qib_mad.h | 300
-rw-r--r--  drivers/infiniband/hw/qib/qib_pcie.c | 598
-rw-r--r--  drivers/infiniband/hw/qib/qib_pio_copy.c | 64
-rw-r--r--  drivers/infiniband/hw/qib/qib_qp.c | 454
-rw-r--r--  drivers/infiniband/hw/qib/qib_qsfp.c | 549
-rw-r--r--  drivers/infiniband/hw/qib/qib_qsfp.h | 188
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c | 2131
-rw-r--r--  drivers/infiniband/hw/qib/qib_ruc.c | 314
-rw-r--r--  drivers/infiniband/hw/qib/qib_sd7220.c | 1445
-rw-r--r--  drivers/infiniband/hw/qib/qib_sdma.c | 999
-rw-r--r--  drivers/infiniband/hw/qib/qib_sysfs.c | 731
-rw-r--r--  drivers/infiniband/hw/qib/qib_twsi.c | 502
-rw-r--r--  drivers/infiniband/hw/qib/qib_tx.c | 566
-rw-r--r--  drivers/infiniband/hw/qib/qib_uc.c | 521
-rw-r--r--  drivers/infiniband/hw/qib/qib_ud.c | 583
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_pages.c | 137
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_sdma.c | 1470
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_sdma.h | 52
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.c | 1705
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.h | 398
-rw-r--r--  drivers/infiniband/hw/qib/qib_wc_ppc64.c | 62
-rw-r--r--  drivers/infiniband/hw/qib/qib_wc_x86_64.c | 150
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 4
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_verbs.h | 1
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c | 5
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h | 1
110 files changed, 1366 insertions(+), 49017 deletions(-)
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index aba96ca9bce5..df61b2299ec0 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/
-obj-$(CONFIG_INFINIBAND_QIB) += qib/
obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/
obj-$(CONFIG_INFINIBAND_EFA) += efa/
obj-$(CONFIG_INFINIBAND_IRDMA) += irdma/
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 063801384b2b..37c2bc3bdba5 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -4235,6 +4235,7 @@ free_mr:
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
@@ -4242,6 +4243,9 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
struct ib_umem *umem;
struct ib_mr *ib_mr;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
if (IS_ERR(umem))
return ERR_CAST(umem);
@@ -4255,6 +4259,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
u64 length, u64 virt_addr, int fd,
int mr_access_flags,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs)
{
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
@@ -4263,6 +4268,9 @@ struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
struct ib_umem *umem;
struct ib_mr *ib_mr;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length,
fd, mr_access_flags);
if (IS_ERR(umem_dmabuf))
@@ -4738,7 +4746,7 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bund
return err;
err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
- &offset, sizeof(length));
+ &offset, sizeof(offset));
if (err)
return err;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 22c9eb8e9cfc..fe00ab691a51 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -258,10 +258,12 @@ struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
int bnxt_re_dealloc_mw(struct ib_mw *mw);
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
u64 length, u64 virt_addr,
int fd, int mr_access_flags,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs);
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
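
A recurring pattern in this series: the RDMA core now threads a struct ib_dmah * (DMA handle) into the reg_user_mr paths, and providers that cannot honor it must refuse up front rather than silently ignore it. Below is a minimal standalone sketch of that guard; the names are illustrative, only the -EOPNOTSUPP convention is taken from the hunks in this diff.

#include <errno.h>
#include <stdio.h>

struct ib_dmah;	/* opaque handle passed down by the RDMA core */

/* Toy stand-in for a provider's reg_user_mr: reject DMA handles up
 * front, as the bnxt_re/cxgb4/efa/erdma/hns hunks in this diff do. */
static int reg_user_mr(struct ib_dmah *dmah)
{
	if (dmah)
		return -EOPNOTSUPP;
	/* ... normal MR registration would continue here ... */
	return 0;
}

int main(void)
{
	printf("no dmah: %d\n", reg_user_mr(NULL));
	return 0;
}
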
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index be34c605d516..dfe3177123e5 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1750,9 +1750,9 @@ static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
}
}
-static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
- struct bnxt_qplib_swqe *wqe,
- u16 *idx)
+static unsigned int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe,
+ u32 *idx)
{
struct bnxt_qplib_hwq *hwq;
int len, t_len, offt;
@@ -1769,7 +1769,7 @@ static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
il_src = (void *)wqe->sg_list[indx].addr;
t_len += len;
if (t_len > qp->max_inline_data)
- return -ENOMEM;
+ return BNXT_RE_INVAL_MSG_SIZE;
while (len) {
if (pull_dst) {
pull_dst = false;
@@ -1795,9 +1795,9 @@ static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
return t_len;
}
-static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
- struct bnxt_qplib_sge *ssge,
- u16 nsge, u16 *idx)
+static unsigned int bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
+ struct bnxt_qplib_sge *ssge,
+ u32 nsge, u32 *idx)
{
struct sq_sge *dsge;
int indx, len = 0;
@@ -1878,14 +1878,12 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_hwq *hwq;
struct bnxt_qplib_swq *swq;
bool sch_handler = false;
+ u32 wqe_idx, slots, idx;
u16 wqe_sz, qdf = 0;
bool msn_update;
void *base_hdr;
void *ext_hdr;
__le32 temp32;
- u32 wqe_idx;
- u32 slots;
- u16 idx;
hwq = &sq->hwq;
if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
@@ -1937,8 +1935,10 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
else
data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
&idx);
- if (data_len < 0)
- goto queue_err;
+ if (data_len > BNXT_RE_MAX_MSG_SIZE) {
+ rc = -EINVAL;
+ goto done;
+ }
/* Make sure we update MSN table only for wired wqes */
msn_update = true;
/* Specifics */
@@ -2139,8 +2139,8 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_hwq *hwq;
struct bnxt_qplib_swq *swq;
bool sch_handler = false;
- u16 wqe_sz, idx;
- u32 wqe_idx;
+ u32 wqe_idx, idx;
+ u16 wqe_sz;
int rc = 0;
hwq = &rq->hwq;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 0d9487c889ff..ab125f1d949e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -346,6 +346,9 @@ struct bnxt_qplib_qp {
u8 tos_dscp;
};
+#define BNXT_RE_MAX_MSG_SIZE 0x80000000
+#define BNXT_RE_INVAL_MSG_SIZE 0xFFFFFFFF
+
#define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)
#define CQE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_MAX_CQE_ENTRY_SIZE)
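
In the qplib_fp.c hunks above, bnxt_qplib_put_inline()/bnxt_qplib_put_sges() used to return -ENOMEM through a value the caller accumulates in a u32, so the old "data_len < 0" test could never fire. The fix returns an all-ones sentinel and bounds-checks against BNXT_RE_MAX_MSG_SIZE instead; because the sentinel (0xFFFFFFFF) exceeds the limit (0x80000000), one comparison catches both cases. A standalone sketch of the pattern, with the values mirroring the new defines and a toy helper:

#include <stdint.h>
#include <stdio.h>

#define MAX_MSG_SIZE	0x80000000u	/* mirrors BNXT_RE_MAX_MSG_SIZE */
#define INVAL_MSG_SIZE	0xFFFFFFFFu	/* mirrors BNXT_RE_INVAL_MSG_SIZE */

/* Hypothetical stand-in for bnxt_qplib_put_inline(): returns the copied
 * length, or the sentinel when the caller exceeds the inline limit. */
static unsigned int put_inline(uint32_t total_len, uint32_t max_inline)
{
	if (total_len > max_inline)
		return INVAL_MSG_SIZE;
	return total_len;
}

int main(void)
{
	uint32_t data_len = put_inline(8192, 4096);

	/* With u32 storage, "data_len < 0" is always false; compare
	 * against the limit instead, as the fixed driver does. */
	if (data_len > MAX_MSG_SIZE)
		printf("reject WQE: invalid message size\n");
	return 0;
}
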
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 9efd32a3dc55..68981399598d 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -674,7 +674,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) <<
CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT) &
CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK));
- req.access = (mr->access_flags & 0xFFFF);
+ req.access = (mr->access_flags & BNXT_QPLIB_MR_ACCESS_MASK);
req.va = cpu_to_le64(mr->va);
req.key = cpu_to_le32(mr->lkey);
if (_is_alloc_mr_unified(res->dattr->dev_cap_flags))
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index e626b05038a1..09faf4a1e849 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -111,6 +111,7 @@ struct bnxt_qplib_mrw {
struct bnxt_qplib_pd *pd;
int type;
u32 access_flags;
+#define BNXT_QPLIB_MR_ACCESS_MASK 0xFF
#define BNXT_QPLIB_FR_PMR 0x80000000
u32 lkey;
u32 rkey;
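
The named BNXT_QPLIB_MR_ACCESS_MASK replaces the old magic 0xFFFF and narrows the field to the low byte, keeping software-only bits such as BNXT_QPLIB_FR_PMR (bit 31, defined alongside access_flags in the header above) out of the firmware command. A small model; the 8-bit field width is inferred from the new mask, not stated in the diff:

#include <assert.h>
#include <stdint.h>

#define QPLIB_FR_PMR		0x80000000u	/* software-only flag */
#define QPLIB_MR_ACCESS_MASK	0xFFu		/* bits sent to firmware */

int main(void)
{
	uint32_t access_flags = 0x7 | QPLIB_FR_PMR;

	/* Only the low byte reaches req.access; the software flag is
	 * stripped instead of leaking into the firmware command. */
	uint8_t req_access = access_flags & QPLIB_MR_ACCESS_MASK;

	assert(req_access == 0x7);
	return 0;
}
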
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 7eceb3e9f4ce..024845f945ff 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -39,8 +39,8 @@
#ifndef __BNXT_RE_HSI_H__
#define __BNXT_RE_HSI_H__
-/* include bnxt_hsi.h from bnxt_en driver */
-#include "bnxt_hsi.h"
+/* include linux/bnxt/hsi.h */
+#include <linux/bnxt/hsi.h>
/* tx_doorbell (size:32b/4B) */
struct tx_doorbell {
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 034b85c42255..b67747ae6a68 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -905,8 +905,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
return 0;
err_free_status_page_and_wr_log:
- if (c4iw_wr_log && rdev->wr_log)
- kfree(rdev->wr_log);
+ kfree(rdev->wr_log);
free_page((unsigned long)rdev->status_page);
destroy_ocqp_pool:
c4iw_ocqp_pool_destroy(rdev);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 5b3007acaa1f..e17c1252536b 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -1006,6 +1006,7 @@ int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
void c4iw_dealloc(struct uld_ctx *ctx);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
u64 length, u64 virt, int acc,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index a2c71a1d93d5..dcdfe250bdbe 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -489,7 +489,8 @@ err_free_mhp:
}
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt, int acc, struct ib_udata *udata)
+ u64 virt, int acc, struct ib_dmah *dmah,
+ struct ib_udata *udata)
{
__be64 *pages;
int shift, n, i;
@@ -501,6 +502,9 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
pr_debug("ib_pd %p\n", pd);
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (length == ~0ULL)
return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index 838182d0409c..96f9c3bc98b2 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -107,6 +107,7 @@ struct efa_cq {
u16 cq_idx;
/* NULL when no interrupts requested */
struct efa_eq *eq;
+ struct ib_umem *umem;
};
struct efa_qp {
@@ -162,12 +163,16 @@ int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct uverbs_attr_bundle *attrs);
+int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct ib_umem *umem, struct uverbs_attr_bundle *attrs);
struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
u64 length, u64 virt_addr,
int fd, int access_flags,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs);
int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index fe0b6aec7839..57178dad5eb7 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_ADMIN_CMDS_H_
@@ -68,6 +68,7 @@ enum efa_admin_get_stats_type {
EFA_ADMIN_GET_STATS_TYPE_MESSAGES = 1,
EFA_ADMIN_GET_STATS_TYPE_RDMA_READ = 2,
EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE = 3,
+ EFA_ADMIN_GET_STATS_TYPE_NETWORK = 4,
};
enum efa_admin_get_stats_scope {
@@ -651,6 +652,18 @@ struct efa_admin_rdma_write_stats {
u64 write_recv_bytes;
};
+struct efa_admin_network_stats {
+ u64 retrans_bytes;
+
+ u64 retrans_pkts;
+
+ u64 retrans_timeout_events;
+
+ u64 unresponsive_remote_events;
+
+ u64 impaired_remote_conn_events;
+};
+
struct efa_admin_acq_get_stats_resp {
struct efa_admin_acq_common_desc acq_common_desc;
@@ -662,6 +675,8 @@ struct efa_admin_acq_get_stats_resp {
struct efa_admin_rdma_read_stats rdma_read_stats;
struct efa_admin_rdma_write_stats rdma_write_stats;
+
+ struct efa_admin_network_stats network_stats;
} u;
};
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index c6b89c45fdc9..9ead02800ac7 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "efa_com.h"
@@ -769,6 +769,11 @@ int efa_com_get_stats(struct efa_com_dev *edev,
struct efa_com_admin_queue *aq = &edev->aq;
struct efa_admin_aq_get_stats_cmd cmd = {};
struct efa_admin_acq_get_stats_resp resp;
+ struct efa_admin_rdma_write_stats *rws;
+ struct efa_admin_rdma_read_stats *rrs;
+ struct efa_admin_messages_stats *ms;
+ struct efa_admin_network_stats *ns;
+ struct efa_admin_basic_stats *bs;
int err;
cmd.aq_common_descriptor.opcode = EFA_ADMIN_GET_STATS;
@@ -791,29 +796,41 @@ int efa_com_get_stats(struct efa_com_dev *edev,
switch (cmd.type) {
case EFA_ADMIN_GET_STATS_TYPE_BASIC:
- result->basic_stats.tx_bytes = resp.u.basic_stats.tx_bytes;
- result->basic_stats.tx_pkts = resp.u.basic_stats.tx_pkts;
- result->basic_stats.rx_bytes = resp.u.basic_stats.rx_bytes;
- result->basic_stats.rx_pkts = resp.u.basic_stats.rx_pkts;
- result->basic_stats.rx_drops = resp.u.basic_stats.rx_drops;
+ bs = &resp.u.basic_stats;
+ result->basic_stats.tx_bytes = bs->tx_bytes;
+ result->basic_stats.tx_pkts = bs->tx_pkts;
+ result->basic_stats.rx_bytes = bs->rx_bytes;
+ result->basic_stats.rx_pkts = bs->rx_pkts;
+ result->basic_stats.rx_drops = bs->rx_drops;
break;
case EFA_ADMIN_GET_STATS_TYPE_MESSAGES:
- result->messages_stats.send_bytes = resp.u.messages_stats.send_bytes;
- result->messages_stats.send_wrs = resp.u.messages_stats.send_wrs;
- result->messages_stats.recv_bytes = resp.u.messages_stats.recv_bytes;
- result->messages_stats.recv_wrs = resp.u.messages_stats.recv_wrs;
+ ms = &resp.u.messages_stats;
+ result->messages_stats.send_bytes = ms->send_bytes;
+ result->messages_stats.send_wrs = ms->send_wrs;
+ result->messages_stats.recv_bytes = ms->recv_bytes;
+ result->messages_stats.recv_wrs = ms->recv_wrs;
break;
case EFA_ADMIN_GET_STATS_TYPE_RDMA_READ:
- result->rdma_read_stats.read_wrs = resp.u.rdma_read_stats.read_wrs;
- result->rdma_read_stats.read_bytes = resp.u.rdma_read_stats.read_bytes;
- result->rdma_read_stats.read_wr_err = resp.u.rdma_read_stats.read_wr_err;
- result->rdma_read_stats.read_resp_bytes = resp.u.rdma_read_stats.read_resp_bytes;
+ rrs = &resp.u.rdma_read_stats;
+ result->rdma_read_stats.read_wrs = rrs->read_wrs;
+ result->rdma_read_stats.read_bytes = rrs->read_bytes;
+ result->rdma_read_stats.read_wr_err = rrs->read_wr_err;
+ result->rdma_read_stats.read_resp_bytes = rrs->read_resp_bytes;
break;
case EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE:
- result->rdma_write_stats.write_wrs = resp.u.rdma_write_stats.write_wrs;
- result->rdma_write_stats.write_bytes = resp.u.rdma_write_stats.write_bytes;
- result->rdma_write_stats.write_wr_err = resp.u.rdma_write_stats.write_wr_err;
- result->rdma_write_stats.write_recv_bytes = resp.u.rdma_write_stats.write_recv_bytes;
+ rws = &resp.u.rdma_write_stats;
+ result->rdma_write_stats.write_wrs = rws->write_wrs;
+ result->rdma_write_stats.write_bytes = rws->write_bytes;
+ result->rdma_write_stats.write_wr_err = rws->write_wr_err;
+ result->rdma_write_stats.write_recv_bytes = rws->write_recv_bytes;
+ break;
+ case EFA_ADMIN_GET_STATS_TYPE_NETWORK:
+ ns = &resp.u.network_stats;
+ result->network_stats.retrans_bytes = ns->retrans_bytes;
+ result->network_stats.retrans_pkts = ns->retrans_pkts;
+ result->network_stats.retrans_timeout_events = ns->retrans_timeout_events;
+ result->network_stats.unresponsive_remote_events = ns->unresponsive_remote_events;
+ result->network_stats.impaired_remote_conn_events = ns->impaired_remote_conn_events;
break;
}
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
index 5511355b700d..3ac2686abba1 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COM_CMD_H_
@@ -283,11 +283,20 @@ struct efa_com_rdma_write_stats {
u64 write_recv_bytes;
};
+struct efa_com_network_stats {
+ u64 retrans_bytes;
+ u64 retrans_pkts;
+ u64 retrans_timeout_events;
+ u64 unresponsive_remote_events;
+ u64 impaired_remote_conn_events;
+};
+
union efa_com_get_stats_result {
struct efa_com_basic_stats basic_stats;
struct efa_com_messages_stats messages_stats;
struct efa_com_rdma_read_stats rdma_read_stats;
struct efa_com_rdma_write_stats rdma_write_stats;
+ struct efa_com_network_stats network_stats;
};
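
The result union above gains a network_stats member; callers select the member according to the stats type they requested, as the efa_com_get_stats() switch does. A generic userspace model of that tagged-union copy-out, with all names illustrative:

#include <stdint.h>
#include <stdio.h>

enum stats_type { STATS_BASIC, STATS_NETWORK };

struct basic_stats   { uint64_t tx_bytes, rx_bytes; };
struct network_stats { uint64_t retrans_bytes, retrans_pkts; };

union stats_result {
	struct basic_stats   basic;
	struct network_stats network;
};

/* Interpret the shared buffer per the requested type. */
static void print_stats(enum stats_type type, const union stats_result *r)
{
	switch (type) {
	case STATS_BASIC:
		printf("tx=%llu rx=%llu\n",
		       (unsigned long long)r->basic.tx_bytes,
		       (unsigned long long)r->basic.rx_bytes);
		break;
	case STATS_NETWORK:
		printf("retrans=%llu pkts=%llu\n",
		       (unsigned long long)r->network.retrans_bytes,
		       (unsigned long long)r->network.retrans_pkts);
		break;
	}
}

int main(void)
{
	union stats_result r = { .network = { 100, 2 } };

	print_stats(STATS_NETWORK, &r);
	return 0;
}
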
int efa_com_create_qp(struct efa_com_dev *edev,
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index 4f03c0ec819f..6c415b9adb5f 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -372,6 +372,7 @@ static const struct ib_device_ops efa_dev_ops = {
.alloc_pd = efa_alloc_pd,
.alloc_ucontext = efa_alloc_ucontext,
.create_cq = efa_create_cq,
+ .create_cq_umem = efa_create_cq_umem,
.create_qp = efa_create_qp,
.create_user_ah = efa_create_ah,
.dealloc_pd = efa_dealloc_pd,
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index a8645a40730f..886923d5fe50 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -64,6 +64,11 @@ struct efa_user_mmap_entry {
op(EFA_RDMA_WRITE_BYTES, "rdma_write_bytes") \
op(EFA_RDMA_WRITE_WR_ERR, "rdma_write_wr_err") \
op(EFA_RDMA_WRITE_RECV_BYTES, "rdma_write_recv_bytes") \
+ op(EFA_RETRANS_BYTES, "retrans_bytes") \
+ op(EFA_RETRANS_PKTS, "retrans_pkts") \
+ op(EFA_RETRANS_TIMEOUT_EVENS, "retrans_timeout_events") \
+ op(EFA_UNRESPONSIVE_REMOTE_EVENTS, "unresponsive_remote_events") \
+ op(EFA_IMPAIRED_REMOTE_CONN_EVENTS, "impaired_remote_conn_events") \
#define EFA_STATS_ENUM(ename, name) ename,
#define EFA_STATS_STR(ename, nam) \
@@ -249,6 +254,7 @@ int efa_query_device(struct ib_device *ibdev,
resp.max_rdma_size = dev_attr->max_rdma_size;
resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_SGID;
+ resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_EXT_MEM;
if (EFA_DEV_CAP(dev, RDMA_READ))
resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
@@ -1082,8 +1088,11 @@ int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
xa_erase(&dev->cqs_xa, cq->cq_idx);
synchronize_irq(cq->eq->irq.irqn);
}
- efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
- DMA_FROM_DEVICE);
+
+ if (cq->umem)
+ ib_umem_release(cq->umem);
+ else
+ efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE);
return 0;
}
@@ -1122,8 +1131,8 @@ static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
return 0;
}
-int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
- struct uverbs_attr_bundle *attrs)
+int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct ib_umem *umem, struct uverbs_attr_bundle *attrs)
{
struct ib_udata *udata = &attrs->driver_udata;
struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
@@ -1202,11 +1211,30 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->ucontext = ucontext;
cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
- cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
- DMA_FROM_DEVICE);
- if (!cq->cpu_addr) {
- err = -ENOMEM;
- goto err_out;
+
+ if (umem) {
+ if (umem->length < cq->size) {
+ ibdev_dbg(&dev->ibdev, "External memory too small\n");
+ err = -EINVAL;
+ goto err_free_mem;
+ }
+
+ if (!ib_umem_is_contiguous(umem)) {
+ ibdev_dbg(&dev->ibdev, "Non contiguous CQ unsupported\n");
+ err = -EINVAL;
+ goto err_free_mem;
+ }
+
+ cq->cpu_addr = NULL;
+ cq->dma_addr = ib_umem_start_dma_addr(umem);
+ cq->umem = umem;
+ } else {
+ cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
+ DMA_FROM_DEVICE);
+ if (!cq->cpu_addr) {
+ err = -ENOMEM;
+ goto err_out;
+ }
}
params.uarn = cq->ucontext->uarn;
@@ -1223,7 +1251,7 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
err = efa_com_create_cq(&dev->edev, &params, &result);
if (err)
- goto err_free_mapped;
+ goto err_free_mem;
resp.db_off = result.db_off;
resp.cq_idx = result.cq_idx;
@@ -1231,7 +1259,9 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->ibcq.cqe = result.actual_depth;
WARN_ON_ONCE(entries != result.actual_depth);
- err = cq_mmap_entries_setup(dev, cq, &resp, result.db_valid);
+ if (!umem)
+ err = cq_mmap_entries_setup(dev, cq, &resp, result.db_valid);
+
if (err) {
ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
cq->cq_idx);
@@ -1269,15 +1299,23 @@ err_remove_mmap:
efa_cq_user_mmap_entries_remove(cq);
err_destroy_cq:
efa_destroy_cq_idx(dev, cq->cq_idx);
-err_free_mapped:
- efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
- DMA_FROM_DEVICE);
+err_free_mem:
+ if (umem)
+ ib_umem_release(umem);
+ else
+ efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE);
err_out:
atomic64_inc(&dev->stats.create_cq_err);
return err;
}
+int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ return efa_create_cq_umem(ibcq, attr, NULL, attrs);
+}
+
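
efa_create_cq() is now a thin wrapper over efa_create_cq_umem() with a NULL umem, so a single function owns both the externally supplied and driver-allocated CQ buffer paths. A compact model of the same delegation, with toy types standing in for ib_cq/ib_umem:

#include <stddef.h>
#include <stdio.h>

struct cq   { void *cpu_addr; unsigned long dma_addr; };
struct umem { unsigned long dma_addr; size_t length; };

/* umem-aware path: use external memory when provided, otherwise fall
 * back to a driver allocation (elided here). */
static int create_cq_umem(struct cq *cq, size_t size, struct umem *umem)
{
	if (umem) {
		if (umem->length < size)
			return -1;		/* external buffer too small */
		cq->cpu_addr = NULL;		/* device-only mapping */
		cq->dma_addr = umem->dma_addr;
	}
	return 0;
}

/* Legacy entry point stays a thin wrapper, as in the EFA patch. */
static int create_cq(struct cq *cq, size_t size)
{
	return create_cq_umem(cq, size, NULL);
}

int main(void)
{
	struct cq cq = { 0 };

	printf("%d\n", create_cq(&cq, 4096));
	return 0;
}
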
static int umem_to_page_list(struct efa_dev *dev,
struct ib_umem *umem,
u64 *page_list,
@@ -1727,6 +1765,7 @@ static int efa_register_mr(struct ib_pd *ibpd, struct efa_mr *mr, u64 start,
struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
u64 length, u64 virt_addr,
int fd, int access_flags,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs)
{
struct efa_dev *dev = to_edev(ibpd->device);
@@ -1734,6 +1773,11 @@ struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
struct efa_mr *mr;
int err;
+ if (dmah) {
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
mr = efa_alloc_mr(ibpd, access_flags, &attrs->driver_udata);
if (IS_ERR(mr)) {
err = PTR_ERR(mr);
@@ -1766,12 +1810,18 @@ err_out:
struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct efa_dev *dev = to_edev(ibpd->device);
struct efa_mr *mr;
int err;
+ if (dmah) {
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
mr = efa_alloc_mr(ibpd, access_flags, udata);
if (IS_ERR(mr)) {
err = PTR_ERR(mr);
@@ -2186,6 +2236,7 @@ static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
struct efa_com_rdma_write_stats *rws;
struct efa_com_rdma_read_stats *rrs;
struct efa_com_messages_stats *ms;
+ struct efa_com_network_stats *ns;
struct efa_com_basic_stats *bs;
int err;
@@ -2238,6 +2289,18 @@ static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
stats->value[EFA_RDMA_WRITE_RECV_BYTES] = rws->write_recv_bytes;
}
+ params.type = EFA_ADMIN_GET_STATS_TYPE_NETWORK;
+ err = efa_com_get_stats(&dev->edev, &params, &result);
+ if (err)
+ return err;
+
+ ns = &result.network_stats;
+ stats->value[EFA_RETRANS_BYTES] = ns->retrans_bytes;
+ stats->value[EFA_RETRANS_PKTS] = ns->retrans_pkts;
+ stats->value[EFA_RETRANS_TIMEOUT_EVENS] = ns->retrans_timeout_events;
+ stats->value[EFA_UNRESPONSIVE_REMOTE_EVENTS] = ns->unresponsive_remote_events;
+ stats->value[EFA_IMPAIRED_REMOTE_CONN_EVENTS] = ns->impaired_remote_conn_events;
+
return ARRAY_SIZE(efa_port_stats_descs);
}
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index af36a8d2df22..94c211df09d8 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -629,7 +629,8 @@ err_free_mtt:
static void erdma_destroy_mtt_buf_sg(struct erdma_dev *dev,
struct erdma_mtt *mtt)
{
- dma_unmap_sg(&dev->pdev->dev, mtt->sglist, mtt->nsg, DMA_TO_DEVICE);
+ dma_unmap_sg(&dev->pdev->dev, mtt->sglist,
+ DIV_ROUND_UP(mtt->size, PAGE_SIZE), DMA_TO_DEVICE);
vfree(mtt->sglist);
}
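
The erdma fix above follows the DMA API rule that dma_unmap_sg() must be called with the original number of scatterlist entries handed to dma_map_sg(), not the (possibly smaller) mapped count that call returns, since an IOMMU may coalesce entries. A userspace model of the contract; map_sg/unmap_sg are toy stand-ins:

#include <stdio.h>

static int map_sg(int nents)		/* returns mapped segment count */
{
	return (nents + 1) / 2;		/* pretend the IOMMU coalesced pairs */
}

static void unmap_sg(int nents)
{
	printf("unmapping %d original entries\n", nents);
}

int main(void)
{
	int orig_nents = 8;
	int mapped = map_sg(orig_nents);

	(void)mapped;		/* use 'mapped' only for I/O, never for unmap */
	unmap_sg(orig_nents);	/* correct: the original count */
	return 0;
}
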
@@ -1199,13 +1200,17 @@ int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
}
struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
- u64 virt, int access, struct ib_udata *udata)
+ u64 virt, int access, struct ib_dmah *dmah,
+ struct ib_udata *udata)
{
struct erdma_mr *mr = NULL;
struct erdma_dev *dev = to_edev(ibpd->device);
u32 stag;
int ret;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (!len || len > dev->attrs.max_mr_size)
return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
index f9408ccc8bad..ef411b81fbd7 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.h
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -452,7 +452,8 @@ int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
void erdma_disassociate_ucontext(struct ib_ucontext *ibcontext);
int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
- u64 virt, int access, struct ib_udata *udata);
+ u64 virt, int access, struct ib_dmah *dmah,
+ struct ib_udata *udata);
struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int rights);
int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *data);
int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma);
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 7ead8746b79b..ee7fedc67b86 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -92,9 +92,7 @@ static void cpu_mask_set_put(struct cpu_mask_set *set, int cpu)
/* Initialize non-HT cpu cores mask */
void init_real_cpu_mask(void)
{
- int possible, curr_cpu, i, ht;
-
- cpumask_clear(&node_affinity.real_cpu_mask);
+ int possible, curr_cpu, ht;
/* Start with cpu online mask as the real cpu mask */
cpumask_copy(&node_affinity.real_cpu_mask, cpu_online_mask);
@@ -110,17 +108,10 @@ void init_real_cpu_mask(void)
* "real" cores. Assumes that HT cores are not enumerated in
* succession (except in the single core case).
*/
- curr_cpu = cpumask_first(&node_affinity.real_cpu_mask);
- for (i = 0; i < possible / ht; i++)
- curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
- /*
- * Step 2. Remove the remaining HT siblings. Use cpumask_next() to
- * skip any gaps.
- */
- for (; i < possible; i++) {
- cpumask_clear_cpu(curr_cpu, &node_affinity.real_cpu_mask);
- curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
- }
+ curr_cpu = cpumask_nth(possible / ht, &node_affinity.real_cpu_mask) + 1;
+
+ /* Step 2. Remove the remaining HT siblings. */
+ cpumask_clear_cpus(&node_affinity.real_cpu_mask, curr_cpu, nr_cpu_ids - curr_cpu);
}
int node_affinity_init(void)
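
The rewritten init_real_cpu_mask() replaces a per-CPU walk with cpumask_nth() plus a single range clear: keep one bit per physical core, drop the HT-sibling tail. The same trimming on a plain 64-bit mask, assuming 8 online CPUs with 2 HT siblings per core:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask = 0xFFull;	/* 8 online CPUs */
	int possible = 8, ht = 2;
	int keep = possible / ht;	/* 4 physical cores */

	/* One range operation, analogous to cpumask_clear_cpus(). */
	mask &= (1ull << keep) - 1;

	printf("real cpu mask: 0x%llx\n", (unsigned long long)mask);
	return 0;
}
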
@@ -346,9 +337,10 @@ static int _dev_comp_vect_cpu_get(struct hfi1_devdata *dd,
&entry->def_intr.used);
/* If there are non-interrupt CPUs available, use them first */
- if (!cpumask_empty(non_intr_cpus))
- cpu = cpumask_first(non_intr_cpus);
- else /* Otherwise, use interrupt CPUs */
+ cpu = cpumask_first(non_intr_cpus);
+
+ /* Otherwise, use interrupt CPUs */
+ if (cpu >= nr_cpu_ids)
cpu = cpumask_first(available_cpus);
if (cpu >= nr_cpu_ids) { /* empty */
@@ -963,32 +955,23 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask,
struct hfi1_affinity_node_list *affinity)
{
- int possible, curr_cpu, i;
- uint num_cores_per_socket = node_affinity.num_online_cpus /
- affinity->num_core_siblings /
- node_affinity.num_online_nodes;
+ int curr_cpu;
+ uint num_cores;
cpumask_copy(hw_thread_mask, &affinity->proc.mask);
- if (affinity->num_core_siblings > 0) {
- /* Removing other siblings not needed for now */
- possible = cpumask_weight(hw_thread_mask);
- curr_cpu = cpumask_first(hw_thread_mask);
- for (i = 0;
- i < num_cores_per_socket * node_affinity.num_online_nodes;
- i++)
- curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
-
- for (; i < possible; i++) {
- cpumask_clear_cpu(curr_cpu, hw_thread_mask);
- curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
- }
- /* Identifying correct HW threads within physical cores */
- cpumask_shift_left(hw_thread_mask, hw_thread_mask,
- num_cores_per_socket *
- node_affinity.num_online_nodes *
- hw_thread_no);
- }
+ if (affinity->num_core_siblings == 0)
+ return;
+
+ num_cores = rounddown(node_affinity.num_online_cpus / affinity->num_core_siblings,
+ node_affinity.num_online_nodes);
+
+ /* Removing other siblings not needed for now */
+ curr_cpu = cpumask_nth(num_cores * node_affinity.num_online_nodes, hw_thread_mask) + 1;
+ cpumask_clear_cpus(hw_thread_mask, curr_cpu, nr_cpu_ids - curr_cpu);
+
+ /* Identifying correct HW threads within physical cores */
+ cpumask_shift_left(hw_thread_mask, hw_thread_mask, num_cores * hw_thread_no);
}
int hfi1_get_proc_affinity(int node)
@@ -1087,22 +1070,19 @@ int hfi1_get_proc_affinity(int node)
* If HT cores are enabled, identify which HW threads within the
* physical cores should be used.
*/
- if (affinity->num_core_siblings > 0) {
- for (i = 0; i < affinity->num_core_siblings; i++) {
- find_hw_thread_mask(i, hw_thread_mask, affinity);
+ for (i = 0; i < affinity->num_core_siblings; i++) {
+ find_hw_thread_mask(i, hw_thread_mask, affinity);
- /*
- * If there's at least one available core for this HW
- * thread number, stop looking for a core.
- *
- * diff will always be not empty at least once in this
- * loop as the used mask gets reset when
- * (set->mask == set->used) before this loop.
- */
- cpumask_andnot(diff, hw_thread_mask, &set->used);
- if (!cpumask_empty(diff))
- break;
- }
+ /*
+ * If there's at least one available core for this HW
+ * thread number, stop looking for a core.
+ *
+ * diff will always be not empty at least once in this
+ * loop as the used mask gets reset when
+ * (set->mask == set->used) before this loop.
+ */
+ if (cpumask_andnot(diff, hw_thread_mask, &set->used))
+ break;
}
hfi1_cdbg(PROC, "Same available HW thread on all physical CPUs: %*pbl",
cpumask_pr_args(hw_thread_mask));
@@ -1133,8 +1113,7 @@ int hfi1_get_proc_affinity(int node)
* used for process assignments using the same method as
* the preferred NUMA node.
*/
- cpumask_andnot(diff, available_mask, intrs_mask);
- if (!cpumask_empty(diff))
+ if (cpumask_andnot(diff, available_mask, intrs_mask))
cpumask_copy(available_mask, diff);
/* If we don't have CPUs on the preferred node, use other NUMA nodes */
@@ -1150,8 +1129,7 @@ int hfi1_get_proc_affinity(int node)
* At first, we don't want to place processes on the same
* CPUs as interrupt handlers.
*/
- cpumask_andnot(diff, available_mask, intrs_mask);
- if (!cpumask_empty(diff))
+ if (cpumask_andnot(diff, available_mask, intrs_mask))
cpumask_copy(available_mask, diff);
}
hfi1_cdbg(PROC, "Possible CPUs for process: %*pbl",
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index a1e01b447265..ac37ab7f8995 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -22,34 +22,6 @@
static struct dentry *hfi1_dbg_root;
-/* wrappers to enforce srcu in seq file */
-ssize_t hfi1_seq_read(struct file *file, char __user *buf, size_t size,
- loff_t *ppos)
-{
- struct dentry *d = file->f_path.dentry;
- ssize_t r;
-
- r = debugfs_file_get(d);
- if (unlikely(r))
- return r;
- r = seq_read(file, buf, size, ppos);
- debugfs_file_put(d);
- return r;
-}
-
-loff_t hfi1_seq_lseek(struct file *file, loff_t offset, int whence)
-{
- struct dentry *d = file->f_path.dentry;
- loff_t r;
-
- r = debugfs_file_get(d);
- if (unlikely(r))
- return r;
- r = seq_lseek(file, offset, whence);
- debugfs_file_put(d);
- return r;
-}
-
#define private2dd(file) (file_inode(file)->i_private)
#define private2ppd(file) (file_inode(file)->i_private)
diff --git a/drivers/infiniband/hw/hfi1/debugfs.h b/drivers/infiniband/hw/hfi1/debugfs.h
index 54d952a4016c..65b48839abc6 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.h
+++ b/drivers/infiniband/hw/hfi1/debugfs.h
@@ -33,16 +33,11 @@ static int _##name##_open(struct inode *inode, struct file *s) \
static const struct file_operations _##name##_file_ops = { \
.owner = THIS_MODULE, \
.open = _##name##_open, \
- .read = hfi1_seq_read, \
- .llseek = hfi1_seq_lseek, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
.release = seq_release \
}
-
-ssize_t hfi1_seq_read(struct file *file, char __user *buf, size_t size,
- loff_t *ppos);
-loff_t hfi1_seq_lseek(struct file *file, loff_t offset, int whence);
-
#ifdef CONFIG_DEBUG_FS
void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd);
void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd);
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
index ec9ee59fcf0c..a45cbffd52c7 100644
--- a/drivers/infiniband/hw/hfi1/fault.c
+++ b/drivers/infiniband/hw/hfi1/fault.c
@@ -104,9 +104,6 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
goto free_data;
}
- ret = debugfs_file_get(file->f_path.dentry);
- if (unlikely(ret))
- goto free_data;
ptr = data;
token = ptr;
for (ptr = data; *ptr; ptr = end + 1, token = ptr) {
@@ -154,7 +151,6 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
}
ret = len;
- debugfs_file_put(file->f_path.dentry);
free_data:
kfree(data);
return ret;
@@ -173,9 +169,6 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
data = kcalloc(datalen, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- ret = debugfs_file_get(file->f_path.dentry);
- if (unlikely(ret))
- goto free_data;
bit = find_first_bit(fault->opcodes, bitsize);
while (bit < bitsize) {
zero = find_next_zero_bit(fault->opcodes, bitsize, bit);
@@ -189,11 +182,9 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
bit);
bit = find_next_bit(fault->opcodes, bitsize, zero);
}
- debugfs_file_put(file->f_path.dentry);
data[size - 1] = '\n';
data[size] = '\0';
ret = simple_read_from_buffer(buf, len, pos, data, size);
-free_data:
kfree(data);
return ret;
}
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index d94216c7d576..372cfd13dc61 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -134,7 +134,7 @@ static struct attribute *port_cc_attributes[] = {
static const struct attribute_group port_cc_group = {
.name = "CCMgtA",
.attrs = port_cc_attributes,
- .bin_attrs_new = port_cc_bin_attributes,
+ .bin_attrs = port_cc_bin_attributes,
};
/* Start sc2vl */
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 1dcc9cbb4678..78ee04a48a74 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -316,16 +316,6 @@ struct hns_roce_mtr {
struct hns_roce_hem_cfg hem_cfg; /* config for hardware addressing */
};
-struct hns_roce_mw {
- struct ib_mw ibmw;
- u32 pdn;
- u32 rkey;
- int enabled; /* MW's active status */
- u32 pbl_hop_num;
- u32 pbl_ba_pg_sz;
- u32 pbl_buf_pg_sz;
-};
-
struct hns_roce_mr {
struct ib_mr ibmr;
u64 iova; /* MR's virtual original addr */
@@ -856,6 +846,7 @@ struct hns_roce_caps {
u16 default_ceq_arm_st;
u8 cong_cap;
enum hns_roce_cong_type default_cong_type;
+ u32 max_ack_req_msg_len;
};
enum hns_roce_device_state {
@@ -933,7 +924,6 @@ struct hns_roce_hw {
struct hns_roce_mr *mr, int flags,
void *mb_buf);
int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
- int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
dma_addr_t dma_handle);
@@ -1078,11 +1068,6 @@ static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
return container_of(ibmr, struct hns_roce_mr, ibmr);
}
-static inline struct hns_roce_mw *to_hr_mw(struct ib_mw *ibmw)
-{
- return container_of(ibmw, struct hns_roce_mw, ibmw);
-}
-
static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
return container_of(ibqp, struct hns_roce_qp, ibqp);
@@ -1234,6 +1219,7 @@ int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
u64 length, u64 virt_addr,
@@ -1246,9 +1232,6 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
unsigned long key_to_hw_index(u32 key);
-int hns_roce_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
-int hns_roce_dealloc_mw(struct ib_mw *ibmw);
-
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);
struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
u32 page_shift, u32 flags);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index ca0798224e56..3d479c63b117 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -249,15 +249,12 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
}
static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
- unsigned long hem_alloc_size,
- gfp_t gfp_mask)
+ unsigned long hem_alloc_size)
{
struct hns_roce_hem *hem;
int order;
void *buf;
- WARN_ON(gfp_mask & __GFP_HIGHMEM);
-
order = get_order(hem_alloc_size);
if (PAGE_SIZE << order != hem_alloc_size) {
dev_err(hr_dev->dev, "invalid hem_alloc_size: %lu!\n",
@@ -265,13 +262,12 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
return NULL;
}
- hem = kmalloc(sizeof(*hem),
- gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
+ hem = kmalloc(sizeof(*hem), GFP_KERNEL);
if (!hem)
return NULL;
buf = dma_alloc_coherent(hr_dev->dev, hem_alloc_size,
- &hem->dma, gfp_mask);
+ &hem->dma, GFP_KERNEL);
if (!buf)
goto fail;
@@ -378,7 +374,6 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
{
u32 bt_size = mhop->bt_chunk_size;
struct device *dev = hr_dev->dev;
- gfp_t flag;
u64 bt_ba;
u32 size;
int ret;
@@ -417,8 +412,7 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
* alloc bt space chunk for MTT/CQE.
*/
size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
- flag = GFP_KERNEL | __GFP_NOWARN;
- table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size, flag);
+ table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size);
if (!table->hem[index->buf]) {
ret = -ENOMEM;
goto err_alloc_hem;
@@ -546,9 +540,7 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
goto out;
}
- table->hem[i] = hns_roce_alloc_hem(hr_dev,
- table->table_chunk_size,
- GFP_KERNEL | __GFP_NOWARN);
+ table->hem[i] = hns_roce_alloc_hem(hr_dev, table->table_chunk_size);
if (!table->hem[i]) {
ret = -ENOMEM;
goto out;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index fa8747656f25..64bca08f3f1a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -144,7 +144,7 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
u64 pbl_ba;
/* use ib_access_flags */
- hr_reg_write_bool(fseg, FRMR_BIND_EN, wr->access & IB_ACCESS_MW_BIND);
+ hr_reg_write_bool(fseg, FRMR_BIND_EN, 0);
hr_reg_write_bool(fseg, FRMR_ATOMIC,
wr->access & IB_ACCESS_REMOTE_ATOMIC);
hr_reg_write_bool(fseg, FRMR_RR, wr->access & IB_ACCESS_REMOTE_READ);
@@ -2196,31 +2196,36 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
{
- struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
+ struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM] = {};
struct hns_roce_caps *caps = &hr_dev->caps;
struct hns_roce_query_pf_caps_a *resp_a;
struct hns_roce_query_pf_caps_b *resp_b;
struct hns_roce_query_pf_caps_c *resp_c;
struct hns_roce_query_pf_caps_d *resp_d;
struct hns_roce_query_pf_caps_e *resp_e;
+ struct hns_roce_query_pf_caps_f *resp_f;
enum hns_roce_opcode_type cmd;
int ctx_hop_num;
int pbl_hop_num;
+ int cmd_num;
int ret;
int i;
cmd = hr_dev->is_vf ? HNS_ROCE_OPC_QUERY_VF_CAPS_NUM :
HNS_ROCE_OPC_QUERY_PF_CAPS_NUM;
+ cmd_num = hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ?
+ HNS_ROCE_QUERY_PF_CAPS_CMD_NUM_HIP08 :
+ HNS_ROCE_QUERY_PF_CAPS_CMD_NUM;
- for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) {
+ for (i = 0; i < cmd_num - 1; i++) {
hns_roce_cmq_setup_basic_desc(&desc[i], cmd, true);
- if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1))
- desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
- else
- desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
}
- ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM);
+ hns_roce_cmq_setup_basic_desc(&desc[cmd_num - 1], cmd, true);
+ desc[cmd_num - 1].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+
+ ret = hns_roce_cmq_send(hr_dev, desc, cmd_num);
if (ret)
return ret;
@@ -2229,6 +2234,7 @@ static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;
+ resp_f = (struct hns_roce_query_pf_caps_f *)desc[5].data;
caps->local_ca_ack_delay = resp_a->local_ca_ack_delay;
caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
@@ -2293,6 +2299,8 @@ static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
caps->reserved_srqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_SRQS);
caps->reserved_lkey = hr_reg_read(resp_e, PF_CAPS_E_RSV_LKEYS);
+ caps->max_ack_req_msg_len = le32_to_cpu(resp_f->max_ack_req_msg_len);
+
caps->qpc_hop_num = ctx_hop_num;
caps->sccc_hop_num = ctx_hop_num;
caps->srqc_hop_num = ctx_hop_num;
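
HIP08 firmware answers the capability query in 5 descriptors while newer revisions use 6; every descriptor except the last must carry the NEXT flag so the firmware treats them as one chained command, which is what the reworked loop above does. A minimal model of that chaining, with an illustrative flag value:

#include <stdint.h>
#include <stdio.h>

#define CMD_FLAG_NEXT 0x1	/* stand-in for HNS_ROCE_CMD_FLAG_NEXT */

struct desc { uint16_t flag; };

/* Chain cmd_num descriptors, mirroring hns_roce_query_caps(). */
static void setup_chain(struct desc *desc, int cmd_num)
{
	int i;

	for (i = 0; i < cmd_num - 1; i++)
		desc[i].flag |= CMD_FLAG_NEXT;
	desc[cmd_num - 1].flag &= ~CMD_FLAG_NEXT;
}

int main(void)
{
	struct desc desc[6] = { 0 };

	setup_chain(desc, 6);	/* newer revisions: 6 descriptors */
	printf("last flag: %u\n", desc[5].flag);
	return 0;
}
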
@@ -2627,7 +2635,7 @@ static struct ib_pd *free_mr_init_pd(struct hns_roce_dev *hr_dev)
struct ib_pd *pd;
hr_pd = kzalloc(sizeof(*hr_pd), GFP_KERNEL);
- if (ZERO_OR_NULL_PTR(hr_pd))
+ if (!hr_pd)
return NULL;
pd = &hr_pd->ibpd;
pd->device = ibdev;
@@ -2658,7 +2666,7 @@ static struct ib_cq *free_mr_init_cq(struct hns_roce_dev *hr_dev)
cq_init_attr.cqe = HNS_ROCE_FREE_MR_USED_CQE_NUM;
hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
- if (ZERO_OR_NULL_PTR(hr_cq))
+ if (!hr_cq)
return NULL;
cq = &hr_cq->ib_cq;
@@ -2691,7 +2699,7 @@ static int free_mr_init_qp(struct hns_roce_dev *hr_dev, struct ib_cq *cq,
int ret;
hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
- if (ZERO_OR_NULL_PTR(hr_qp))
+ if (!hr_qp)
return -ENOMEM;
qp = &hr_qp->ibqp;
@@ -2986,14 +2994,22 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
{
int ret;
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+ ret = free_mr_init(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "failed to init free mr!\n");
+ return ret;
+ }
+ }
+
/* The hns ROCEE requires the extdb info to be cleared before using */
ret = hns_roce_clear_extdb_list_info(hr_dev);
if (ret)
- return ret;
+ goto err_clear_extdb_failed;
ret = get_hem_table(hr_dev);
if (ret)
- return ret;
+ goto err_get_hem_table_failed;
if (hr_dev->is_vf)
return 0;
@@ -3008,6 +3024,11 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
err_llm_init_failed:
put_hem_table(hr_dev);
+err_get_hem_table_failed:
+ hns_roce_function_clear(hr_dev);
+err_clear_extdb_failed:
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+ free_mr_exit(hr_dev);
return ret;
}
@@ -3313,8 +3334,6 @@ static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID);
hr_reg_write(mpt_entry, MPT_PD, mr->pd);
- hr_reg_write_bool(mpt_entry, MPT_BIND_EN,
- mr->access & IB_ACCESS_MW_BIND);
hr_reg_write_bool(mpt_entry, MPT_ATOMIC_EN,
mr->access & IB_ACCESS_REMOTE_ATOMIC);
hr_reg_write_bool(mpt_entry, MPT_RR_EN,
@@ -3358,8 +3377,6 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
hr_reg_write(mpt_entry, MPT_PD, mr->pd);
if (flags & IB_MR_REREG_ACCESS) {
- hr_reg_write(mpt_entry, MPT_BIND_EN,
- (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
hr_reg_write(mpt_entry, MPT_ATOMIC_EN,
mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
hr_reg_write(mpt_entry, MPT_RR_EN,
@@ -3397,7 +3414,6 @@ static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
hr_reg_enable(mpt_entry, MPT_R_INV_EN);
hr_reg_enable(mpt_entry, MPT_FRE);
- hr_reg_clear(mpt_entry, MPT_MR_MW);
hr_reg_enable(mpt_entry, MPT_BPD);
hr_reg_clear(mpt_entry, MPT_PA);
@@ -3417,38 +3433,6 @@ static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
return 0;
}
-static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
-{
- struct hns_roce_v2_mpt_entry *mpt_entry;
-
- mpt_entry = mb_buf;
- memset(mpt_entry, 0, sizeof(*mpt_entry));
-
- hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE);
- hr_reg_write(mpt_entry, MPT_PD, mw->pdn);
-
- hr_reg_enable(mpt_entry, MPT_R_INV_EN);
- hr_reg_enable(mpt_entry, MPT_LW_EN);
-
- hr_reg_enable(mpt_entry, MPT_MR_MW);
- hr_reg_enable(mpt_entry, MPT_BPD);
- hr_reg_clear(mpt_entry, MPT_PA);
- hr_reg_write(mpt_entry, MPT_BQP,
- mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
-
- mpt_entry->lkey = cpu_to_le32(mw->rkey);
-
- hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM,
- mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
- mw->pbl_hop_num);
- hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
- mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
- hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
- mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
-
- return 0;
-}
-
static int free_mr_post_send_lp_wqe(struct hns_roce_qp *hr_qp)
{
struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
@@ -3849,7 +3833,6 @@ static const u32 wc_send_op_map[] = {
HR_WC_OP_MAP(ATOM_MSK_CMP_AND_SWAP, MASKED_COMP_SWAP),
HR_WC_OP_MAP(ATOM_MSK_FETCH_AND_ADD, MASKED_FETCH_ADD),
HR_WC_OP_MAP(FAST_REG_PMR, REG_MR),
- HR_WC_OP_MAP(BIND_MW, REG_MR),
};
static int to_ib_wc_send_op(u32 hr_opcode)
@@ -4560,7 +4543,9 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
dma_addr_t trrl_ba;
dma_addr_t irrl_ba;
enum ib_mtu ib_mtu;
+ u8 ack_req_freq;
const u8 *smac;
+ int lp_msg_len;
u8 lp_pktn_ini;
u64 *mtts;
u8 *dmac;
@@ -4643,7 +4628,8 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
return -EINVAL;
#define MIN_LP_MSG_LEN 1024
/* mtu * (2 ^ lp_pktn_ini) should be in the range of 1024 to mtu */
- lp_pktn_ini = ilog2(max(mtu, MIN_LP_MSG_LEN) / mtu);
+ lp_msg_len = max(mtu, MIN_LP_MSG_LEN);
+ lp_pktn_ini = ilog2(lp_msg_len / mtu);
if (attr_mask & IB_QP_PATH_MTU) {
hr_reg_write(context, QPC_MTU, ib_mtu);
@@ -4653,8 +4639,22 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini);
hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI);
- /* ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI */
- hr_reg_write(context, QPC_ACK_REQ_FREQ, lp_pktn_ini);
+ /*
+ * There are several constraints for ACK_REQ_FREQ:
+ * 1. mtu * (2 ^ ACK_REQ_FREQ) should not be too large, otherwise
+ * it may cause some unexpected retries when sending large
+ * payload.
+ * 2. ACK_REQ_FREQ should be larger than or equal to LP_PKTN_INI.
+ * 3. ACK_REQ_FREQ must be equal to LP_PKTN_INI when using LDCP
+ * or HC3 congestion control algorithm.
+ */
+ if (hr_qp->cong_type == CONG_TYPE_LDCP ||
+ hr_qp->cong_type == CONG_TYPE_HC3 ||
+ hr_dev->caps.max_ack_req_msg_len < lp_msg_len)
+ ack_req_freq = lp_pktn_ini;
+ else
+ ack_req_freq = ilog2(hr_dev->caps.max_ack_req_msg_len / mtu);
+ hr_reg_write(context, QPC_ACK_REQ_FREQ, ack_req_freq);
hr_reg_clear(qpc_mask, QPC_ACK_REQ_FREQ);
hr_reg_clear(qpc_mask, QPC_RX_REQ_PSN_ERR);
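
Worked numbers for the new QPC_ACK_REQ_FREQ logic above, assuming a 4096-byte MTU and a firmware-reported max_ack_req_msg_len of 64 KiB (the cap value is an assumption for illustration): lp_msg_len = max(4096, 1024) = 4096, so lp_pktn_ini = ilog2(1) = 0, and on the non-LDCP/HC3 path ack_req_freq = ilog2(65536 / 4096) = 4.

#include <stdio.h>

static unsigned int ilog2_u(unsigned int v)	/* floor(log2(v)), v > 0 */
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int mtu = 4096, min_lp_msg_len = 1024;
	unsigned int max_ack_req_msg_len = 65536;	/* assumed FW cap */
	unsigned int lp_msg_len, lp_pktn_ini, ack_req_freq;

	lp_msg_len = mtu > min_lp_msg_len ? mtu : min_lp_msg_len;
	lp_pktn_ini = ilog2_u(lp_msg_len / mtu);		/* 0 */
	ack_req_freq = ilog2_u(max_ack_req_msg_len / mtu);	/* 4 */

	/* LDCP/HC3, or a FW cap below lp_msg_len, would instead force
	 * ack_req_freq = lp_pktn_ini. */
	printf("lp_pktn_ini=%u ack_req_freq=%u\n", lp_pktn_ini, ack_req_freq);
	return 0;
}
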
@@ -5349,11 +5349,10 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct hns_roce_v2_qp_context ctx[2];
- struct hns_roce_v2_qp_context *context = ctx;
- struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
+ struct hns_roce_v2_qp_context *context;
+ struct hns_roce_v2_qp_context *qpc_mask;
struct ib_device *ibdev = &hr_dev->ib_dev;
- int ret;
+ int ret = -ENOMEM;
if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
return -EOPNOTSUPP;
@@ -5364,7 +5363,11 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
* we should set all bits of the relevant fields in context mask to
* 0 at the same time, else set them to 0x1.
*/
- memset(context, 0, hr_dev->caps.qpc_sz);
+ context = kvzalloc(sizeof(*context), GFP_KERNEL);
+ qpc_mask = kvzalloc(sizeof(*qpc_mask), GFP_KERNEL);
+ if (!context || !qpc_mask)
+ goto out;
+
memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);
ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
@@ -5406,6 +5409,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
clear_qp(hr_qp);
out:
+ kvfree(qpc_mask);
+ kvfree(context);
return ret;
}
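
The modify-QP hunk above moves two QP context structures (sized by caps.qpc_sz, potentially hundreds of bytes each) off the kernel stack. A userspace model of the allocate/mask/free flow, with calloc()/free() standing in for kvzalloc()/kvfree() and an illustrative context size:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct qp_context { unsigned char raw[512]; };	/* illustrative size */

int main(void)
{
	struct qp_context *context = calloc(1, sizeof(*context));
	struct qp_context *qpc_mask = calloc(1, sizeof(*qpc_mask));
	int ret = -1;

	if (!context || !qpc_mask)
		goto out;

	/* All-ones mask means "modify nothing" until fields are cleared,
	 * matching the driver's qpc_mask convention. */
	memset(qpc_mask, 0xff, sizeof(*qpc_mask));
	ret = 0;
out:
	free(qpc_mask);
	free(context);
	return ret;
}
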
@@ -6948,7 +6953,6 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.write_mtpt = hns_roce_v2_write_mtpt,
.rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
.frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
- .mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
.write_cqc = hns_roce_v2_write_cqc,
.set_hem = hns_roce_v2_set_hem,
.clear_hem = hns_roce_v2_clear_hem,
@@ -7044,21 +7048,11 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
goto error_failed_roce_init;
}
- if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
- ret = free_mr_init(hr_dev);
- if (ret) {
- dev_err(hr_dev->dev, "failed to init free mr!\n");
- goto error_failed_free_mr_init;
- }
- }
handle->priv = hr_dev;
return 0;
-error_failed_free_mr_init:
- hns_roce_exit(hr_dev);
-
error_failed_roce_init:
kfree(hr_dev->priv);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index bc7466830eaf..e64a04d6f85b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -814,24 +814,16 @@ struct hns_roce_v2_mpt_entry {
#define V2_MPT_BYTE_8_LW_EN_S 7
-#define V2_MPT_BYTE_8_MW_CNT_S 8
-#define V2_MPT_BYTE_8_MW_CNT_M GENMASK(31, 8)
-
#define V2_MPT_BYTE_12_FRE_S 0
#define V2_MPT_BYTE_12_PA_S 1
-#define V2_MPT_BYTE_12_MR_MW_S 4
-
#define V2_MPT_BYTE_12_BPD_S 5
#define V2_MPT_BYTE_12_BQP_S 6
#define V2_MPT_BYTE_12_INNER_PA_VLD_S 7
-#define V2_MPT_BYTE_12_MW_BIND_QPN_S 8
-#define V2_MPT_BYTE_12_MW_BIND_QPN_M GENMASK(31, 8)
-
#define V2_MPT_BYTE_48_PBL_BA_H_S 0
#define V2_MPT_BYTE_48_PBL_BA_H_M GENMASK(28, 0)
@@ -1168,7 +1160,8 @@ struct hns_roce_cfg_gmv_tb_b {
#define GMV_TB_B_SMAC_H GMV_TB_B_FIELD_LOC(47, 32)
#define GMV_TB_B_SGID_IDX GMV_TB_B_FIELD_LOC(71, 64)
-#define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM 5
+#define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM_HIP08 5
+#define HNS_ROCE_QUERY_PF_CAPS_CMD_NUM 6
struct hns_roce_query_pf_caps_a {
u8 number_ports;
u8 local_ca_ack_delay;
@@ -1280,6 +1273,11 @@ struct hns_roce_query_pf_caps_e {
__le16 aeq_period;
};
+struct hns_roce_query_pf_caps_f {
+ __le32 max_ack_req_msg_len;
+ __le32 rsv[5];
+};
+
#define PF_CAPS_E_FIELD_LOC(h, l) \
FIELD_LOC(struct hns_roce_query_pf_caps_e, h, l)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index e7a497cc125c..d50f36f8a110 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -672,13 +672,6 @@ static const struct ib_device_ops hns_roce_dev_mr_ops = {
.rereg_user_mr = hns_roce_rereg_user_mr,
};
-static const struct ib_device_ops hns_roce_dev_mw_ops = {
- .alloc_mw = hns_roce_alloc_mw,
- .dealloc_mw = hns_roce_dealloc_mw,
-
- INIT_RDMA_OBJ_SIZE(ib_mw, hns_roce_mw, ibmw),
-};
-
static const struct ib_device_ops hns_roce_dev_frmr_ops = {
.alloc_mr = hns_roce_alloc_mr,
.map_mr_sg = hns_roce_map_mr_sg,
@@ -732,9 +725,6 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR)
ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW)
- ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);
-
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);
@@ -947,10 +937,7 @@ err_unmap_dmpt:
static void hns_roce_teardown_hca(struct hns_roce_dev *hr_dev)
{
hns_roce_cleanup_bitmap(hr_dev);
-
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
- hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
- mutex_destroy(&hr_dev->pgdir_mutex);
+ mutex_destroy(&hr_dev->pgdir_mutex);
}
/**
@@ -965,11 +952,11 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
spin_lock_init(&hr_dev->sm_lock);
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
- hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
- INIT_LIST_HEAD(&hr_dev->pgdir_list);
- mutex_init(&hr_dev->pgdir_mutex);
- }
+ INIT_LIST_HEAD(&hr_dev->qp_list);
+ spin_lock_init(&hr_dev->qp_list_lock);
+
+ INIT_LIST_HEAD(&hr_dev->pgdir_list);
+ mutex_init(&hr_dev->pgdir_mutex);
hns_roce_init_uar_table(hr_dev);
@@ -1001,9 +988,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
err_uar_table_free:
ida_destroy(&hr_dev->uar_ida.ida);
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
- hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
- mutex_destroy(&hr_dev->pgdir_mutex);
+ mutex_destroy(&hr_dev->pgdir_mutex);
return ret;
}
@@ -1132,9 +1117,6 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
}
}
- INIT_LIST_HEAD(&hr_dev->qp_list);
- spin_lock_init(&hr_dev->qp_list_lock);
-
ret = hns_roce_register_device(hr_dev);
if (ret)
goto error_failed_register_device;
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 93a48b41955b..0f037e545520 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -231,12 +231,18 @@ err_free:
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_mr *mr;
int ret;
+ if (dmah) {
+ ret = -EOPNOTSUPP;
+ goto err_out;
+ }
+
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr) {
ret = -ENOMEM;
@@ -483,120 +489,6 @@ err_page_list:
return sg_num;
}
-static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
- struct hns_roce_mw *mw)
-{
- struct device *dev = hr_dev->dev;
- int ret;
-
- if (mw->enabled) {
- ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
- key_to_hw_index(mw->rkey) &
- (hr_dev->caps.num_mtpts - 1));
- if (ret)
- dev_warn(dev, "MW DESTROY_MPT failed (%d)\n", ret);
-
- hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
- key_to_hw_index(mw->rkey));
- }
-
- ida_free(&hr_dev->mr_table.mtpt_ida.ida,
- (int)key_to_hw_index(mw->rkey));
-}
-
-static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
- struct hns_roce_mw *mw)
-{
- struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
- struct hns_roce_cmd_mailbox *mailbox;
- struct device *dev = hr_dev->dev;
- unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
- int ret;
-
- /* prepare HEM entry memory */
- ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
- if (ret)
- return ret;
-
- mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox)) {
- ret = PTR_ERR(mailbox);
- goto err_table;
- }
-
- ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
- if (ret) {
- dev_err(dev, "MW write mtpt fail!\n");
- goto err_page;
- }
-
- ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
- mtpt_idx & (hr_dev->caps.num_mtpts - 1));
- if (ret) {
- dev_err(dev, "MW CREATE_MPT failed (%d)\n", ret);
- goto err_page;
- }
-
- mw->enabled = 1;
-
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-
- return 0;
-
-err_page:
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-
-err_table:
- hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
-
- return ret;
-}
-
-int hns_roce_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
- struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
- struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_mw *mw = to_hr_mw(ibmw);
- int ret;
- int id;
-
- /* Allocate a key for mw from mr_table */
- id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max,
- GFP_KERNEL);
- if (id < 0) {
- ibdev_err(ibdev, "failed to alloc id for MW key, id(%d)\n", id);
- return -ENOMEM;
- }
-
- mw->rkey = hw_index_to_key(id);
-
- ibmw->rkey = mw->rkey;
- mw->pdn = to_hr_pd(ibmw->pd)->pdn;
- mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
- mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
- mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
-
- ret = hns_roce_mw_enable(hr_dev, mw);
- if (ret)
- goto err_mw;
-
- return 0;
-
-err_mw:
- hns_roce_mw_free(hr_dev, mw);
- return ret;
-}
-
-int hns_roce_dealloc_mw(struct ib_mw *ibmw)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
- struct hns_roce_mw *mw = to_hr_mw(ibmw);
-
- hns_roce_mw_free(hr_dev, mw);
- return 0;
-}
-
static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
struct hns_roce_buf_region *region, dma_addr_t *pages,
int max_count)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 9f376a2232b0..6ff1b8ce580c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -1003,14 +1003,14 @@ static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
int ret;
sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
- if (ZERO_OR_NULL_PTR(sq_wrid)) {
+ if (!sq_wrid) {
ibdev_err(ibdev, "failed to alloc SQ wrid.\n");
return -ENOMEM;
}
if (hr_qp->rq.wqe_cnt) {
rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
- if (ZERO_OR_NULL_PTR(rq_wrid)) {
+ if (!rq_wrid) {
ibdev_err(ibdev, "failed to alloc RQ wrid.\n");
ret = -ENOMEM;
goto err_sq;
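
The ZERO_OR_NULL_PTR() check being dropped here exists because kcalloc() with a zero count returns the magic ZERO_SIZE_PTR rather than NULL; once sq.wqe_cnt is known to be nonzero at this point (the apparent premise of this change), a plain NULL check suffices. A userspace mock of the distinction:

#include <stdio.h>
#include <stdlib.h>

#define ZERO_SIZE_PTR ((void *)16)	/* kernel's marker for zero-size allocations */
#define ZERO_OR_NULL_PTR(x) \
	((unsigned long)(x) <= (unsigned long)ZERO_SIZE_PTR)

static void *kcalloc_mock(size_t n, size_t size)
{
	return n ? calloc(n, size) : ZERO_SIZE_PTR;
}

int main(void)
{
	void *p = kcalloc_mock(0, 8);

	printf("NULL check: %d, ZERO_OR_NULL_PTR: %d\n",
	       p == NULL, ZERO_OR_NULL_PTR(p));	/* prints: 0, 1 */
	free(p == ZERO_SIZE_PTR ? NULL : p);
	return 0;
}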
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 1e8c92826de2..da5a41b275d8 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -3013,10 +3013,12 @@ static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
* @len: length of mr
* @virt: virtual address
* @access: access of mr
+ * @dmah: dma handle
* @udata: user data
*/
static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
u64 virt, int access,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
#define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
@@ -3026,6 +3028,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
struct irdma_mr *iwmr = NULL;
int err;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
return ERR_PTR(-EINVAL);
@@ -3085,6 +3090,7 @@ error:
static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
u64 len, u64 virt,
int fd, int access,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs)
{
struct irdma_device *iwdev = to_iwdev(pd->device);
@@ -3092,6 +3098,9 @@ static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
struct irdma_mr *iwmr;
int err;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/hw/mana/counters.c b/drivers/infiniband/hw/mana/counters.c
index e533ce21013d..e964e74be48d 100644
--- a/drivers/infiniband/hw/mana/counters.c
+++ b/drivers/infiniband/hw/mana/counters.c
@@ -32,8 +32,32 @@ static const struct rdma_stat_desc mana_ib_port_stats_desc[] = {
[MANA_IB_RATE_INC_EVENTS].name = "rate_inc_events",
[MANA_IB_NUM_QPS_RECOVERED].name = "num_qps_recovered",
[MANA_IB_CURRENT_RATE].name = "current_rate",
+ [MANA_IB_DUP_RX_REQ].name = "dup_rx_requests",
+ [MANA_IB_TX_BYTES].name = "tx_bytes",
+ [MANA_IB_RX_BYTES].name = "rx_bytes",
+ [MANA_IB_RX_SEND_REQ].name = "rx_send_requests",
+ [MANA_IB_RX_WRITE_REQ].name = "rx_write_requests",
+ [MANA_IB_RX_READ_REQ].name = "rx_read_requests",
+ [MANA_IB_TX_PKT].name = "tx_packets",
+ [MANA_IB_RX_PKT].name = "rx_packets",
};
+static const struct rdma_stat_desc mana_ib_device_stats_desc[] = {
+ [MANA_IB_SENT_CNPS].name = "sent_cnps",
+ [MANA_IB_RECEIVED_ECNS].name = "received_ecns",
+ [MANA_IB_RECEIVED_CNP_COUNT].name = "received_cnp_count",
+ [MANA_IB_QP_CONGESTED_EVENTS].name = "qp_congested_events",
+ [MANA_IB_QP_RECOVERED_EVENTS].name = "qp_recovered_events",
+ [MANA_IB_DEV_RATE_INC_EVENTS].name = "rate_inc_events",
+};
+
+struct rdma_hw_stats *mana_ib_alloc_hw_device_stats(struct ib_device *ibdev)
+{
+ return rdma_alloc_hw_stats_struct(mana_ib_device_stats_desc,
+ ARRAY_SIZE(mana_ib_device_stats_desc),
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
u32 port_num)
{
@@ -42,8 +66,39 @@ struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
-int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
- u32 port_num, int index)
+static int mana_ib_get_hw_device_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats)
+{
+ struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev,
+ ib_dev);
+ struct mana_rnic_query_device_cntrs_resp resp = {};
+ struct mana_rnic_query_device_cntrs_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_QUERY_DEVICE_COUNTERS,
+ sizeof(req), sizeof(resp));
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
+ req.adapter = mdev->adapter_handle;
+
+ err = mana_gd_send_request(mdev_to_gc(mdev), sizeof(req), &req,
+ sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to query device counters err %d",
+ err);
+ return err;
+ }
+
+ stats->value[MANA_IB_SENT_CNPS] = resp.sent_cnps;
+ stats->value[MANA_IB_RECEIVED_ECNS] = resp.received_ecns;
+ stats->value[MANA_IB_RECEIVED_CNP_COUNT] = resp.received_cnp_count;
+ stats->value[MANA_IB_QP_CONGESTED_EVENTS] = resp.qp_congested_events;
+ stats->value[MANA_IB_QP_RECOVERED_EVENTS] = resp.qp_recovered_events;
+ stats->value[MANA_IB_DEV_RATE_INC_EVENTS] = resp.rate_inc_events;
+
+ return ARRAY_SIZE(mana_ib_device_stats_desc);
+}
+
+static int mana_ib_get_hw_port_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u32 port_num)
{
struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev,
ib_dev);
@@ -53,6 +108,7 @@ int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
mana_gd_init_req_hdr(&req.hdr, MANA_IB_QUERY_VF_COUNTERS,
sizeof(req), sizeof(resp));
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
@@ -101,5 +157,23 @@ int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
stats->value[MANA_IB_NUM_QPS_RECOVERED] = resp.num_qps_recovered;
stats->value[MANA_IB_CURRENT_RATE] = resp.current_rate;
+ stats->value[MANA_IB_DUP_RX_REQ] = resp.dup_rx_req;
+ stats->value[MANA_IB_TX_BYTES] = resp.tx_bytes;
+ stats->value[MANA_IB_RX_BYTES] = resp.rx_bytes;
+ stats->value[MANA_IB_RX_SEND_REQ] = resp.rx_send_req;
+ stats->value[MANA_IB_RX_WRITE_REQ] = resp.rx_write_req;
+ stats->value[MANA_IB_RX_READ_REQ] = resp.rx_read_req;
+ stats->value[MANA_IB_TX_PKT] = resp.tx_pkt;
+ stats->value[MANA_IB_RX_PKT] = resp.rx_pkt;
+
return ARRAY_SIZE(mana_ib_port_stats_desc);
}
+
+int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u32 port_num, int index)
+{
+ if (!port_num)
+ return mana_ib_get_hw_device_stats(ibdev, stats);
+ else
+ return mana_ib_get_hw_port_stats(ibdev, stats, port_num);
+}
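
The dispatch added above follows the rdma_hw_stats convention that port_num == 0 selects the device-wide counter set and 1..phys_port_cnt a per-port set; a trimmed sketch of that contract (signatures simplified, not the real verbs API):

#include <stdio.h>

static int get_device_stats(void)
{
	puts("fill device-wide counters (CNPs, ECNs, ...)");
	return 0;
}

static int get_port_stats(unsigned int port)
{
	printf("fill counters for port %u\n", port);
	return 0;
}

static int get_hw_stats(unsigned int port_num)
{
	if (!port_num)
		return get_device_stats();
	return get_port_stats(port_num);
}

int main(void)
{
	get_hw_stats(0);	/* device counters */
	get_hw_stats(1);	/* port 1 counters */
	return 0;
}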
diff --git a/drivers/infiniband/hw/mana/counters.h b/drivers/infiniband/hw/mana/counters.h
index 7ff92d27f6c3..f68e776bb41d 100644
--- a/drivers/infiniband/hw/mana/counters.h
+++ b/drivers/infiniband/hw/mana/counters.h
@@ -35,10 +35,28 @@ enum mana_ib_port_counters {
MANA_IB_RATE_INC_EVENTS,
MANA_IB_NUM_QPS_RECOVERED,
MANA_IB_CURRENT_RATE,
+ MANA_IB_DUP_RX_REQ,
+ MANA_IB_TX_BYTES,
+ MANA_IB_RX_BYTES,
+ MANA_IB_RX_SEND_REQ,
+ MANA_IB_RX_WRITE_REQ,
+ MANA_IB_RX_READ_REQ,
+ MANA_IB_TX_PKT,
+ MANA_IB_RX_PKT,
+};
+
+enum mana_ib_device_counters {
+ MANA_IB_SENT_CNPS,
+ MANA_IB_RECEIVED_ECNS,
+ MANA_IB_RECEIVED_CNP_COUNT,
+ MANA_IB_QP_CONGESTED_EVENTS,
+ MANA_IB_QP_RECOVERED_EVENTS,
+ MANA_IB_DEV_RATE_INC_EVENTS,
};
struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
u32 port_num);
+struct rdma_hw_stats *mana_ib_alloc_hw_device_stats(struct ib_device *ibdev);
int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
u32 port_num, int index);
#endif /* _COUNTERS_H_ */
diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index 165c0a1e67d1..fa60872f169f 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -65,6 +65,10 @@ static const struct ib_device_ops mana_ib_stats_ops = {
.get_hw_stats = mana_ib_get_hw_stats,
};
+static const struct ib_device_ops mana_ib_device_stats_ops = {
+ .alloc_hw_device_stats = mana_ib_alloc_hw_device_stats,
+};
+
static int mana_ib_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -73,28 +77,31 @@ static int mana_ib_netdev_event(struct notifier_block *this,
struct gdma_context *gc = dev->gdma_dev->gdma_context;
struct mana_context *mc = gc->mana.driver_data;
struct net_device *ndev;
+ int i;
/* Only process events from the ports of our parent device */
- if (event_dev != mc->ports[0])
- return NOTIFY_DONE;
-
- switch (event) {
- case NETDEV_CHANGEUPPER:
- ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
- /*
- * RDMA core will setup GID based on updated netdev.
- * It's not possible to race with the core as rtnl lock is being
- * held.
- */
- ib_device_set_netdev(&dev->ib_dev, ndev, 1);
-
- /* mana_get_primary_netdev() returns ndev with refcount held */
- netdev_put(ndev, &dev->dev_tracker);
-
- return NOTIFY_OK;
- default:
- return NOTIFY_DONE;
- }
+ for (i = 0; i < dev->ib_dev.phys_port_cnt; i++)
+ if (event_dev == mc->ports[i]) {
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ ndev = mana_get_primary_netdev(mc, i, &dev->dev_tracker);
+ /*
+ * RDMA core will setup GID based on updated netdev.
+ * It's not possible to race with the core as rtnl lock is being
+ * held.
+ */
+ ib_device_set_netdev(&dev->ib_dev, ndev, i + 1);
+
+ /* mana_get_primary_netdev() returns ndev with refcount held */
+ if (ndev)
+ netdev_put(ndev, &dev->dev_tracker);
+
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+ }
+ return NOTIFY_DONE;
}
static int mana_ib_probe(struct auxiliary_device *adev,
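
A compact model of the multi-port matching introduced in the notifier above: walk every IB port and act only when the event's netdev is one of ours, remembering that IB port numbers are 1-based so index i maps to port i + 1 (names hypothetical):

#include <stdbool.h>
#include <stddef.h>

struct netdev { int id; };

static bool handle_netdev_event(struct netdev *event_dev,
				struct netdev **ports, size_t nports)
{
	for (size_t i = 0; i < nports; i++)
		if (event_dev == ports[i]) {
			/* rebind the GID table for IB port i + 1 here */
			return true;	/* NOTIFY_OK */
		}
	return false;			/* NOTIFY_DONE: not one of our ports */
}

int main(void)
{
	struct netdev a = { 1 }, b = { 2 };
	struct netdev *ports[] = { &a, &b };

	return handle_netdev_event(&b, ports, 2) ? 0 : 1;
}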
@@ -107,7 +114,7 @@ static int mana_ib_probe(struct auxiliary_device *adev,
struct net_device *ndev;
struct mana_ib_dev *dev;
u8 mac_addr[ETH_ALEN];
- int ret;
+ int ret, i;
dev = ib_alloc_device(mana_ib_dev, ib_dev);
if (!dev)
@@ -122,51 +129,56 @@ static int mana_ib_probe(struct auxiliary_device *adev,
if (mana_ib_is_rnic(dev)) {
dev->ib_dev.phys_port_cnt = 1;
- ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
- if (!ndev) {
- ret = -ENODEV;
- ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1");
- goto free_ib_device;
- }
- ether_addr_copy(mac_addr, ndev->dev_addr);
- addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr);
- ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1);
- /* mana_get_primary_netdev() returns ndev with refcount held */
- netdev_put(ndev, &dev->dev_tracker);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
- goto free_ib_device;
- }
-
- dev->nb.notifier_call = mana_ib_netdev_event;
- ret = register_netdevice_notifier(&dev->nb);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d",
- ret);
- goto free_ib_device;
- }
-
+ addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, mc->ports[0]->dev_addr);
ret = mana_ib_gd_query_adapter_caps(dev);
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d", ret);
- goto deregister_net_notifier;
+ goto free_ib_device;
}
ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
+ if (dev->adapter_caps.feature_flags & MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT)
+ ib_set_device_ops(&dev->ib_dev, &mana_ib_device_stats_ops);
ret = mana_ib_create_eqs(dev);
if (ret) {
ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
- goto deregister_net_notifier;
+ goto free_ib_device;
}
ret = mana_ib_gd_create_rnic_adapter(dev);
if (ret)
goto destroy_eqs;
- ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
+ if (dev->adapter_caps.feature_flags & MANA_IB_FEATURE_MULTI_PORTS_SUPPORT)
+ dev->ib_dev.phys_port_cnt = mc->num_ports;
+
+ for (i = 0; i < dev->ib_dev.phys_port_cnt; i++) {
+ ndev = mana_get_primary_netdev(mc, i, &dev->dev_tracker);
+ if (!ndev) {
+ ret = -ENODEV;
+ ibdev_err(&dev->ib_dev,
+ "Failed to get netdev for IB port %d", i + 1);
+ goto destroy_rnic;
+ }
+ ether_addr_copy(mac_addr, ndev->dev_addr);
+ ret = ib_device_set_netdev(&dev->ib_dev, ndev, i + 1);
+ /* mana_get_primary_netdev() returns ndev with refcount held */
+ netdev_put(ndev, &dev->dev_tracker);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
+ goto destroy_rnic;
+ }
+ ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d", ret);
+ goto destroy_rnic;
+ }
+ }
+ dev->nb.notifier_call = mana_ib_netdev_event;
+ ret = register_netdevice_notifier(&dev->nb);
if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d", ret);
+ ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d", ret);
goto destroy_rnic;
}
} else {
@@ -182,7 +194,7 @@ static int mana_ib_probe(struct auxiliary_device *adev,
MANA_AV_BUFFER_SIZE, 0);
if (!dev->av_pool) {
ret = -ENOMEM;
- goto destroy_rnic;
+ goto deregister_net_notifier;
}
ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
@@ -199,15 +211,15 @@ static int mana_ib_probe(struct auxiliary_device *adev,
deallocate_pool:
dma_pool_destroy(dev->av_pool);
+deregister_net_notifier:
+ if (mana_ib_is_rnic(dev))
+ unregister_netdevice_notifier(&dev->nb);
destroy_rnic:
if (mana_ib_is_rnic(dev))
mana_ib_gd_destroy_rnic_adapter(dev);
destroy_eqs:
if (mana_ib_is_rnic(dev))
mana_ib_destroy_eqs(dev);
-deregister_net_notifier:
- if (mana_ib_is_rnic(dev))
- unregister_netdevice_notifier(&dev->nb);
free_ib_device:
xa_destroy(&dev->qp_table_wq);
ib_dealloc_device(&dev->ib_dev);
@@ -221,9 +233,9 @@ static void mana_ib_remove(struct auxiliary_device *adev)
ib_unregister_device(&dev->ib_dev);
dma_pool_destroy(dev->av_pool);
if (mana_ib_is_rnic(dev)) {
+ unregister_netdevice_notifier(&dev->nb);
mana_ib_gd_destroy_rnic_adapter(dev);
mana_ib_destroy_eqs(dev);
- unregister_netdevice_notifier(&dev->nb);
}
xa_destroy(&dev->qp_table_wq);
ib_dealloc_device(&dev->ib_dev);
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 41a24a186f9d..6a2471f2e804 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -563,8 +563,14 @@ int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
immutable->gid_tbl_len = attr.gid_tbl_len;
if (mana_ib_is_rnic(dev)) {
- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
- immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ if (port_num == 1) {
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ } else {
+ immutable->core_cap_flags = RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP
+ | RDMA_CORE_CAP_ETH_AH;
+ immutable->max_mad_size = 0;
+ }
} else {
immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
}
@@ -633,8 +639,9 @@ int mana_ib_query_port(struct ib_device *ibdev, u32 port,
props->pkey_tbl_len = 1;
if (mana_ib_is_rnic(dev)) {
props->gid_tbl_len = 16;
- props->port_cap_flags = IB_PORT_CM_SUP;
props->ip_gids = true;
+ if (port == 1)
+ props->port_cap_flags = IB_PORT_CM_SUP;
}
return 0;
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 42bebd6cd4f7..5d31034ac7fb 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -210,6 +210,7 @@ enum mana_ib_command_code {
MANA_IB_DESTROY_RC_QP = 0x3000b,
MANA_IB_SET_QP_STATE = 0x3000d,
MANA_IB_QUERY_VF_COUNTERS = 0x30022,
+ MANA_IB_QUERY_DEVICE_COUNTERS = 0x30023,
};
struct mana_ib_query_adapter_caps_req {
@@ -218,6 +219,8 @@ struct mana_ib_query_adapter_caps_req {
enum mana_ib_adapter_features {
MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT = BIT(4),
+ MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT = BIT(5),
+ MANA_IB_FEATURE_MULTI_PORTS_SUPPORT = BIT(6),
};
struct mana_ib_query_adapter_caps_resp {
@@ -514,6 +517,31 @@ struct mana_rnic_query_vf_cntrs_resp {
u64 rate_inc_events;
u64 num_qps_recovered;
u64 current_rate;
+ u64 dup_rx_req;
+ u64 tx_bytes;
+ u64 rx_bytes;
+ u64 rx_send_req;
+ u64 rx_write_req;
+ u64 rx_read_req;
+ u64 tx_pkt;
+ u64 rx_pkt;
+}; /* HW Data */
+
+struct mana_rnic_query_device_cntrs_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+}; /* HW Data */
+
+struct mana_rnic_query_device_cntrs_resp {
+ struct gdma_resp_hdr hdr;
+ u32 sent_cnps;
+ u32 received_ecns;
+ u32 reserved1;
+ u32 received_cnp_count;
+ u32 qp_congested_events;
+ u32 qp_recovered_events;
+ u32 rate_inc_events;
+ u32 reserved2;
}; /* HW Data */
static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
@@ -605,6 +633,7 @@ struct ib_mr *mana_ib_get_dma_mr(struct ib_pd *ibpd, int access_flags);
struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 iova, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
@@ -694,5 +723,6 @@ int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
u64 iova, int fd, int mr_access_flags,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs);
#endif
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index 6d974d0a8400..55701046ffba 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -106,6 +106,7 @@ static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
u64 iova, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
@@ -116,6 +117,9 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
u64 dma_region_handle;
int err;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
ibdev_dbg(ibdev,
@@ -188,6 +192,7 @@ err_free:
struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
u64 iova, int fd, int access_flags,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs)
{
struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
@@ -199,6 +204,9 @@ struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 leng
u64 dma_region_handle;
int err;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
access_flags &= ~IB_ACCESS_OPTIONAL;
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index 14fd7d6c54a2..a6bf4d539e67 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -772,7 +772,7 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
req.ah_attr.dest_port = ROCE_V2_UDP_DPORT;
req.ah_attr.src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
ibqp->qp_num, attr->dest_qp_num);
- req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class;
+ req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class >> 2;
req.ah_attr.hop_limit = attr->ah_attr.grh.hop_limit;
}
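
The >> 2 above reflects the IPv6 traffic-class layout, DSCP in the upper six bits and ECN in the lower two, so the shift hands the firmware a bare DSCP value (assuming that is what the ah_attr field expects). A self-checking sketch:

#include <assert.h>
#include <stdint.h>

static uint8_t tclass_to_dscp(uint8_t traffic_class)
{
	return traffic_class >> 2;	/* drop the two ECN bits */
}

int main(void)
{
	assert(tclass_to_dscp(0x00) == 0);	/* best effort */
	assert(tclass_to_dscp(0xb8) == 46);	/* EF PHB, ECN bits clear */
	return 0;
}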
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index f53b1846594c..5df5b955114e 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -759,6 +759,7 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
struct ib_umem *umem);
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
int mlx4_ib_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
int mlx4_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index e77645a673fb..94464f1694d9 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -139,6 +139,7 @@ static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct mlx4_ib_dev *dev = to_mdev(pd->device);
@@ -147,6 +148,9 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int err;
int n;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 11878ddf7cc7..dd7bb377f491 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -8,6 +8,7 @@ mlx5_ib-y := ah.o \
cq.o \
data_direct.o \
dm.o \
+ dmah.o \
doorbell.o \
fs.o \
gsi.o \
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
index b847084dcd99..e042e0719ead 100644
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -16,6 +16,18 @@ struct mlx5_ib_counter {
u32 type;
};
+struct mlx5_rdma_counter {
+ struct rdma_counter rdma_counter;
+
+ struct mlx5_fc *fc[MLX5_IB_OPCOUNTER_MAX];
+ struct xarray qpn_opfc_xa;
+};
+
+static struct mlx5_rdma_counter *to_mcounter(struct rdma_counter *counter)
+{
+ return container_of(counter, struct mlx5_rdma_counter, rdma_counter);
+}
+
#define INIT_Q_COUNTER(_name) \
{ .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)}
@@ -398,7 +410,7 @@ static int do_get_hw_stats(struct ib_device *ibdev,
return ret;
/* We don't expose device counters over Vports */
- if (is_mdev_switchdev_mode(dev->mdev) && port_num != 0)
+ if (is_mdev_switchdev_mode(dev->mdev) && dev->is_rep && port_num != 0)
goto done;
if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
@@ -418,7 +430,7 @@ static int do_get_hw_stats(struct ib_device *ibdev,
*/
goto done;
}
- ret = mlx5_lag_query_cong_counters(dev->mdev,
+ ret = mlx5_lag_query_cong_counters(mdev,
stats->value +
cnts->num_q_counters,
cnts->num_cong_counters,
@@ -602,7 +614,7 @@ static int mlx5_ib_counter_dealloc(struct rdma_counter *counter)
return 0;
WARN_ON(!xa_empty(&mcounter->qpn_opfc_xa));
- mlx5r_fs_destroy_fcs(dev, counter);
+ mlx5r_fs_destroy_fcs(dev, mcounter->fc);
MLX5_SET(dealloc_q_counter_in, in, opcode,
MLX5_CMD_OP_DEALLOC_Q_COUNTER);
MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter->id);
@@ -612,6 +624,7 @@ static int mlx5_ib_counter_dealloc(struct rdma_counter *counter)
static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
struct ib_qp *qp, u32 port)
{
+ struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
struct mlx5_ib_dev *dev = to_mdev(qp->device);
bool new = false;
int err;
@@ -635,7 +648,11 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
if (err)
goto fail_set_counter;
- err = mlx5r_fs_bind_op_fc(qp, counter, port);
+ if (!counter->mode.bind_opcnt)
+ return 0;
+
+ err = mlx5r_fs_bind_op_fc(qp, mcounter->fc, &mcounter->qpn_opfc_xa,
+ port);
if (err)
goto fail_bind_op_fc;
@@ -655,9 +672,12 @@ fail_set_counter:
static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp, u32 port)
{
struct rdma_counter *counter = qp->counter;
+ struct mlx5_rdma_counter *mcounter;
int err;
- mlx5r_fs_unbind_op_fc(qp, counter);
+ mcounter = to_mcounter(counter);
+
+ mlx5r_fs_unbind_op_fc(qp, &mcounter->qpn_opfc_xa);
err = mlx5_ib_qp_set_counter(qp, NULL);
if (err)
@@ -666,7 +686,9 @@ static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp, u32 port)
return 0;
fail_set_counter:
- mlx5r_fs_bind_op_fc(qp, counter, port);
+ if (counter->mode.bind_opcnt)
+ mlx5r_fs_bind_op_fc(qp, mcounter->fc,
+ &mcounter->qpn_opfc_xa, port);
return err;
}
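
to_mcounter(), now private to counters.c, is the standard container_of pattern: recover the wrapping structure from a pointer to its embedded member. A self-contained userspace rendition (struct contents abbreviated):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rdma_counter { int id; };

struct mlx5_rdma_counter_sketch {
	struct rdma_counter rdma_counter;	/* embedded base object */
	void *fc[4];
};

int main(void)
{
	struct mlx5_rdma_counter_sketch mc = { .rdma_counter = { .id = 7 } };
	struct rdma_counter *base = &mc.rdma_counter;
	struct mlx5_rdma_counter_sketch *back =
		container_of(base, struct mlx5_rdma_counter_sketch, rdma_counter);

	printf("%d\n", back->rdma_counter.id);	/* prints 7 */
	return 0;
}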
diff --git a/drivers/infiniband/hw/mlx5/counters.h b/drivers/infiniband/hw/mlx5/counters.h
index bd03cee42014..a04e7dd59455 100644
--- a/drivers/infiniband/hw/mlx5/counters.h
+++ b/drivers/infiniband/hw/mlx5/counters.h
@@ -8,19 +8,6 @@
#include "mlx5_ib.h"
-struct mlx5_rdma_counter {
- struct rdma_counter rdma_counter;
-
- struct mlx5_fc *fc[MLX5_IB_OPCOUNTER_MAX];
- struct xarray qpn_opfc_xa;
-};
-
-static inline struct mlx5_rdma_counter *
-to_mcounter(struct rdma_counter *counter)
-{
- return container_of(counter, struct mlx5_rdma_counter, rdma_counter);
-}
-
int mlx5_ib_counters_init(struct mlx5_ib_dev *dev);
void mlx5_ib_counters_cleanup(struct mlx5_ib_dev *dev);
void mlx5_ib_counters_clear_description(struct ib_counters *counters);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 1aa5311b03e9..9c8003a78334 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -1055,20 +1055,31 @@ err_cqb:
return err;
}
-int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+int mlx5_ib_pre_destroy_cq(struct ib_cq *cq)
{
struct mlx5_ib_dev *dev = to_mdev(cq->device);
struct mlx5_ib_cq *mcq = to_mcq(cq);
+
+ return mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
+}
+
+void mlx5_ib_post_destroy_cq(struct ib_cq *cq)
+{
+ destroy_cq_kernel(to_mdev(cq->device), to_mcq(cq));
+}
+
+int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+{
int ret;
- ret = mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
+ ret = mlx5_ib_pre_destroy_cq(cq);
if (ret)
return ret;
if (udata)
- destroy_cq_user(mcq, udata);
+ destroy_cq_user(to_mcq(cq), udata);
else
- destroy_cq_kernel(dev, mcq);
+ mlx5_ib_post_destroy_cq(cq);
return 0;
}
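
The pre/post split above separates the failable hardware teardown from the infallible software cleanup, letting the caller abort the destroy if the device refuses. A minimal sketch of the control flow:

#include <stdio.h>

static int pre_destroy_cq(void)		/* talks to HW; may fail */
{
	puts("destroy HW CQ object");
	return 0;
}

static void post_destroy_cq(void)	/* pure SW cleanup; cannot fail */
{
	puts("free kernel-side CQ buffers");
}

static int destroy_cq(void)
{
	int ret = pre_destroy_cq();

	if (ret)
		return ret;	/* HW refused: leave the object intact */
	post_destroy_cq();
	return 0;
}

int main(void) { return destroy_cq(); }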
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 2479da8620ca..028d9f031dde 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -159,7 +159,7 @@ int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user, u64 req_ucaps)
uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
if (is_user &&
(MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX) &&
- capable(CAP_NET_RAW))
+ rdma_dev_has_raw_cap(&dev->ib_dev))
cap |= MLX5_UCTX_CAP_RAW_TX;
if (is_user &&
(MLX5_CAP_GEN(dev->mdev, uctx_cap) &
@@ -1393,6 +1393,10 @@ static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
}
MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
+ /* TPH is not allowed to bypass the regular kernel's verbs flow */
+ MLX5_SET(mkc, mkc, pcie_tph_en, 0);
+ MLX5_SET(mkc, mkc, pcie_tph_steering_tag_index,
+ MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX);
return 0;
}
@@ -1958,6 +1962,7 @@ subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
/* Level1 is valid for future use, no need to free */
return -ENOMEM;
+ INIT_LIST_HEAD(&obj_event->obj_sub_list);
err = xa_insert(&event->object_ids,
key_level2,
obj_event,
@@ -1966,7 +1971,6 @@ subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
kfree(obj_event);
return err;
}
- INIT_LIST_HEAD(&obj_event->obj_sub_list);
}
return 0;
@@ -2669,7 +2673,7 @@ static void devx_wait_async_destroy(struct mlx5_async_cmd *cmd)
void mlx5_ib_ufile_hw_cleanup(struct ib_uverbs_file *ufile)
{
- struct mlx5_async_cmd async_cmd[MAX_ASYNC_CMDS];
+ struct mlx5_async_cmd *async_cmd;
struct ib_ucontext *ucontext = ufile->ucontext;
struct ib_device *device = ucontext->device;
struct mlx5_ib_dev *dev = to_mdev(device);
@@ -2678,6 +2682,10 @@ void mlx5_ib_ufile_hw_cleanup(struct ib_uverbs_file *ufile)
int head = 0;
int tail = 0;
+ async_cmd = kcalloc(MAX_ASYNC_CMDS, sizeof(*async_cmd), GFP_KERNEL);
+ if (!async_cmd)
+ return;
+
list_for_each_entry(uobject, &ufile->uobjects, list) {
WARN_ON(uverbs_try_lock_object(uobject, UVERBS_LOOKUP_WRITE));
@@ -2713,6 +2721,8 @@ void mlx5_ib_ufile_hw_cleanup(struct ib_uverbs_file *ufile)
devx_wait_async_destroy(&async_cmd[head % MAX_ASYNC_CMDS]);
head++;
}
+
+ kfree(async_cmd);
}
static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
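
The change above moves a MAX_ASYNC_CMDS-sized scratch array off the kernel stack; on allocation failure the cleanup degrades to a best-effort early return, as the hunk shows. A sketch of the pattern (struct layout hypothetical):

#include <stdlib.h>

#define MAX_ASYNC_CMDS 8

struct async_cmd_sketch { char in[64], out[64]; };

static void ufile_cleanup_sketch(void)
{
	struct async_cmd_sketch *cmds;

	cmds = calloc(MAX_ASYNC_CMDS, sizeof(*cmds));
	if (!cmds)
		return;		/* best effort when memory is tight */

	/* ... issue async destroys and wait on them via cmds[] ... */

	free(cmds);
}

int main(void) { ufile_cleanup_sketch(); return 0; }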
diff --git a/drivers/infiniband/hw/mlx5/dm.c b/drivers/infiniband/hw/mlx5/dm.c
index b4c97fb62abf..9ded2b7c1e31 100644
--- a/drivers/infiniband/hw/mlx5/dm.c
+++ b/drivers/infiniband/hw/mlx5/dm.c
@@ -282,7 +282,7 @@ static struct ib_dm *handle_alloc_dm_memic(struct ib_ucontext *ctx,
int err;
u64 address;
- if (!MLX5_CAP_DEV_MEM(dm_db->dev, memic))
+ if (!dm_db || !MLX5_CAP_DEV_MEM(dm_db->dev, memic))
return ERR_PTR(-EOPNOTSUPP);
dm = kzalloc(sizeof(*dm), GFP_KERNEL);
diff --git a/drivers/infiniband/hw/mlx5/dmah.c b/drivers/infiniband/hw/mlx5/dmah.c
new file mode 100644
index 000000000000..362a88992ffa
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/dmah.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include <linux/pci-tph.h>
+#include "dmah.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+static int mlx5_ib_alloc_dmah(struct ib_dmah *ibdmah,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_core_dev *mdev = to_mdev(ibdmah->device)->mdev;
+ struct mlx5_ib_dmah *dmah = to_mdmah(ibdmah);
+ u16 st_bits = BIT(IB_DMAH_CPU_ID_EXISTS) |
+ BIT(IB_DMAH_MEM_TYPE_EXISTS);
+ int err;
+
+ /* PH is a must for TPH following PCIe spec 6.2-1.0 */
+ if (!(ibdmah->valid_fields & BIT(IB_DMAH_PH_EXISTS)))
+ return -EINVAL;
+
+ /* ST is optional; however, partial data for it is not allowed */
+ if (ibdmah->valid_fields & st_bits) {
+ if ((ibdmah->valid_fields & st_bits) != st_bits)
+ return -EINVAL;
+ err = mlx5_st_alloc_index(mdev, ibdmah->mem_type,
+ ibdmah->cpu_id, &dmah->st_index);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlx5_ib_dealloc_dmah(struct ib_dmah *ibdmah,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_dmah *dmah = to_mdmah(ibdmah);
+ struct mlx5_core_dev *mdev = to_mdev(ibdmah->device)->mdev;
+
+ if (ibdmah->valid_fields & BIT(IB_DMAH_CPU_ID_EXISTS))
+ return mlx5_st_dealloc_index(mdev, dmah->st_index);
+
+ return 0;
+}
+
+const struct ib_device_ops mlx5_ib_dev_dmah_ops = {
+ .alloc_dmah = mlx5_ib_alloc_dmah,
+ .dealloc_dmah = mlx5_ib_dealloc_dmah,
+};
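
The allocation path above enforces "PH is mandatory, the two ST fields come as a pair". A hedged sketch of that bitmask validation, with bit names made up for illustration:

#include <stdint.h>

#define PH_EXISTS	(1u << 0)	/* hypothetical bit names */
#define CPU_ID_EXISTS	(1u << 1)
#define MEM_TYPE_EXISTS	(1u << 2)

static int validate_dmah_fields(uint32_t valid_fields)
{
	const uint32_t st_bits = CPU_ID_EXISTS | MEM_TYPE_EXISTS;

	if (!(valid_fields & PH_EXISTS))
		return -1;	/* PH is required for TPH */
	if ((valid_fields & st_bits) &&
	    (valid_fields & st_bits) != st_bits)
		return -1;	/* partial ST data is rejected */
	return 0;
}

int main(void) { return validate_dmah_fields(PH_EXISTS); }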
diff --git a/drivers/infiniband/hw/mlx5/dmah.h b/drivers/infiniband/hw/mlx5/dmah.h
new file mode 100644
index 000000000000..68de72b4744a
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/dmah.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#ifndef _MLX5_IB_DMAH_H
+#define _MLX5_IB_DMAH_H
+
+#include "mlx5_ib.h"
+
+extern const struct ib_device_ops mlx5_ib_dev_dmah_ops;
+
+struct mlx5_ib_dmah {
+ struct ib_dmah ibdmah;
+ u16 st_index;
+};
+
+static inline struct mlx5_ib_dmah *to_mdmah(struct ib_dmah *ibdmah)
+{
+ return container_of(ibdmah, struct mlx5_ib_dmah, ibdmah);
+}
+
+#endif /* _MLX5_IB_DMAH_H */
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 680627f1de33..b0f7663c24c1 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -1012,14 +1012,14 @@ static int get_per_qp_prio(struct mlx5_ib_dev *dev,
return 0;
}
-static struct mlx5_per_qp_opfc *
-get_per_qp_opfc(struct mlx5_rdma_counter *mcounter, u32 qp_num, bool *new)
+static struct mlx5_per_qp_opfc *get_per_qp_opfc(struct xarray *qpn_opfc_xa,
+ u32 qp_num, bool *new)
{
struct mlx5_per_qp_opfc *per_qp_opfc;
*new = false;
- per_qp_opfc = xa_load(&mcounter->qpn_opfc_xa, qp_num);
+ per_qp_opfc = xa_load(qpn_opfc_xa, qp_num);
if (per_qp_opfc)
return per_qp_opfc;
per_qp_opfc = kzalloc(sizeof(*per_qp_opfc), GFP_KERNEL);
@@ -1032,7 +1032,8 @@ get_per_qp_opfc(struct mlx5_rdma_counter *mcounter, u32 qp_num, bool *new)
}
static int add_op_fc_rules(struct mlx5_ib_dev *dev,
- struct mlx5_rdma_counter *mcounter,
+ struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX],
+ struct xarray *qpn_opfc_xa,
struct mlx5_per_qp_opfc *per_qp_opfc,
struct mlx5_ib_flow_prio *prio,
enum mlx5_ib_optional_counter_type type,
@@ -1055,7 +1056,7 @@ static int add_op_fc_rules(struct mlx5_ib_dev *dev,
return 0;
}
- opfc->fc = mcounter->fc[type];
+ opfc->fc = fc_arr[type];
spec = kcalloc(MAX_OPFC_RULES, sizeof(*spec), GFP_KERNEL);
if (!spec) {
@@ -1148,8 +1149,7 @@ static int add_op_fc_rules(struct mlx5_ib_dev *dev,
}
prio->refcount += spec_num;
- err = xa_err(xa_store(&mcounter->qpn_opfc_xa, qp_num, per_qp_opfc,
- GFP_KERNEL));
+ err = xa_err(xa_store(qpn_opfc_xa, qp_num, per_qp_opfc, GFP_KERNEL));
if (err)
goto del_rules;
@@ -1168,8 +1168,9 @@ null_fc:
return err;
}
-static bool is_fc_shared_and_in_use(struct mlx5_rdma_counter *mcounter,
- u32 type, struct mlx5_fc **fc)
+static bool
+is_fc_shared_and_in_use(struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX], u32 type,
+ struct mlx5_fc **fc)
{
u32 shared_fc_type;
@@ -1190,7 +1191,7 @@ static bool is_fc_shared_and_in_use(struct mlx5_rdma_counter *mcounter,
return false;
}
- *fc = mcounter->fc[shared_fc_type];
+ *fc = fc_arr[shared_fc_type];
if (!(*fc))
return false;
@@ -1198,24 +1199,23 @@ static bool is_fc_shared_and_in_use(struct mlx5_rdma_counter *mcounter,
}
void mlx5r_fs_destroy_fcs(struct mlx5_ib_dev *dev,
- struct rdma_counter *counter)
+ struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX])
{
- struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
struct mlx5_fc *in_use_fc;
int i;
for (i = MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS_PER_QP;
i <= MLX5_IB_OPCOUNTER_RDMA_RX_BYTES_PER_QP; i++) {
- if (!mcounter->fc[i])
+ if (!fc_arr[i])
continue;
- if (is_fc_shared_and_in_use(mcounter, i, &in_use_fc)) {
- mcounter->fc[i] = NULL;
+ if (is_fc_shared_and_in_use(fc_arr, i, &in_use_fc)) {
+ fc_arr[i] = NULL;
continue;
}
- mlx5_fc_destroy(dev->mdev, mcounter->fc[i]);
- mcounter->fc[i] = NULL;
+ mlx5_fc_destroy(dev->mdev, fc_arr[i]);
+ fc_arr[i] = NULL;
}
}
@@ -1359,16 +1359,15 @@ void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
put_per_qp_prio(dev, type);
}
-void mlx5r_fs_unbind_op_fc(struct ib_qp *qp, struct rdma_counter *counter)
+void mlx5r_fs_unbind_op_fc(struct ib_qp *qp, struct xarray *qpn_opfc_xa)
{
- struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
- struct mlx5_ib_dev *dev = to_mdev(counter->device);
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct mlx5_per_qp_opfc *per_qp_opfc;
struct mlx5_ib_op_fc *in_use_opfc;
struct mlx5_ib_flow_prio *prio;
int i, j;
- per_qp_opfc = xa_load(&mcounter->qpn_opfc_xa, qp->qp_num);
+ per_qp_opfc = xa_load(qpn_opfc_xa, qp->qp_num);
if (!per_qp_opfc)
return;
@@ -1394,13 +1393,13 @@ void mlx5r_fs_unbind_op_fc(struct ib_qp *qp, struct rdma_counter *counter)
}
kfree(per_qp_opfc);
- xa_erase(&mcounter->qpn_opfc_xa, qp->qp_num);
+ xa_erase(qpn_opfc_xa, qp->qp_num);
}
-int mlx5r_fs_bind_op_fc(struct ib_qp *qp, struct rdma_counter *counter,
- u32 port)
+int mlx5r_fs_bind_op_fc(struct ib_qp *qp,
+ struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX],
+ struct xarray *qpn_opfc_xa, u32 port)
{
- struct mlx5_rdma_counter *mcounter = to_mcounter(counter);
struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct mlx5_per_qp_opfc *per_qp_opfc;
struct mlx5_ib_flow_prio *prio;
@@ -1410,9 +1409,6 @@ int mlx5r_fs_bind_op_fc(struct ib_qp *qp, struct rdma_counter *counter,
int i, err, per_qp_type;
bool new;
- if (!counter->mode.bind_opcnt)
- return 0;
-
cnts = &dev->port[port - 1].cnts;
for (i = 0; i <= MLX5_IB_OPCOUNTER_RDMA_RX_BYTES; i++) {
@@ -1424,23 +1420,22 @@ int mlx5r_fs_bind_op_fc(struct ib_qp *qp, struct rdma_counter *counter,
prio = get_opfc_prio(dev, per_qp_type);
WARN_ON(!prio->flow_table);
- if (is_fc_shared_and_in_use(mcounter, per_qp_type, &in_use_fc))
- mcounter->fc[per_qp_type] = in_use_fc;
+ if (is_fc_shared_and_in_use(fc_arr, per_qp_type, &in_use_fc))
+ fc_arr[per_qp_type] = in_use_fc;
- if (!mcounter->fc[per_qp_type]) {
- mcounter->fc[per_qp_type] = mlx5_fc_create(dev->mdev,
- false);
- if (IS_ERR(mcounter->fc[per_qp_type]))
- return PTR_ERR(mcounter->fc[per_qp_type]);
+ if (!fc_arr[per_qp_type]) {
+ fc_arr[per_qp_type] = mlx5_fc_create(dev->mdev, false);
+ if (IS_ERR(fc_arr[per_qp_type]))
+ return PTR_ERR(fc_arr[per_qp_type]);
}
- per_qp_opfc = get_per_qp_opfc(mcounter, qp->qp_num, &new);
+ per_qp_opfc = get_per_qp_opfc(qpn_opfc_xa, qp->qp_num, &new);
if (!per_qp_opfc) {
err = -ENOMEM;
goto free_fc;
}
- err = add_op_fc_rules(dev, mcounter, per_qp_opfc, prio,
- per_qp_type, qp->qp_num, port);
+ err = add_op_fc_rules(dev, fc_arr, qpn_opfc_xa, per_qp_opfc,
+ prio, per_qp_type, qp->qp_num, port);
if (err)
goto del_rules;
}
@@ -1448,12 +1443,12 @@ int mlx5r_fs_bind_op_fc(struct ib_qp *qp, struct rdma_counter *counter,
return 0;
del_rules:
- mlx5r_fs_unbind_op_fc(qp, counter);
+ mlx5r_fs_unbind_op_fc(qp, qpn_opfc_xa);
if (new)
kfree(per_qp_opfc);
free_fc:
- if (xa_empty(&mcounter->qpn_opfc_xa))
- mlx5r_fs_destroy_fcs(dev, counter);
+ if (xa_empty(qpn_opfc_xa))
+ mlx5r_fs_destroy_fcs(dev, fc_arr);
return err;
}
@@ -1966,7 +1961,8 @@ _get_flow_table(struct mlx5_ib_dev *dev, u16 user_priority,
break;
case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX:
case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX:
- if (ib_port == 0 || user_priority > MLX5_RDMA_TRANSPORT_BYPASS_PRIO)
+ if (ib_port == 0 ||
+ user_priority >= MLX5_RDMA_TRANSPORT_BYPASS_PRIO)
return ERR_PTR(-EINVAL);
ret = mlx5_ib_fill_transport_ns_info(dev, ns_type, &flags,
&vport_idx, &vport,
@@ -2016,10 +2012,10 @@ _get_flow_table(struct mlx5_ib_dev *dev, u16 user_priority,
prio = &dev->flow_db->rdma_tx[priority];
break;
case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX:
- prio = &dev->flow_db->rdma_transport_rx[ib_port - 1];
+ prio = &dev->flow_db->rdma_transport_rx[priority][ib_port - 1];
break;
case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX:
- prio = &dev->flow_db->rdma_transport_tx[ib_port - 1];
+ prio = &dev->flow_db->rdma_transport_tx[priority][ib_port - 1];
break;
default:
return ERR_PTR(-EINVAL);
}
@@ -2458,7 +2454,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
struct mlx5_ib_dev *dev;
u32 flags;
- if (!capable(CAP_NET_RAW))
+ if (!rdma_uattrs_has_raw_cap(attrs))
return -EPERM;
fs_matcher = uverbs_attr_get_obj(attrs,
@@ -2989,7 +2985,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)(
u32 ft_id;
int err;
- if (!capable(CAP_NET_RAW))
+ if (!rdma_dev_has_raw_cap(&dev->ib_dev))
return -EPERM;
err = uverbs_get_const(&ib_uapi_ft_type, attrs,
@@ -3466,31 +3462,40 @@ static const struct ib_device_ops flow_ops = {
int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
{
+ int i, j;
+
dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
if (!dev->flow_db)
return -ENOMEM;
- dev->flow_db->rdma_transport_rx = kcalloc(dev->num_ports,
- sizeof(struct mlx5_ib_flow_prio),
- GFP_KERNEL);
- if (!dev->flow_db->rdma_transport_rx)
- goto free_flow_db;
+ for (i = 0; i < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; i++) {
+ dev->flow_db->rdma_transport_rx[i] =
+ kcalloc(dev->num_ports,
+ sizeof(struct mlx5_ib_flow_prio), GFP_KERNEL);
+ if (!dev->flow_db->rdma_transport_rx[i])
+ goto free_rdma_transport_rx;
+ }
- dev->flow_db->rdma_transport_tx = kcalloc(dev->num_ports,
- sizeof(struct mlx5_ib_flow_prio),
- GFP_KERNEL);
- if (!dev->flow_db->rdma_transport_tx)
- goto free_rdma_transport_rx;
+ for (j = 0; j < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; j++) {
+ dev->flow_db->rdma_transport_tx[j] =
+ kcalloc(dev->num_ports,
+ sizeof(struct mlx5_ib_flow_prio), GFP_KERNEL);
+ if (!dev->flow_db->rdma_transport_tx[j])
+ goto free_rdma_transport_tx;
+ }
mutex_init(&dev->flow_db->lock);
ib_set_device_ops(&dev->ib_dev, &flow_ops);
return 0;
+free_rdma_transport_tx:
+ while (j--)
+ kfree(dev->flow_db->rdma_transport_tx[j]);
free_rdma_transport_rx:
- kfree(dev->flow_db->rdma_transport_rx);
-free_flow_db:
+ while (i--)
+ kfree(dev->flow_db->rdma_transport_rx[i]);
kfree(dev->flow_db);
return -ENOMEM;
}
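
The error path above uses the classic while (i--) unwind: whatever index the allocation loop reached, exactly the entries that succeeded are freed, in reverse order. A compact demonstration:

#include <stdlib.h>

#define NPRIO 3

static int init_tables(void *tables[NPRIO], size_t nports)
{
	int i;

	for (i = 0; i < NPRIO; i++) {
		tables[i] = calloc(nports, sizeof(void *));
		if (!tables[i])
			goto unwind;
	}
	return 0;

unwind:
	while (i--)		/* frees indices i-1 down to 0 only */
		free(tables[i]);
	return -1;
}

int main(void)
{
	void *tables[NPRIO];

	return init_tables(tables, 4);
}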
diff --git a/drivers/infiniband/hw/mlx5/fs.h b/drivers/infiniband/hw/mlx5/fs.h
index 2ebe86e5be10..7abba0e2837c 100644
--- a/drivers/infiniband/hw/mlx5/fs.h
+++ b/drivers/infiniband/hw/mlx5/fs.h
@@ -13,6 +13,8 @@ void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev);
static inline void mlx5_ib_fs_cleanup(struct mlx5_ib_dev *dev)
{
+ int i;
+
/* When a steering anchor is created, a special flow table is also
* created for the user to reference. Since the user can reference it,
* the kernel cannot trust that when the user destroys the steering
@@ -25,8 +27,10 @@ static inline void mlx5_ib_fs_cleanup(struct mlx5_ib_dev *dev)
* is a safe assumption that all references are gone.
*/
mlx5_ib_fs_cleanup_anchor(dev);
- kfree(dev->flow_db->rdma_transport_tx);
- kfree(dev->flow_db->rdma_transport_rx);
+ for (i = 0; i < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; i++)
+ kfree(dev->flow_db->rdma_transport_tx[i]);
+ for (i = 0; i < MLX5_RDMA_TRANSPORT_BYPASS_PRIO; i++)
+ kfree(dev->flow_db->rdma_transport_rx[i]);
kfree(dev->flow_db);
}
#endif /* _MLX5_IB_FS_H */
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 49af1cfbe6d1..cc8859d3c2f5 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -88,7 +88,8 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
else
return mlx5_ib_set_vport_rep(lag_master, rep, vport_index);
- ibdev = ib_alloc_device(mlx5_ib_dev, ib_dev);
+ ibdev = ib_alloc_device_with_net(mlx5_ib_dev, ib_dev,
+ mlx5_core_net(lag_master));
if (!ibdev)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index ce7610740412..d456e4fde3e1 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -50,6 +50,7 @@
#include <rdma/ib_ucaps.h>
#include "macsec.h"
#include "data_direct.h"
+#include "dmah.h"
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
@@ -1791,6 +1792,33 @@ static void deallocate_uars(struct mlx5_ib_dev *dev,
context->devx_uid);
}
+static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
+ struct mlx5_core_dev *slave)
+{
+ int err;
+
+ err = mlx5_nic_vport_update_local_lb(master, true);
+ if (err)
+ return err;
+
+ err = mlx5_nic_vport_update_local_lb(slave, true);
+ if (err)
+ goto out;
+
+ return 0;
+
+out:
+ mlx5_nic_vport_update_local_lb(master, false);
+ return err;
+}
+
+static void mlx5_ib_disable_lb_mp(struct mlx5_core_dev *master,
+ struct mlx5_core_dev *slave)
+{
+ mlx5_nic_vport_update_local_lb(slave, false);
+ mlx5_nic_vport_update_local_lb(master, false);
+}
+
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
{
int err = 0;
@@ -3495,6 +3523,8 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
lockdep_assert_held(&mlx5_ib_multiport_mutex);
+ mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev);
+
mlx5_core_mp_event_replay(ibdev->mdev,
MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
NULL);
@@ -3590,6 +3620,10 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
MLX5_DRIVER_EVENT_AFFILIATION_DONE,
&key);
+ err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev);
+ if (err)
+ goto unbind;
+
return true;
unbind:
@@ -4157,7 +4191,9 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
.modify_port = mlx5_ib_modify_port,
.modify_qp = mlx5_ib_modify_qp,
.modify_srq = mlx5_ib_modify_srq,
+ .pre_destroy_cq = mlx5_ib_pre_destroy_cq,
.poll_cq = mlx5_ib_poll_cq,
+ .post_destroy_cq = mlx5_ib_post_destroy_cq,
.post_recv = mlx5_ib_post_recv_nodrain,
.post_send = mlx5_ib_post_send_nodrain,
.post_srq_recv = mlx5_ib_post_srq_recv,
@@ -4179,6 +4215,7 @@ static const struct ib_device_ops mlx5_ib_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs),
INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_dmah, mlx5_ib_dmah, ibdmah),
INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_qp, mlx5_ib_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
@@ -4306,6 +4343,9 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
+ if (mdev->st)
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dmah_ops);
+
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
@@ -4791,7 +4831,8 @@ static struct ib_device *mlx5_ib_add_sub_dev(struct ib_device *parent,
!MLX5_CAP_GEN_2(mparent->mdev, multiplane_qp_ud))
return ERR_PTR(-EOPNOTSUPP);
- mplane = ib_alloc_device(mlx5_ib_dev, ib_dev);
+ mplane = ib_alloc_device_with_net(mlx5_ib_dev, ib_dev,
+ mlx5_core_net(mparent->mdev));
if (!mplane)
return ERR_PTR(-ENOMEM);
@@ -4905,7 +4946,8 @@ static int mlx5r_probe(struct auxiliary_device *adev,
num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
MLX5_CAP_GEN(mdev, num_vhca_ports));
- dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
+ dev = ib_alloc_device_with_net(mlx5_ib_dev, ib_dev,
+ mlx5_core_net(mdev));
if (!dev)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index fde859d207ae..7ffc7ee92cf0 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -104,19 +104,6 @@ unsigned long __mlx5_umem_find_best_quantized_pgoff(
__mlx5_bit_sz(typ, page_offset_fld), 0, scale, \
page_offset_quantized)
-static inline unsigned long
-mlx5_umem_dmabuf_find_best_pgsz(struct ib_umem_dmabuf *umem_dmabuf)
-{
- /*
- * mkeys used for dmabuf are fixed at PAGE_SIZE because we must be able
- * to hold any sgl after a move operation. Ideally the mkc page size
- * could be changed at runtime to be optimal, but right now the driver
- * cannot do that.
- */
- return ib_umem_find_best_pgsz(&umem_dmabuf->umem, PAGE_SIZE,
- umem_dmabuf->umem.iova);
-}
-
enum {
MLX5_IB_MMAP_OFFSET_START = 9,
MLX5_IB_MMAP_OFFSET_END = 255,
@@ -320,8 +307,8 @@ struct mlx5_ib_flow_db {
struct mlx5_ib_flow_prio rdma_tx[MLX5_IB_NUM_FLOW_FT];
struct mlx5_ib_flow_prio opfcs[MLX5_IB_OPCOUNTER_MAX];
struct mlx5_flow_table *lag_demux_ft;
- struct mlx5_ib_flow_prio *rdma_transport_rx;
- struct mlx5_ib_flow_prio *rdma_transport_tx;
+ struct mlx5_ib_flow_prio *rdma_transport_rx[MLX5_RDMA_TRANSPORT_BYPASS_PRIO];
+ struct mlx5_ib_flow_prio *rdma_transport_tx[MLX5_RDMA_TRANSPORT_BYPASS_PRIO];
/* Protect flow steering bypass flow tables
* when add/del flow rules.
* only single add/removal of flow steering rule could be done
@@ -352,6 +339,7 @@ struct mlx5_ib_flow_db {
#define MLX5_IB_UPD_XLT_ACCESS BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT BIT(6)
#define MLX5_IB_UPD_XLT_DOWNGRADE BIT(7)
+#define MLX5_IB_UPD_XLT_KEEP_PGSZ BIT(8)
/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
*
@@ -650,8 +638,13 @@ enum mlx5_mkey_type {
MLX5_MKEY_IMPLICIT_CHILD,
};
+/* Used for non-existent ph value */
+#define MLX5_IB_NO_PH 0xff
+
struct mlx5r_cache_rb_key {
u8 ats:1;
+ u8 ph;
+ u16 st_index;
unsigned int access_mode;
unsigned int access_flags;
unsigned int ndescs;
@@ -739,6 +732,8 @@ struct mlx5_ib_mr {
struct mlx5_ib_mr *dd_crossed_mr;
struct list_head dd_node;
u8 revoked :1;
+ /* Indicates previous dmabuf page fault occurred */
+ u8 dmabuf_faulted:1;
struct mlx5_ib_mkey null_mmkey;
};
};
@@ -899,13 +894,14 @@ void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
struct mlx5_ib_op_fc *opfc,
enum mlx5_ib_optional_counter_type type);
-int mlx5r_fs_bind_op_fc(struct ib_qp *qp, struct rdma_counter *counter,
- u32 port);
+int mlx5r_fs_bind_op_fc(struct ib_qp *qp,
+ struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX],
+ struct xarray *qpn_opfc_xa, u32 port);
-void mlx5r_fs_unbind_op_fc(struct ib_qp *qp, struct rdma_counter *counter);
+void mlx5r_fs_unbind_op_fc(struct ib_qp *qp, struct xarray *qpn_opfc_xa);
void mlx5r_fs_destroy_fcs(struct mlx5_ib_dev *dev,
- struct rdma_counter *counter);
+ struct mlx5_fc *fc_arr[MLX5_IB_OPCOUNTER_MAX]);
struct mlx5_ib_multiport_info;
@@ -1372,16 +1368,20 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct uverbs_attr_bundle *attrs);
int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int mlx5_ib_pre_destroy_cq(struct ib_cq *cq);
+void mlx5_ib_post_destroy_cq(struct ib_cq *cq);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
u64 length, u64 virt_addr,
int fd, int access_flags,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs);
int mlx5_ib_advise_mr(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
@@ -1748,20 +1748,71 @@ static inline u32 smi_to_native_portnum(struct mlx5_ib_dev *dev, u32 port)
return (port - 1) / dev->num_ports + 1;
}
+static inline unsigned int get_max_log_entity_size_cap(struct mlx5_ib_dev *dev,
+ int access_mode)
+{
+ int max_log_size = 0;
+
+ if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
+ max_log_size =
+ MLX5_CAP_GEN_2(dev->mdev, max_mkey_log_entity_size_mtt);
+ else if (access_mode == MLX5_MKC_ACCESS_MODE_KSM)
+ max_log_size = MLX5_CAP_GEN_2(
+ dev->mdev, max_mkey_log_entity_size_fixed_buffer);
+
+ if (!max_log_size ||
+ (max_log_size > 31 &&
+ !MLX5_CAP_GEN_2(dev->mdev, umr_log_entity_size_5)))
+ max_log_size = 31;
+
+ return max_log_size;
+}
+
+static inline unsigned int get_min_log_entity_size_cap(struct mlx5_ib_dev *dev,
+ int access_mode)
+{
+ int min_log_size = 0;
+
+ if (access_mode == MLX5_MKC_ACCESS_MODE_KSM &&
+ MLX5_CAP_GEN_2(dev->mdev,
+ min_mkey_log_entity_size_fixed_buffer_valid))
+ min_log_size = MLX5_CAP_GEN_2(
+ dev->mdev, min_mkey_log_entity_size_fixed_buffer);
+ else
+ min_log_size =
+ MLX5_CAP_GEN_2(dev->mdev, log_min_mkey_entity_size);
+
+ min_log_size = max(min_log_size, MLX5_ADAPTER_PAGE_SHIFT);
+ return min_log_size;
+}
+
/*
* For mkc users, instead of a page_offset the command has a start_iova which
* specifies both the page_offset and the on-the-wire IOVA
*/
static __always_inline unsigned long
mlx5_umem_mkc_find_best_pgsz(struct mlx5_ib_dev *dev, struct ib_umem *umem,
- u64 iova)
+ u64 iova, int access_mode)
{
- int page_size_bits =
- MLX5_CAP_GEN_2(dev->mdev, umr_log_entity_size_5) ? 6 : 5;
- unsigned long bitmap =
- __mlx5_log_page_size_to_bitmap(page_size_bits, 0);
+ unsigned int max_log_entity_size_cap, min_log_entity_size_cap;
+ unsigned long bitmap;
+
+ max_log_entity_size_cap = get_max_log_entity_size_cap(dev, access_mode);
+ min_log_entity_size_cap = get_min_log_entity_size_cap(dev, access_mode);
+
+ bitmap = GENMASK_ULL(max_log_entity_size_cap, min_log_entity_size_cap);
return ib_umem_find_best_pgsz(umem, bitmap, iova);
}
+static inline unsigned long
+mlx5_umem_dmabuf_find_best_pgsz(struct ib_umem_dmabuf *umem_dmabuf,
+ int access_mode)
+{
+ return mlx5_umem_mkc_find_best_pgsz(to_mdev(umem_dmabuf->umem.ibdev),
+ &umem_dmabuf->umem,
+ umem_dmabuf->umem.iova,
+ access_mode);
+}
+
#endif /* MLX5_IB_H */
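
The reworked helper above builds the candidate page-size set as GENMASK_ULL(max_log, min_log) and leaves the final choice to ib_umem_find_best_pgsz(); bit n set means a 2^n-byte page size is permitted. A userspace sketch of the bitmap step:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	unsigned int min_log = 12, max_log = 31;	/* 4 KiB .. 2 GiB */
	uint64_t bitmap = GENMASK_ULL(max_log, min_log);

	printf("page-size bitmap: 0x%016llx\n",
	       (unsigned long long)bitmap);		/* bits 12..31 set */
	return 0;
}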
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 57f9bc2a4a3a..1317f2cb38a4 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -44,6 +44,7 @@
#include "mlx5_ib.h"
#include "umr.h"
#include "data_direct.h"
+#include "dmah.h"
enum {
MAX_PENDING_REG_MR = 8,
@@ -57,7 +58,7 @@ create_mkey_callback(int status, struct mlx5_async_work *context);
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
u64 iova, int access_flags,
unsigned long page_size, bool populate,
- int access_mode);
+ int access_mode, u16 st_index, u8 ph);
static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr);
static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
@@ -256,6 +257,14 @@ static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
get_mkc_octo_size(ent->rb_key.access_mode,
ent->rb_key.ndescs));
MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
+
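+	/* PCIe TPH (TLP Processing Hints): a processing hint, plus an optional
+	 * steering tag index, lets the device tag its DMA so the root complex
+	 * can steer the writes toward a particular cache or CPU.
+	 */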
+ if (ent->rb_key.ph != MLX5_IB_NO_PH) {
+ MLX5_SET(mkc, mkc, pcie_tph_en, 1);
+ MLX5_SET(mkc, mkc, pcie_tph_ph, ent->rb_key.ph);
+ if (ent->rb_key.st_index != MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX)
+ MLX5_SET(mkc, mkc, pcie_tph_steering_tag_index,
+ ent->rb_key.st_index);
+ }
}
/* Asynchronously schedule new MRs to be populated in the cache. */
@@ -641,6 +650,14 @@ static int cache_ent_key_cmp(struct mlx5r_cache_rb_key key1,
if (res)
return res;
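+	/* The TPH attributes are part of the cache key: mkeys created with
+	 * different steering tags or processing hints must never be shared.
+	 */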
+ res = key1.st_index - key2.st_index;
+ if (res)
+ return res;
+
+ res = key1.ph - key2.ph;
+ if (res)
+ return res;
+
/*
* keep ndescs the last in the compare table since the find function
* searches for an exact match on all properties and only closest
@@ -712,6 +729,8 @@ mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev,
smallest->rb_key.access_mode == rb_key.access_mode &&
smallest->rb_key.access_flags == rb_key.access_flags &&
smallest->rb_key.ats == rb_key.ats &&
+ smallest->rb_key.st_index == rb_key.st_index &&
+ smallest->rb_key.ph == rb_key.ph &&
smallest->rb_key.ndescs <= ndescs_limit) ?
smallest :
NULL;
@@ -786,7 +805,8 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
struct mlx5r_cache_rb_key rb_key = {
.ndescs = ndescs,
.access_mode = access_mode,
- .access_flags = get_unchangeable_access_flags(dev, access_flags)
+ .access_flags = get_unchangeable_access_flags(dev, access_flags),
+ .ph = MLX5_IB_NO_PH,
};
struct mlx5_cache_ent *ent = mkey_cache_ent_from_rb_key(dev, rb_key);
@@ -943,6 +963,7 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
struct rb_root *root = &dev->cache.rb_root;
struct mlx5r_cache_rb_key rb_key = {
.access_mode = MLX5_MKC_ACCESS_MODE_MTT,
+ .ph = MLX5_IB_NO_PH,
};
struct mlx5_cache_ent *ent;
struct rb_node *node;
@@ -1119,7 +1140,8 @@ static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem,
static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
struct ib_umem *umem, u64 iova,
- int access_flags, int access_mode)
+ int access_flags, int access_mode,
+ u16 st_index, u8 ph)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5r_cache_rb_key rb_key = {};
@@ -1130,7 +1152,8 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
if (umem->is_dmabuf)
page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
else
- page_size = mlx5_umem_mkc_find_best_pgsz(dev, umem, iova);
+ page_size = mlx5_umem_mkc_find_best_pgsz(dev, umem, iova,
+ access_mode);
if (WARN_ON(!page_size))
return ERR_PTR(-EINVAL);
@@ -1138,6 +1161,8 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
rb_key.ndescs = ib_umem_num_dma_blocks(umem, page_size);
rb_key.ats = mlx5_umem_needs_ats(dev, umem, access_flags);
rb_key.access_flags = get_unchangeable_access_flags(dev, access_flags);
+ rb_key.st_index = st_index;
+ rb_key.ph = ph;
ent = mkey_cache_ent_from_rb_key(dev, rb_key);
/*
* If the MR can't come from the cache then synchronously create an uncached
@@ -1145,7 +1170,8 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
*/
if (!ent) {
mutex_lock(&dev->slow_path_mutex);
- mr = reg_create(pd, umem, iova, access_flags, page_size, false, access_mode);
+ mr = reg_create(pd, umem, iova, access_flags, page_size, false, access_mode,
+ st_index, ph);
mutex_unlock(&dev->slow_path_mutex);
if (IS_ERR(mr))
return mr;
@@ -1230,7 +1256,7 @@ err_1:
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
u64 iova, int access_flags,
unsigned long page_size, bool populate,
- int access_mode)
+ int access_mode, u16 st_index, u8 ph)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_mr *mr;
@@ -1240,7 +1266,8 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
u32 *in;
int err;
bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg)) &&
- (access_mode == MLX5_MKC_ACCESS_MODE_MTT);
+ (access_mode == MLX5_MKC_ACCESS_MODE_MTT) &&
+ (ph == MLX5_IB_NO_PH);
bool ksm_mode = (access_mode == MLX5_MKC_ACCESS_MODE_KSM);
if (!page_size)
@@ -1304,6 +1331,13 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
get_octo_len(iova, umem->length, mr->page_shift));
}
+ if (ph != MLX5_IB_NO_PH) {
+ MLX5_SET(mkc, mkc, pcie_tph_en, 1);
+ MLX5_SET(mkc, mkc, pcie_tph_ph, ph);
+ if (st_index != MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX)
+ MLX5_SET(mkc, mkc, pcie_tph_steering_tag_index, st_index);
+ }
+
err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
if (err) {
mlx5_ib_warn(dev, "create mkey failed\n");
@@ -1423,24 +1457,37 @@ struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
}
static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
- u64 iova, int access_flags)
+ u64 iova, int access_flags,
+ struct ib_dmah *dmah)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_mr *mr = NULL;
bool xlt_with_umr;
+ u16 st_index = MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX;
+ u8 ph = MLX5_IB_NO_PH;
int err;
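+
+	/* A DMA handle always carries the processing hint; the steering tag
+	 * index is meaningful only when the handle was created with a CPU id,
+	 * hence the IB_DMAH_CPU_ID_EXISTS check below.
+	 */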
+ if (dmah) {
+ struct mlx5_ib_dmah *mdmah = to_mdmah(dmah);
+
+ ph = dmah->ph;
+ if (dmah->valid_fields & BIT(IB_DMAH_CPU_ID_EXISTS))
+ st_index = mdmah->st_index;
+ }
+
xlt_with_umr = mlx5r_umr_can_load_pas(dev, umem->length);
if (xlt_with_umr) {
mr = alloc_cacheable_mr(pd, umem, iova, access_flags,
- MLX5_MKC_ACCESS_MODE_MTT);
+ MLX5_MKC_ACCESS_MODE_MTT,
+ st_index, ph);
} else {
- unsigned long page_size =
- mlx5_umem_mkc_find_best_pgsz(dev, umem, iova);
+ unsigned long page_size = mlx5_umem_mkc_find_best_pgsz(
+ dev, umem, iova, MLX5_MKC_ACCESS_MODE_MTT);
mutex_lock(&dev->slow_path_mutex);
mr = reg_create(pd, umem, iova, access_flags, page_size,
- true, MLX5_MKC_ACCESS_MODE_MTT);
+ true, MLX5_MKC_ACCESS_MODE_MTT,
+ st_index, ph);
mutex_unlock(&dev->slow_path_mutex);
}
if (IS_ERR(mr)) {
@@ -1504,7 +1551,9 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_CAST(odp);
mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags,
- MLX5_MKC_ACCESS_MODE_MTT);
+ MLX5_MKC_ACCESS_MODE_MTT,
+ MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX,
+ MLX5_IB_NO_PH);
if (IS_ERR(mr)) {
ib_umem_release(&odp->umem);
return ERR_CAST(mr);
@@ -1528,13 +1577,15 @@ err_dereg_mr:
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 iova, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct ib_umem *umem;
int err;
- if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
+ if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) ||
+ ((access_flags & IB_ACCESS_ON_DEMAND) && dmah))
return ERR_PTR(-EOPNOTSUPP);
mlx5_ib_dbg(dev, "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
@@ -1550,7 +1601,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
umem = ib_umem_get(&dev->ib_dev, start, length, access_flags);
if (IS_ERR(umem))
return ERR_CAST(umem);
- return create_real_mr(pd, umem, iova, access_flags);
+ return create_real_mr(pd, umem, iova, access_flags, dmah);
}
static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
@@ -1575,12 +1626,15 @@ static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops = {
static struct ib_mr *
reg_user_mr_dmabuf(struct ib_pd *pd, struct device *dma_device,
u64 offset, u64 length, u64 virt_addr,
- int fd, int access_flags, int access_mode)
+ int fd, int access_flags, int access_mode,
+ struct ib_dmah *dmah)
{
bool pinned_mode = (access_mode == MLX5_MKC_ACCESS_MODE_KSM);
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_mr *mr = NULL;
struct ib_umem_dmabuf *umem_dmabuf;
+ u16 st_index = MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX;
+ u8 ph = MLX5_IB_NO_PH;
int err;
err = mlx5r_umr_resource_init(dev);
@@ -1603,8 +1657,17 @@ reg_user_mr_dmabuf(struct ib_pd *pd, struct device *dma_device,
return ERR_CAST(umem_dmabuf);
}
+ if (dmah) {
+ struct mlx5_ib_dmah *mdmah = to_mdmah(dmah);
+
+ ph = dmah->ph;
+ if (dmah->valid_fields & BIT(IB_DMAH_CPU_ID_EXISTS))
+ st_index = mdmah->st_index;
+ }
+
mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr,
- access_flags, access_mode);
+ access_flags, access_mode,
+ st_index, ph);
if (IS_ERR(mr)) {
ib_umem_release(&umem_dmabuf->umem);
return ERR_CAST(mr);
@@ -1661,7 +1724,8 @@ reg_user_mr_dmabuf_by_data_direct(struct ib_pd *pd, u64 offset,
access_flags &= ~IB_ACCESS_RELAXED_ORDERING;
crossed_mr = reg_user_mr_dmabuf(pd, &data_direct_dev->pdev->dev,
offset, length, virt_addr, fd,
- access_flags, MLX5_MKC_ACCESS_MODE_KSM);
+ access_flags, MLX5_MKC_ACCESS_MODE_KSM,
+ NULL);
if (IS_ERR(crossed_mr)) {
ret = PTR_ERR(crossed_mr);
goto end;
@@ -1688,6 +1752,7 @@ end:
struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
u64 length, u64 virt_addr,
int fd, int access_flags,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
@@ -1720,7 +1785,8 @@ struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
return reg_user_mr_dmabuf(pd, pd->device->dma_device,
offset, length, virt_addr,
- fd, access_flags, MLX5_MKC_ACCESS_MODE_MTT);
+ fd, access_flags, MLX5_MKC_ACCESS_MODE_MTT,
+ dmah);
}
/*
@@ -1754,7 +1820,8 @@ static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
if (!mlx5r_umr_can_load_pas(dev, new_umem->length))
return false;
- *page_size = mlx5_umem_mkc_find_best_pgsz(dev, new_umem, iova);
+ *page_size = mlx5_umem_mkc_find_best_pgsz(
+ dev, new_umem, iova, mr->mmkey.cache_ent->rb_key.access_mode);
if (WARN_ON(!*page_size))
return false;
return (mr->mmkey.cache_ent->rb_key.ndescs) >=
@@ -1817,7 +1884,8 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
struct mlx5_ib_mr *mr = to_mmr(ib_mr);
int err;
- if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) || mr->data_direct)
+ if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) || mr->data_direct ||
+ mr->mmkey.rb_key.ph != MLX5_IB_NO_PH)
return ERR_PTR(-EOPNOTSUPP);
mlx5_ib_dbg(
@@ -1861,7 +1929,7 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);
return create_real_mr(new_pd, umem, mr->ibmr.iova,
- new_access_flags);
+ new_access_flags, NULL);
}
/*
@@ -1892,7 +1960,7 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
}
return NULL;
}
- return create_real_mr(new_pd, new_umem, iova, new_access_flags);
+ return create_real_mr(new_pd, new_umem, iova, new_access_flags, NULL);
}
/*
@@ -1901,7 +1969,7 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
*/
recreate:
return mlx5_ib_reg_user_mr(new_pd, start, length, iova,
- new_access_flags, udata);
+ new_access_flags, NULL, udata);
}
static int
@@ -2027,23 +2095,50 @@ void mlx5_ib_revoke_data_direct_mrs(struct mlx5_ib_dev *dev)
}
}
-static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+static int mlx5_umr_revoke_mr_with_lock(struct mlx5_ib_mr *mr)
{
- struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
- struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
- bool is_odp = is_odp_mr(mr);
bool is_odp_dma_buf = is_dmabuf_mr(mr) &&
- !to_ib_umem_dmabuf(mr->umem)->pinned;
- bool from_cache = !!ent;
- int ret = 0;
+ !to_ib_umem_dmabuf(mr->umem)->pinned;
+ bool is_odp = is_odp_mr(mr);
+ int ret;
if (is_odp)
mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);
if (is_odp_dma_buf)
- dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv, NULL);
+ dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv,
+ NULL);
+
+ ret = mlx5r_umr_revoke_mr(mr);
- if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) {
+ if (is_odp) {
+ if (!ret)
+ to_ib_umem_odp(mr->umem)->private = NULL;
+ mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex);
+ }
+
+ if (is_odp_dma_buf) {
+ if (!ret)
+ to_ib_umem_dmabuf(mr->umem)->private = NULL;
+ dma_resv_unlock(
+ to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv);
+ }
+
+ return ret;
+}
+
+static int mlx5r_handle_mkey_cleanup(struct mlx5_ib_mr *mr)
+{
+ bool is_odp_dma_buf = is_dmabuf_mr(mr) &&
+ !to_ib_umem_dmabuf(mr->umem)->pinned;
+ struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+ struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
+ bool is_odp = is_odp_mr(mr);
+ bool from_cache = !!ent;
+ int ret;
+
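+	/* Try to revoke the mkey via UMR and park it in the cache; if either
+	 * step fails, fall through and destroy the mkey instead.
+	 */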
+ if (mr->mmkey.cacheable && !mlx5_umr_revoke_mr_with_lock(mr) &&
+ !cache_ent_find_and_store(dev, mr)) {
ent = mr->mmkey.cache_ent;
/* upon storing to a clean temp entry - schedule its cleanup */
spin_lock_irq(&ent->mkeys_queue.lock);
@@ -2055,7 +2150,7 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
ent->tmp_cleanup_scheduled = true;
}
spin_unlock_irq(&ent->mkeys_queue.lock);
- goto out;
+ return 0;
}
if (ent) {
@@ -2064,8 +2159,14 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
mr->mmkey.cache_ent = NULL;
spin_unlock_irq(&ent->mkeys_queue.lock);
}
+
+ if (is_odp)
+ mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);
+
+ if (is_odp_dma_buf)
+ dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv,
+ NULL);
ret = destroy_mkey(dev, mr);
-out:
if (is_odp) {
if (!ret)
to_ib_umem_odp(mr->umem)->private = NULL;
@@ -2075,9 +2176,9 @@ out:
if (is_odp_dma_buf) {
if (!ret)
to_ib_umem_dmabuf(mr->umem)->private = NULL;
- dma_resv_unlock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv);
+ dma_resv_unlock(
+ to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv);
}
-
return ret;
}
@@ -2126,7 +2227,7 @@ static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr)
}
/* Stop DMA */
- rc = mlx5_revoke_mr(mr);
+ rc = mlx5r_handle_mkey_cleanup(mr);
if (rc)
return rc;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index eaa2f9f5f3a9..0e8ae85af5a6 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -259,8 +259,8 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
}
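+	/* The lock held here protects imr->implicit_children, not
+	 * dev->odp_mkeys, so the self-locking xa_* helpers must be used for
+	 * the latter.
+	 */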
if (MLX5_CAP_ODP(mr_to_mdev(mr)->mdev, mem_page_fault))
- __xa_erase(&mr_to_mdev(mr)->odp_mkeys,
- mlx5_base_mkey(mr->mmkey.key));
+ xa_erase(&mr_to_mdev(mr)->odp_mkeys,
+ mlx5_base_mkey(mr->mmkey.key));
xa_unlock(&imr->implicit_children);
/* Freeing a MR is a sleeping operation, so bounce to a work queue */
@@ -532,8 +532,8 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
}
if (MLX5_CAP_ODP(dev->mdev, mem_page_fault)) {
- ret = __xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
- &mr->mmkey, GFP_KERNEL);
+ ret = xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key),
+ &mr->mmkey, GFP_KERNEL);
if (xa_is_err(ret)) {
ret = ERR_PTR(xa_err(ret));
__xa_erase(&imr->implicit_children, idx);
@@ -836,9 +836,13 @@ static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
u32 *bytes_mapped, u32 flags)
{
struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
+ int access_mode = mr->data_direct ? MLX5_MKC_ACCESS_MODE_KSM :
+ MLX5_MKC_ACCESS_MODE_MTT;
+ unsigned int old_page_shift = mr->page_shift;
+ unsigned int page_shift;
+ unsigned long page_size;
u32 xlt_flags = 0;
int err;
- unsigned long page_size;
if (flags & MLX5_PF_FLAGS_ENABLE)
xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
@@ -850,20 +854,33 @@ static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
return err;
}
- page_size = mlx5_umem_dmabuf_find_best_pgsz(umem_dmabuf);
+ page_size = mlx5_umem_dmabuf_find_best_pgsz(umem_dmabuf, access_mode);
if (!page_size) {
ib_umem_dmabuf_unmap_pages(umem_dmabuf);
err = -EINVAL;
} else {
- if (mr->data_direct)
- err = mlx5r_umr_update_data_direct_ksm_pas(mr, xlt_flags);
- else
- err = mlx5r_umr_update_mr_pas(mr, xlt_flags);
+ page_shift = order_base_2(page_size);
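+		/* If the mkey already holds live translations from a previous
+		 * fault, a page size change must go through the multi-step
+		 * zap-and-update flow rather than a plain PAS update.
+		 */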
+ if (page_shift != mr->page_shift && mr->dmabuf_faulted) {
+ err = mlx5r_umr_dmabuf_update_pgsz(mr, xlt_flags,
+ page_shift);
+ } else {
+ mr->page_shift = page_shift;
+ if (mr->data_direct)
+ err = mlx5r_umr_update_data_direct_ksm_pas(
+ mr, xlt_flags);
+ else
+ err = mlx5r_umr_update_mr_pas(mr,
+ xlt_flags);
+ }
}
dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
- if (err)
+ if (err) {
+ mr->page_shift = old_page_shift;
return err;
+ }
+
+ mr->dmabuf_faulted = 1;
if (bytes_mapped)
*bytes_mapped += bcnt;
@@ -1866,6 +1883,7 @@ int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev)
struct mlx5r_cache_rb_key rb_key = {
.access_mode = MLX5_MKC_ACCESS_MODE_KSM,
.ndescs = mlx5_imr_ksm_entries,
+ .ph = MLX5_IB_NO_PH,
};
struct mlx5_cache_ent *ent;
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index 5be4426a2884..7ef35cddce81 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -32,13 +32,15 @@ static __be64 get_umr_disable_mr_mask(void)
return cpu_to_be64(result);
}
-static __be64 get_umr_update_translation_mask(void)
+static __be64 get_umr_update_translation_mask(struct mlx5_ib_dev *dev)
{
u64 result;
result = MLX5_MKEY_MASK_LEN |
MLX5_MKEY_MASK_PAGE_SIZE |
MLX5_MKEY_MASK_START_ADDR;
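+	/* Devices with the umr_log_entity_size_5 capability need the extra
+	 * mask bit so UMR may set page sizes of 4G (2^32) and above.
+	 */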
+ if (MLX5_CAP_GEN_2(dev->mdev, umr_log_entity_size_5))
+ result |= MLX5_MKEY_MASK_PAGE_SIZE_5;
return cpu_to_be64(result);
}
@@ -654,9 +656,12 @@ static void mlx5r_umr_final_update_xlt(struct mlx5_ib_dev *dev,
flags & MLX5_IB_UPD_XLT_ENABLE || flags & MLX5_IB_UPD_XLT_ADDR;
if (update_translation) {
- wqe->ctrl_seg.mkey_mask |= get_umr_update_translation_mask();
+ wqe->ctrl_seg.mkey_mask |= get_umr_update_translation_mask(dev);
if (!mr->ibmr.length)
MLX5_SET(mkc, &wqe->mkey_seg, length64, 1);
+ if (flags & MLX5_IB_UPD_XLT_KEEP_PGSZ)
+ wqe->ctrl_seg.mkey_mask &=
+ cpu_to_be64(~MLX5_MKEY_MASK_PAGE_SIZE);
}
wqe->ctrl_seg.xlt_octowords =
@@ -664,46 +669,78 @@ static void mlx5r_umr_final_update_xlt(struct mlx5_ib_dev *dev,
wqe->data_seg.byte_count = cpu_to_be32(sg->length);
}
+static void
+_mlx5r_umr_init_wqe(struct mlx5_ib_mr *mr, struct mlx5r_umr_wqe *wqe,
+ struct ib_sge *sg, unsigned int flags,
+ unsigned int page_shift, bool dd)
+{
+ struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+
+ mlx5r_umr_set_update_xlt_ctrl_seg(&wqe->ctrl_seg, flags, sg);
+ mlx5r_umr_set_update_xlt_mkey_seg(dev, &wqe->mkey_seg, mr, page_shift);
+ if (dd) /* Use the data direct internal kernel PD */
+ MLX5_SET(mkc, &wqe->mkey_seg, pd, dev->ddr.pdn);
+ mlx5r_umr_set_update_xlt_data_seg(&wqe->data_seg, sg);
+}
+
static int
-_mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags, bool dd)
+_mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags, bool dd,
+ size_t start_block, size_t nblocks)
{
size_t ent_size = dd ? sizeof(struct mlx5_ksm) : sizeof(struct mlx5_mtt);
struct mlx5_ib_dev *dev = mr_to_mdev(mr);
struct device *ddev = &dev->mdev->pdev->dev;
struct mlx5r_umr_wqe wqe = {};
+ size_t processed_blocks = 0;
struct ib_block_iter biter;
+ size_t cur_block_idx = 0;
struct mlx5_ksm *cur_ksm;
struct mlx5_mtt *cur_mtt;
size_t orig_sg_length;
+ size_t total_blocks;
size_t final_size;
void *curr_entry;
struct ib_sge sg;
void *entry;
- u64 offset = 0;
+ u64 offset;
int err = 0;
- entry = mlx5r_umr_create_xlt(dev, &sg,
- ib_umem_num_dma_blocks(mr->umem, 1 << mr->page_shift),
- ent_size, flags);
+ total_blocks = ib_umem_num_dma_blocks(mr->umem, 1UL << mr->page_shift);
+ if (start_block > total_blocks)
+ return -EINVAL;
+
+ /* nblocks 0 means update all blocks starting from start_block */
+ if (nblocks)
+ total_blocks = nblocks;
+
+ entry = mlx5r_umr_create_xlt(dev, &sg, total_blocks, ent_size, flags);
if (!entry)
return -ENOMEM;
orig_sg_length = sg.length;
- mlx5r_umr_set_update_xlt_ctrl_seg(&wqe.ctrl_seg, flags, &sg);
- mlx5r_umr_set_update_xlt_mkey_seg(dev, &wqe.mkey_seg, mr,
- mr->page_shift);
- if (dd) {
- /* Use the data direct internal kernel PD */
- MLX5_SET(mkc, &wqe.mkey_seg, pd, dev->ddr.pdn);
+
+ _mlx5r_umr_init_wqe(mr, &wqe, &sg, flags, mr->page_shift, dd);
+
+ /* Set initial translation offset to start_block */
+ offset = (u64)start_block * ent_size;
+ mlx5r_umr_update_offset(&wqe.ctrl_seg, offset);
+
+ if (dd)
cur_ksm = entry;
- } else {
+ else
cur_mtt = entry;
- }
-
- mlx5r_umr_set_update_xlt_data_seg(&wqe.data_seg, &sg);
curr_entry = entry;
+
rdma_umem_for_each_dma_block(mr->umem, &biter, BIT(mr->page_shift)) {
+ if (cur_block_idx < start_block) {
+ cur_block_idx++;
+ continue;
+ }
+
+ if (nblocks && processed_blocks >= nblocks)
+ break;
+
if (curr_entry == entry + sg.length) {
dma_sync_single_for_device(ddev, sg.addr, sg.length,
DMA_TO_DEVICE);
@@ -725,6 +762,11 @@ _mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags, bool dd)
if (dd) {
cur_ksm->va = cpu_to_be64(rdma_block_iter_dma_address(&biter));
cur_ksm->key = cpu_to_be32(dev->ddr.mkey);
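+			/* For dmabuf, ZAP overwrites the entry with a zeroed
+			 * KSM so the range stays non-present until it is
+			 * repopulated.
+			 */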
+ if (mr->umem->is_dmabuf &&
+ (flags & MLX5_IB_UPD_XLT_ZAP)) {
+ cur_ksm->va = 0;
+ cur_ksm->key = 0;
+ }
cur_ksm++;
curr_entry = cur_ksm;
} else {
@@ -736,6 +778,8 @@ _mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags, bool dd)
cur_mtt++;
curr_entry = cur_mtt;
}
+
+ processed_blocks++;
}
final_size = curr_entry - entry;
@@ -752,13 +796,32 @@ err:
return err;
}
-int mlx5r_umr_update_data_direct_ksm_pas(struct mlx5_ib_mr *mr, unsigned int flags)
+int mlx5r_umr_update_data_direct_ksm_pas_range(struct mlx5_ib_mr *mr,
+ unsigned int flags,
+ size_t start_block,
+ size_t nblocks)
{
	/* No invalidation flow is expected, other than a ZAP that keeps the page size */
- if (WARN_ON(!mr->umem->is_dmabuf) || (flags & MLX5_IB_UPD_XLT_ZAP))
+ if (WARN_ON(!mr->umem->is_dmabuf) || ((flags & MLX5_IB_UPD_XLT_ZAP) &&
+ !(flags & MLX5_IB_UPD_XLT_KEEP_PGSZ)))
return -EINVAL;
- return _mlx5r_umr_update_mr_pas(mr, flags, true);
+ return _mlx5r_umr_update_mr_pas(mr, flags, true, start_block, nblocks);
+}
+
+int mlx5r_umr_update_data_direct_ksm_pas(struct mlx5_ib_mr *mr,
+ unsigned int flags)
+{
+ return mlx5r_umr_update_data_direct_ksm_pas_range(mr, flags, 0, 0);
+}
+
+int mlx5r_umr_update_mr_pas_range(struct mlx5_ib_mr *mr, unsigned int flags,
+ size_t start_block, size_t nblocks)
+{
+ if (WARN_ON(mr->umem->is_odp))
+ return -EINVAL;
+
+ return _mlx5r_umr_update_mr_pas(mr, flags, false, start_block, nblocks);
}
/*
@@ -768,10 +831,7 @@ int mlx5r_umr_update_data_direct_ksm_pas(struct mlx5_ib_mr *mr, unsigned int fla
*/
int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
{
- if (WARN_ON(mr->umem->is_odp))
- return -EINVAL;
-
- return _mlx5r_umr_update_mr_pas(mr, flags, false);
+ return mlx5r_umr_update_mr_pas_range(mr, flags, 0, 0);
}
static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
@@ -864,3 +924,202 @@ int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
mlx5r_umr_unmap_free_xlt(dev, xlt, &sg);
return err;
}
+
+/*
+ * Update only the page-size (log_page_size) field of an existing memory key
+ * using UMR. This is useful when the MR's physical layout stays the same
+ * but the optimal page shift has changed (e.g. dmabuf after pages are
+ * pinned and the HW can switch from 4K to huge-page alignment).
+ */
+int mlx5r_umr_update_mr_page_shift(struct mlx5_ib_mr *mr,
+ unsigned int page_shift,
+ bool dd)
+{
+ struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+ struct mlx5r_umr_wqe wqe = {};
+ int err;
+
+	/* Build the UMR WQE: the translation mask covers the page size along
+	 * with the start address and length, which are rewritten below with
+	 * their current values.
+	 */
+ wqe.ctrl_seg.mkey_mask = get_umr_update_translation_mask(dev);
+
+ /* MR must be free while page size is modified */
+ wqe.ctrl_seg.flags = MLX5_UMR_CHECK_FREE | MLX5_UMR_INLINE;
+
+ /* Fill mkey segment with the new page size, keep the rest unchanged */
+ MLX5_SET(mkc, &wqe.mkey_seg, log_page_size, page_shift);
+
+ if (dd)
+ MLX5_SET(mkc, &wqe.mkey_seg, pd, dev->ddr.pdn);
+ else
+ MLX5_SET(mkc, &wqe.mkey_seg, pd, to_mpd(mr->ibmr.pd)->pdn);
+
+ MLX5_SET64(mkc, &wqe.mkey_seg, start_addr, mr->ibmr.iova);
+ MLX5_SET64(mkc, &wqe.mkey_seg, len, mr->ibmr.length);
+ MLX5_SET(mkc, &wqe.mkey_seg, qpn, 0xffffff);
+ MLX5_SET(mkc, &wqe.mkey_seg, mkey_7_0,
+ mlx5_mkey_variant(mr->mmkey.key));
+
+ err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
+ if (!err)
+ mr->page_shift = page_shift;
+
+ return err;
+}
+
+static inline int
+_mlx5r_dmabuf_umr_update_pas(struct mlx5_ib_mr *mr, unsigned int flags,
+ size_t start_block, size_t nblocks, bool dd)
+{
+ if (dd)
+ return mlx5r_umr_update_data_direct_ksm_pas_range(mr, flags,
+ start_block,
+ nblocks);
+ else
+ return mlx5r_umr_update_mr_pas_range(mr, flags, start_block,
+ nblocks);
+}
+
+/**
+ * _mlx5r_umr_zap_mkey - Make an mkey completely non-present
+ *
+ * This function makes an mkey non-present by zapping (zeroing out) its first
+ * N translation entries, where N is determined by the largest page size
+ * supported by the device and by the MR length.
+ * It then updates the mkey's page size to the largest possible value, ensuring
+ * the MR is completely non-present and safe for further updates.
+ * This is useful when updating the page size of a dmabuf MR on a page fault.
+ *
+ * Return: 0 on success, with the number of zapped entries stored in
+ * *nblocks. On error, returns a negative error code.
+ */
+static int _mlx5r_umr_zap_mkey(struct mlx5_ib_mr *mr,
+ unsigned int flags,
+ unsigned int page_shift,
+ size_t *nblocks,
+ bool dd)
+{
+ unsigned int old_page_shift = mr->page_shift;
+ struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+ unsigned int max_page_shift;
+ size_t page_shift_nblocks;
+ unsigned int max_log_size;
+ int access_mode;
+ int err;
+
+ access_mode = dd ? MLX5_MKC_ACCESS_MODE_KSM : MLX5_MKC_ACCESS_MODE_MTT;
+ flags |= MLX5_IB_UPD_XLT_KEEP_PGSZ | MLX5_IB_UPD_XLT_ZAP |
+ MLX5_IB_UPD_XLT_ATOMIC;
+ max_log_size = get_max_log_entity_size_cap(dev, access_mode);
+ max_page_shift = order_base_2(mr->ibmr.length);
+ max_page_shift = min(max(max_page_shift, page_shift), max_log_size);
+	/* Count blocks in units of max_page_shift; we will zap exactly this
+	 * many to make the whole MR non-present.
+	 * The block count must be aligned to MLX5_UMR_FLEX_ALIGNMENT since it
+	 * may be used as an offset into the XLT later on.
+	 */
+ *nblocks = ib_umem_num_dma_blocks(mr->umem, 1UL << max_page_shift);
+ if (dd)
+ *nblocks = ALIGN(*nblocks, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT);
+ else
+ *nblocks = ALIGN(*nblocks, MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT);
+ page_shift_nblocks = ib_umem_num_dma_blocks(mr->umem,
+ 1UL << page_shift);
+	/* If the number of blocks at the maximum page shift is not smaller
+	 * than the number of blocks at the new page size, simply update all
+	 * of the mkey's entries instead.
+	 */
+ if (*nblocks >= page_shift_nblocks)
+ *nblocks = 0;
+
+ /* Make the first nblocks entries non-present without changing
+ * page size yet.
+ */
+ if (*nblocks)
+ mr->page_shift = max_page_shift;
+ err = _mlx5r_dmabuf_umr_update_pas(mr, flags, 0, *nblocks, dd);
+ if (err) {
+ mr->page_shift = old_page_shift;
+ return err;
+ }
+
+ /* Change page size to the max page size now that the MR is completely
+ * non-present.
+ */
+ if (*nblocks) {
+ err = mlx5r_umr_update_mr_page_shift(mr, max_page_shift, dd);
+ if (err) {
+ mr->page_shift = old_page_shift;
+ return err;
+ }
+ }
+
+ return 0;
+}
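+
+/*
+ * Worked example (illustrative numbers): for a 1G MR whose optimal page_shift
+ * is 21 (2M) on a device whose max cap is 27 (128M), max_page_shift =
+ * min(max(order_base_2(1G), 21), 27) = 27, so *nblocks = 1G / 128M = 8
+ * (already aligned), far fewer than the 512 entries a 2M-granularity zap of
+ * the whole MR would need.
+ */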
+
+/**
+ * mlx5r_umr_dmabuf_update_pgsz - Safely update DMABUF MR page size and its
+ * entries accordingly
+ * @mr: The memory region to update
+ * @xlt_flags: Translation table update flags
+ * @page_shift: The new (optimized) page shift to use
+ *
+ * This function updates the page size and mkey translation entries for a DMABUF
+ * MR in a safe, multi-step process to avoid exposing partially updated
+ * mappings. The update is performed in 5 steps:
+ * 1. Make the first X entries non-present, where X is the minimal number of
+ * entries needed to cover the MR length at a large supported page shift.
+ * 2. Update the page size to the large supported page size
+ * 3. Load the remaining N-X entries according to the (optimized) page_shift
+ * 4. Update the page size according to the (optimized) page_shift
+ * 5. Load the first X entries with the correct translations
+ *
+ * This ensures that at no point is the MR accessible with a partially updated
+ * translation table, maintaining correctness and preventing access to stale or
+ * inconsistent mappings.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int mlx5r_umr_dmabuf_update_pgsz(struct mlx5_ib_mr *mr, u32 xlt_flags,
+ unsigned int page_shift)
+{
+ unsigned int old_page_shift = mr->page_shift;
+ size_t zapped_blocks;
+ size_t total_blocks;
+ int err;
+
+ err = _mlx5r_umr_zap_mkey(mr, xlt_flags, page_shift, &zapped_blocks,
+ mr->data_direct);
+ if (err)
+ return err;
+
+ /* _mlx5r_umr_zap_mkey already enables the mkey */
+ xlt_flags &= ~MLX5_IB_UPD_XLT_ENABLE;
+ mr->page_shift = page_shift;
+ total_blocks = ib_umem_num_dma_blocks(mr->umem, 1UL << mr->page_shift);
+ if (zapped_blocks && zapped_blocks < total_blocks) {
+ /* Update PAS according to the new page size but don't update
+ * the page size in the mkey yet.
+ */
+ err = _mlx5r_dmabuf_umr_update_pas(
+ mr,
+ xlt_flags | MLX5_IB_UPD_XLT_KEEP_PGSZ,
+ zapped_blocks,
+ total_blocks - zapped_blocks,
+ mr->data_direct);
+ if (err)
+ goto err;
+ }
+
+ err = mlx5r_umr_update_mr_page_shift(mr, mr->page_shift,
+ mr->data_direct);
+ if (err)
+ goto err;
+ err = _mlx5r_dmabuf_umr_update_pas(mr, xlt_flags, 0, zapped_blocks,
+ mr->data_direct);
+ if (err)
+ goto err;
+
+ return 0;
+err:
+ mr->page_shift = old_page_shift;
+ return err;
+}
diff --git a/drivers/infiniband/hw/mlx5/umr.h b/drivers/infiniband/hw/mlx5/umr.h
index 4a02c9b5aad8..e9361f0140e7 100644
--- a/drivers/infiniband/hw/mlx5/umr.h
+++ b/drivers/infiniband/hw/mlx5/umr.h
@@ -94,9 +94,20 @@ struct mlx5r_umr_wqe {
int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr);
int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
int access_flags);
-int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags);
+int mlx5r_umr_update_data_direct_ksm_pas_range(struct mlx5_ib_mr *mr,
+ unsigned int flags,
+ size_t start_block,
+ size_t nblocks);
int mlx5r_umr_update_data_direct_ksm_pas(struct mlx5_ib_mr *mr, unsigned int flags);
+int mlx5r_umr_update_mr_pas_range(struct mlx5_ib_mr *mr, unsigned int flags,
+ size_t start_block, size_t nblocks);
+int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags);
int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
int page_shift, int flags);
+int mlx5r_umr_update_mr_page_shift(struct mlx5_ib_mr *mr,
+ unsigned int page_shift,
+ bool dd);
+int mlx5r_umr_dmabuf_update_pgsz(struct mlx5_ib_mr *mr, u32 xlt_flags,
+ unsigned int page_shift);
#endif /* _MLX5_IB_UMR_H */
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 6a1e2e79ddc3..dd572d76866c 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -825,7 +825,8 @@ static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
}
static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
- u64 virt, int acc, struct ib_udata *udata)
+ u64 virt, int acc, struct ib_dmah *dmah,
+ struct ib_udata *udata)
{
struct mthca_dev *dev = to_mdev(pd->device);
struct ib_block_iter biter;
@@ -838,6 +839,9 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int err = 0;
int write_mtt_size;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (udata->inlen < sizeof ucmd) {
if (!context->reg_mr_warned) {
mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 979de8f8df14..46d911fd38de 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -847,13 +847,17 @@ static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr)
}
struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
- u64 usr_addr, int acc, struct ib_udata *udata)
+ u64 usr_addr, int acc, struct ib_dmah *dmah,
+ struct ib_udata *udata)
{
int status = -ENOMEM;
struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
struct ocrdma_mr *mr;
struct ocrdma_pd *pd;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
pd = get_ocrdma_pd(ibpd);
if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index 0644346d8d98..6c5c3755b8a9 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -98,7 +98,8 @@ int ocrdma_post_srq_recv(struct ib_srq *, const struct ib_recv_wr *,
int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *, int acc);
struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length,
- u64 virt, int acc, struct ib_udata *);
+ u64 virt, int acc, struct ib_dmah *dmah,
+ struct ib_udata *);
struct ib_mr *ocrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg);
int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 568a5b18803f..ab9bf0922979 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -2953,13 +2953,17 @@ done:
}
struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
- u64 usr_addr, int acc, struct ib_udata *udata)
+ u64 usr_addr, int acc, struct ib_dmah *dmah,
+ struct ib_udata *udata)
{
struct qedr_dev *dev = get_qedr_dev(ibpd->device);
struct qedr_mr *mr;
struct qedr_pd *pd;
int rc = -ENOMEM;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
pd = get_qedr_pd(ibpd);
DP_DEBUG(dev, QEDR_MSG_MR,
"qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 5731458abb06..62420a15101b 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -79,7 +79,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
struct ib_mr *qedr_get_dma_mr(struct ib_pd *, int acc);
struct ib_mr *qedr_reg_user_mr(struct ib_pd *, u64 start, u64 length,
- u64 virt, int acc, struct ib_udata *);
+ u64 virt, int acc, struct ib_dmah *dmah,
+ struct ib_udata *);
int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset);
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig
deleted file mode 100644
index 6c4895777042..000000000000
--- a/drivers/infiniband/hw/qib/Kconfig
+++ /dev/null
@@ -1,17 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config INFINIBAND_QIB
- tristate "Intel PCIe HCA support"
- depends on 64BIT && INFINIBAND_RDMAVT
- depends on PCI
- help
- This is a low-level driver for Intel PCIe QLE InfiniBand host
- channel adapters. This driver does not support the Intel
- HyperTransport card (model QHT7140).
-
-config INFINIBAND_QIB_DCA
- bool "QIB DCA support"
- depends on INFINIBAND_QIB && DCA && SMP && !(INFINIBAND_QIB=y && DCA=m)
- default y
- help
- Setting this enables DCA support on some Intel chip sets
- with the iba7322 HCA.
diff --git a/drivers/infiniband/hw/qib/Makefile b/drivers/infiniband/hw/qib/Makefile
deleted file mode 100644
index 80ffab88fbca..000000000000
--- a/drivers/infiniband/hw/qib/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_INFINIBAND_QIB) += ib_qib.o
-
-ib_qib-y := qib_diag.o qib_driver.o qib_eeprom.o \
- qib_file_ops.o qib_fs.o qib_init.o qib_intr.o \
- qib_mad.o qib_pcie.o qib_pio_copy.o \
- qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o \
- qib_sysfs.o qib_twsi.o qib_tx.o qib_uc.o qib_ud.o \
- qib_user_pages.o qib_user_sdma.o qib_iba7220.o \
- qib_sd7220.o qib_iba7322.o qib_verbs.o
-
-# 6120 has no fallback if no MSI interrupts, others can do INTx
-ib_qib-$(CONFIG_PCI_MSI) += qib_iba6120.o
-
-ib_qib-$(CONFIG_X86_64) += qib_wc_x86_64.o
-ib_qib-$(CONFIG_PPC64) += qib_wc_ppc64.o
-ib_qib-$(CONFIG_DEBUG_FS) += qib_debugfs.o
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
deleted file mode 100644
index 8ee4edd7883c..000000000000
--- a/drivers/infiniband/hw/qib/qib.h
+++ /dev/null
@@ -1,1492 +0,0 @@
-#ifndef _QIB_KERNEL_H
-#define _QIB_KERNEL_H
-/*
- * Copyright (c) 2012 - 2017 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This header file is the base header file for qlogic_ib kernel code
- * qib_user.h serves a similar purpose for user code.
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/fs.h>
-#include <linux/completion.h>
-#include <linux/kref.h>
-#include <linux/sched.h>
-#include <linux/kthread.h>
-#include <linux/xarray.h>
-#include <rdma/ib_hdrs.h>
-#include <rdma/rdma_vt.h>
-
-#include "qib_common.h"
-#include "qib_verbs.h"
-
-/* only s/w major version of QLogic_IB we can handle */
-#define QIB_CHIP_VERS_MAJ 2U
-
-/* don't care about this except printing */
-#define QIB_CHIP_VERS_MIN 0U
-
-/* The Organization Unique Identifier (Mfg code), and its position in GUID */
-#define QIB_OUI 0x001175
-#define QIB_OUI_LSB 40
-
-/*
- * per driver stats, either not device nor port-specific, or
- * summed over all of the devices and ports.
- * They are described by name via ipathfs filesystem, so layout
- * and number of elements can change without breaking compatibility.
- * If members are added or deleted qib_statnames[] in qib_fs.c must
- * change to match.
- */
-struct qlogic_ib_stats {
- __u64 sps_ints; /* number of interrupts handled */
- __u64 sps_errints; /* number of error interrupts */
- __u64 sps_txerrs; /* tx-related packet errors */
- __u64 sps_rcverrs; /* non-crc rcv packet errors */
- __u64 sps_hwerrs; /* hardware errors reported (parity, etc.) */
- __u64 sps_nopiobufs; /* no pio bufs avail from kernel */
- __u64 sps_ctxts; /* number of contexts currently open */
- __u64 sps_lenerrs; /* number of kernel packets where RHF != LRH len */
- __u64 sps_buffull;
- __u64 sps_hdrfull;
-};
-
-extern struct qlogic_ib_stats qib_stats;
-extern const struct pci_error_handlers qib_pci_err_handler;
-
-#define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ
-/*
- * First-cut criterion for "device is active" is
- * two thousand dwords combined Tx, Rx traffic per
- * 5-second interval. SMA packets are 64 dwords,
- * and occur "a few per second", presumably each way.
- */
-#define QIB_TRAFFIC_ACTIVE_THRESHOLD (2000)
-
-/*
- * Below contains all data related to a single context (formerly called port).
- */
-
-#ifdef CONFIG_DEBUG_FS
-struct qib_opcode_stats_perctx;
-#endif
-
-struct qib_ctxtdata {
- void **rcvegrbuf;
- dma_addr_t *rcvegrbuf_phys;
- /* rcvhdrq base, needs mmap before useful */
- void *rcvhdrq;
- /* kernel virtual address where hdrqtail is updated */
- void *rcvhdrtail_kvaddr;
- /*
- * temp buffer for expected send setup, allocated at open, instead
- * of each setup call
- */
- void *tid_pg_list;
- /*
- * Shared page for kernel to signal user processes that send buffers
- * need disarming. The process should call QIB_CMD_DISARM_BUFS
- * or QIB_CMD_ACK_EVENT with IPATH_EVENT_DISARM_BUFS set.
- */
- unsigned long *user_event_mask;
- /* when waiting for rcv or pioavail */
- wait_queue_head_t wait;
- /*
- * rcvegr bufs base, physical, must fit
- * in 44 bits (so 32 bit programs using mmap64 on 44 bit offsets work)
- */
- dma_addr_t rcvegr_phys;
- /* mmap of hdrq, must fit in 44 bits */
- dma_addr_t rcvhdrq_phys;
- dma_addr_t rcvhdrqtailaddr_phys;
-
- /*
- * number of opens (including slave sub-contexts) on this instance
- * (ignoring forks, dup, etc. for now)
- */
- int cnt;
- /*
- * how much space to leave at start of eager TID entries for
- * protocol use, on each TID
- */
- /* instead of calculating it */
- unsigned ctxt;
- /* local node of context */
- int node_id;
- /* non-zero if ctxt is being shared. */
- u16 subctxt_cnt;
- /* non-zero if ctxt is being shared. */
- u16 subctxt_id;
- /* number of eager TID entries. */
- u16 rcvegrcnt;
- /* index of first eager TID entry. */
- u16 rcvegr_tid_base;
- /* number of pio bufs for this ctxt (all procs, if shared) */
- u32 piocnt;
- /* first pio buffer for this ctxt */
- u32 pio_base;
- /* chip offset of PIO buffers for this ctxt */
- u32 piobufs;
- /* how many alloc_pages() chunks in rcvegrbuf_pages */
- u32 rcvegrbuf_chunks;
- /* how many egrbufs per chunk */
- u16 rcvegrbufs_perchunk;
- /* ilog2 of above */
- u16 rcvegrbufs_perchunk_shift;
- /* order for rcvegrbuf_pages */
- size_t rcvegrbuf_size;
- /* rcvhdrq size (for freeing) */
- size_t rcvhdrq_size;
- /* per-context flags for fileops/intr communication */
- unsigned long flag;
- /* next expected TID to check when looking for free */
- u32 tidcursor;
- /* WAIT_RCV that timed out, no interrupt */
- u32 rcvwait_to;
- /* WAIT_PIO that timed out, no interrupt */
- u32 piowait_to;
- /* WAIT_RCV already happened, no wait */
- u32 rcvnowait;
- /* WAIT_PIO already happened, no wait */
- u32 pionowait;
- /* total number of polled urgent packets */
- u32 urgent;
- /* saved total number of polled urgent packets for poll edge trigger */
- u32 urgent_poll;
- /* pid of process using this ctxt */
- pid_t pid;
- pid_t subpid[QLOGIC_IB_MAX_SUBCTXT];
- /* same size as task_struct .comm[], command that opened context */
- char comm[TASK_COMM_LEN];
- /* pkeys set by this use of this ctxt */
- u16 pkeys[4];
- /* so file ops can get at unit */
- struct qib_devdata *dd;
- /* so funcs that need physical port can get it easily */
- struct qib_pportdata *ppd;
- /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
- void *subctxt_uregbase;
- /* An array of pages for the eager receive buffers * N */
- void *subctxt_rcvegrbuf;
- /* An array of pages for the eager header queue entries * N */
- void *subctxt_rcvhdr_base;
- /* The version of the library which opened this ctxt */
- u32 userversion;
- /* Bitmask of active slaves */
- u32 active_slaves;
- /* Type of packets or conditions we want to poll for */
- u16 poll_type;
- /* receive packet sequence counter */
- u8 seq_cnt;
- u8 redirect_seq_cnt;
- /* ctxt rcvhdrq head offset */
- u32 head;
- /* QPs waiting for context processing */
- struct list_head qp_wait_list;
-#ifdef CONFIG_DEBUG_FS
- /* verbs stats per CTX */
- struct qib_opcode_stats_perctx *opstats;
-#endif
-};
-
-struct rvt_sge_state;
-
-struct qib_sdma_txreq {
- int flags;
- int sg_count;
- dma_addr_t addr;
- void (*callback)(struct qib_sdma_txreq *, int);
- u16 start_idx; /* sdma private */
- u16 next_descq_idx; /* sdma private */
- struct list_head list; /* sdma private */
-};
-
-struct qib_sdma_desc {
- __le64 qw[2];
-};
-
-struct qib_verbs_txreq {
- struct qib_sdma_txreq txreq;
- struct rvt_qp *qp;
- struct rvt_swqe *wqe;
- u32 dwords;
- u16 hdr_dwords;
- u16 hdr_inx;
- struct qib_pio_header *align_buf;
- struct rvt_mregion *mr;
- struct rvt_sge_state *ss;
-};
-
-#define QIB_SDMA_TXREQ_F_USELARGEBUF 0x1
-#define QIB_SDMA_TXREQ_F_HEADTOHOST 0x2
-#define QIB_SDMA_TXREQ_F_INTREQ 0x4
-#define QIB_SDMA_TXREQ_F_FREEBUF 0x8
-#define QIB_SDMA_TXREQ_F_FREEDESC 0x10
-
-#define QIB_SDMA_TXREQ_S_OK 0
-#define QIB_SDMA_TXREQ_S_SENDERROR 1
-#define QIB_SDMA_TXREQ_S_ABORTED 2
-#define QIB_SDMA_TXREQ_S_SHUTDOWN 3
-
-/*
- * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
- * Mostly for MADs that set or query link parameters, also ipath
- * config interfaces
- */
-#define QIB_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */
-#define QIB_IB_CFG_LWID_ENB 2 /* allowed Link-width */
-#define QIB_IB_CFG_LWID 3 /* currently active Link-width */
-#define QIB_IB_CFG_SPD_ENB 4 /* allowed Link speeds */
-#define QIB_IB_CFG_SPD 5 /* current Link spd */
-#define QIB_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
-#define QIB_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
-#define QIB_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */
-#define QIB_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */
-#define QIB_IB_CFG_OP_VLS 10 /* operational VLs */
-#define QIB_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
-#define QIB_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */
-#define QIB_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
-#define QIB_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */
-#define QIB_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
-#define QIB_IB_CFG_PKEYS 16 /* update partition keys */
-#define QIB_IB_CFG_MTU 17 /* update MTU in IBC */
-#define QIB_IB_CFG_LSTATE 18 /* update linkcmd and linkinitcmd in IBC */
-#define QIB_IB_CFG_VL_HIGH_LIMIT 19
-#define QIB_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */
-#define QIB_IB_CFG_PORT 21 /* switch port we are connected to */
-
-/*
- * for CFG_LSTATE: LINKCMD in upper 16 bits, LINKINITCMD in lower 16
- * IB_LINKINITCMD_POLL and SLEEP are also used as set/get values for
- * QIB_IB_CFG_LINKDEFAULT cmd
- */
-#define IB_LINKCMD_DOWN (0 << 16)
-#define IB_LINKCMD_ARMED (1 << 16)
-#define IB_LINKCMD_ACTIVE (2 << 16)
-#define IB_LINKINITCMD_NOP 0
-#define IB_LINKINITCMD_POLL 1
-#define IB_LINKINITCMD_SLEEP 2
-#define IB_LINKINITCMD_DISABLE 3
-
-/*
- * valid states passed to qib_set_linkstate() user call
- */
-#define QIB_IB_LINKDOWN 0
-#define QIB_IB_LINKARM 1
-#define QIB_IB_LINKACTIVE 2
-#define QIB_IB_LINKDOWN_ONLY 3
-#define QIB_IB_LINKDOWN_SLEEP 4
-#define QIB_IB_LINKDOWN_DISABLE 5
-
-/*
- * These 7 values (SDR, DDR, and QDR may be ORed for auto-speed
- * negotiation) are used for the 3rd argument to path_f_set_ib_cfg
- * with cmd QIB_IB_CFG_SPD_ENB, by direct calls or via sysfs. They
- * are also the possible values for qib_link_speed_enabled and active
- * The values were chosen to match values used within the IB spec.
- */
-#define QIB_IB_SDR 1
-#define QIB_IB_DDR 2
-#define QIB_IB_QDR 4
-
-#define QIB_DEFAULT_MTU 4096
-
-/* max number of IB ports supported per HCA */
-#define QIB_MAX_IB_PORTS 2
-
-/*
- * Possible IB config parameters for f_get/set_ib_table()
- */
-#define QIB_IB_TBL_VL_HIGH_ARB 1 /* Get/set VL high priority weights */
-#define QIB_IB_TBL_VL_LOW_ARB 2 /* Get/set VL low priority weights */
-
-/*
- * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
- * these are bits so they can be combined, e.g.
- * QIB_RCVCTRL_INTRAVAIL_ENB | QIB_RCVCTRL_CTXT_ENB
- */
-#define QIB_RCVCTRL_TAILUPD_ENB 0x01
-#define QIB_RCVCTRL_TAILUPD_DIS 0x02
-#define QIB_RCVCTRL_CTXT_ENB 0x04
-#define QIB_RCVCTRL_CTXT_DIS 0x08
-#define QIB_RCVCTRL_INTRAVAIL_ENB 0x10
-#define QIB_RCVCTRL_INTRAVAIL_DIS 0x20
-#define QIB_RCVCTRL_PKEY_ENB 0x40 /* Note, default is enabled */
-#define QIB_RCVCTRL_PKEY_DIS 0x80
-#define QIB_RCVCTRL_BP_ENB 0x0100
-#define QIB_RCVCTRL_BP_DIS 0x0200
-#define QIB_RCVCTRL_TIDFLOW_ENB 0x0400
-#define QIB_RCVCTRL_TIDFLOW_DIS 0x0800
-
-/*
- * Possible "operations" for f_sendctrl(ppd, op, var)
- * these are bits so they can be combined, e.g.
- * QIB_SENDCTRL_BUFAVAIL_ENB | QIB_SENDCTRL_ENB
- * Some operations (e.g. DISARM, ABORT) are known to
- * be "one-shot", so do not modify shadow.
- */
-#define QIB_SENDCTRL_DISARM (0x1000)
-#define QIB_SENDCTRL_DISARM_BUF(bufn) ((bufn) | QIB_SENDCTRL_DISARM)
- /* available (0x2000) */
-#define QIB_SENDCTRL_AVAIL_DIS (0x4000)
-#define QIB_SENDCTRL_AVAIL_ENB (0x8000)
-#define QIB_SENDCTRL_AVAIL_BLIP (0x10000)
-#define QIB_SENDCTRL_SEND_DIS (0x20000)
-#define QIB_SENDCTRL_SEND_ENB (0x40000)
-#define QIB_SENDCTRL_FLUSH (0x80000)
-#define QIB_SENDCTRL_CLEAR (0x100000)
-#define QIB_SENDCTRL_DISARM_ALL (0x200000)
-
-/*
- * These are the generic indices for requesting per-port
- * counter values via the f_portcntr function. They
- * are always returned as 64 bit values, although most
- * are 32 bit counters.
- */
-/* send-related counters */
-#define QIBPORTCNTR_PKTSEND 0U
-#define QIBPORTCNTR_WORDSEND 1U
-#define QIBPORTCNTR_PSXMITDATA 2U
-#define QIBPORTCNTR_PSXMITPKTS 3U
-#define QIBPORTCNTR_PSXMITWAIT 4U
-#define QIBPORTCNTR_SENDSTALL 5U
-/* receive-related counters */
-#define QIBPORTCNTR_PKTRCV 6U
-#define QIBPORTCNTR_PSRCVDATA 7U
-#define QIBPORTCNTR_PSRCVPKTS 8U
-#define QIBPORTCNTR_RCVEBP 9U
-#define QIBPORTCNTR_RCVOVFL 10U
-#define QIBPORTCNTR_WORDRCV 11U
-/* IB link related error counters */
-#define QIBPORTCNTR_RXLOCALPHYERR 12U
-#define QIBPORTCNTR_RXVLERR 13U
-#define QIBPORTCNTR_ERRICRC 14U
-#define QIBPORTCNTR_ERRVCRC 15U
-#define QIBPORTCNTR_ERRLPCRC 16U
-#define QIBPORTCNTR_BADFORMAT 17U
-#define QIBPORTCNTR_ERR_RLEN 18U
-#define QIBPORTCNTR_IBSYMBOLERR 19U
-#define QIBPORTCNTR_INVALIDRLEN 20U
-#define QIBPORTCNTR_UNSUPVL 21U
-#define QIBPORTCNTR_EXCESSBUFOVFL 22U
-#define QIBPORTCNTR_ERRLINK 23U
-#define QIBPORTCNTR_IBLINKDOWN 24U
-#define QIBPORTCNTR_IBLINKERRRECOV 25U
-#define QIBPORTCNTR_LLI 26U
-/* other error counters */
-#define QIBPORTCNTR_RXDROPPKT 27U
-#define QIBPORTCNTR_VL15PKTDROP 28U
-#define QIBPORTCNTR_ERRPKEY 29U
-#define QIBPORTCNTR_KHDROVFL 30U
-/* sampling counters (these are actually control registers) */
-#define QIBPORTCNTR_PSINTERVAL 31U
-#define QIBPORTCNTR_PSSTART 32U
-#define QIBPORTCNTR_PSSTAT 33U
-
-/* how often we check for packet activity for "power on hours" (in seconds) */
-#define ACTIVITY_TIMER 5
-
-#define MAX_NAME_SIZE 64
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-struct qib_irq_notify;
-#endif
-
-struct qib_msix_entry {
- void *arg;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- int dca;
- int rcv;
- struct qib_irq_notify *notifier;
-#endif
- cpumask_var_t mask;
-};
-
-/* Below is an opaque struct. Each chip (device) can maintain
- * private data needed for its operation, but not germane to the
- * rest of the driver. For convenience, we define another that
- * is chip-specific, per-port
- */
-struct qib_chip_specific;
-struct qib_chipport_specific;
-
-enum qib_sdma_states {
- qib_sdma_state_s00_hw_down,
- qib_sdma_state_s10_hw_start_up_wait,
- qib_sdma_state_s20_idle,
- qib_sdma_state_s30_sw_clean_up_wait,
- qib_sdma_state_s40_hw_clean_up_wait,
- qib_sdma_state_s50_hw_halt_wait,
- qib_sdma_state_s99_running,
-};
-
-enum qib_sdma_events {
- qib_sdma_event_e00_go_hw_down,
- qib_sdma_event_e10_go_hw_start,
- qib_sdma_event_e20_hw_started,
- qib_sdma_event_e30_go_running,
- qib_sdma_event_e40_sw_cleaned,
- qib_sdma_event_e50_hw_cleaned,
- qib_sdma_event_e60_hw_halted,
- qib_sdma_event_e70_go_idle,
- qib_sdma_event_e7220_err_halted,
- qib_sdma_event_e7322_err_halted,
- qib_sdma_event_e90_timer_tick,
-};
-
-struct sdma_set_state_action {
- unsigned op_enable:1;
- unsigned op_intenable:1;
- unsigned op_halt:1;
- unsigned op_drain:1;
- unsigned go_s99_running_tofalse:1;
- unsigned go_s99_running_totrue:1;
-};
-
-struct qib_sdma_state {
- struct kref kref;
- struct completion comp;
- enum qib_sdma_states current_state;
- struct sdma_set_state_action *set_state_action;
- unsigned current_op;
- unsigned go_s99_running;
- unsigned first_sendbuf;
- unsigned last_sendbuf; /* really last +1 */
- /* debugging/devel */
- enum qib_sdma_states previous_state;
- unsigned previous_op;
- enum qib_sdma_events last_event;
-};
-
-struct xmit_wait {
- struct timer_list timer;
- u64 counter;
- u8 flags;
- struct cache {
- u64 psxmitdata;
- u64 psrcvdata;
- u64 psxmitpkts;
- u64 psrcvpkts;
- u64 psxmitwait;
- } counter_cache;
-};
-
-/*
- * The structure below encapsulates data relevant to a physical IB Port.
- * Current chips support only one such port, but the separation
- * clarifies things a bit. Note that to conform to IB conventions,
- * port-numbers are one-based. The first or only port is port1.
- */
-struct qib_pportdata {
- struct qib_ibport ibport_data;
-
- struct qib_devdata *dd;
- struct qib_chippport_specific *cpspec; /* chip-specific per-port */
-
- /* GUID for this interface, in network order */
- __be64 guid;
-
- /* QIB_POLL, etc. link-state specific flags, per port */
- u32 lflags;
- /* qib_lflags driver is waiting for */
- u32 state_wanted;
- spinlock_t lflags_lock;
-
- /* ref count for each pkey */
- atomic_t pkeyrefs[4];
-
- /*
- * this address is mapped readonly into user processes so they can
- * get status cheaply, whenever they want. One qword of status per port
- */
- u64 *statusp;
-
- /* SendDMA related entries */
-
- /* read mostly */
- struct qib_sdma_desc *sdma_descq;
- struct workqueue_struct *qib_wq;
- struct qib_sdma_state sdma_state;
- dma_addr_t sdma_descq_phys;
- volatile __le64 *sdma_head_dma; /* DMA'ed by chip */
- dma_addr_t sdma_head_phys;
- u16 sdma_descq_cnt;
-
- /* read/write using lock */
- spinlock_t sdma_lock ____cacheline_aligned_in_smp;
- struct list_head sdma_activelist;
- struct list_head sdma_userpending;
- u64 sdma_descq_added;
- u64 sdma_descq_removed;
- u16 sdma_descq_tail;
- u16 sdma_descq_head;
- u8 sdma_generation;
- u8 sdma_intrequest;
-
- struct tasklet_struct sdma_sw_clean_up_task
- ____cacheline_aligned_in_smp;
-
- wait_queue_head_t state_wait; /* for state_wanted */
-
- /* HoL blocking for SMP replies */
- unsigned hol_state;
- struct timer_list hol_timer;
-
- /*
- * Shadow copies of registers; size indicates read access size.
- * Most of them are readonly, but some are write-only register,
- * where we manipulate the bits in the shadow copy, and then write
- * the shadow copy to qlogic_ib.
- *
- * We deliberately make most of these 32 bits, since they have
- * restricted range. For any that we read, we want to generate 32
- * bit accesses, since Opteron will generate 2 separate 32 bit HT
- * transactions for a 64 bit read, and we want to avoid unnecessary
- * bus transactions.
- */
-
- /* This is the 64 bit group */
- /* last ibcstatus. opaque outside chip-specific code */
- u64 lastibcstat;
-
- /* these are the "32 bit" regs */
-
- /*
- * the following two are 32-bit bitmasks, but {test,clear,set}_bit
- * all expect bit fields to be "unsigned long"
- */
- unsigned long p_rcvctrl; /* shadow per-port rcvctrl */
- unsigned long p_sendctrl; /* shadow per-port sendctrl */
-
- u32 ibmtu; /* The MTU programmed for this unit */
- /*
- * Current max size IB packet (in bytes) including IB headers, that
- * we can send. Changes when ibmtu changes.
- */
- u32 ibmaxlen;
- /*
- * ibmaxlen at init time, limited by chip and by receive buffer
- * size. Not changed after init.
- */
- u32 init_ibmaxlen;
- /* LID programmed for this instance */
- u16 lid;
- /* list of pkeys programmed; 0 if not set */
- u16 pkeys[4];
- /* LID mask control */
- u8 lmc;
- u8 link_width_supported;
- u16 link_speed_supported;
- u8 link_width_enabled;
- u16 link_speed_enabled;
- u8 link_width_active;
- u16 link_speed_active;
- u8 vls_supported;
- u8 vls_operational;
- /* Rx Polarity inversion (compensate for ~tx on partner) */
- u8 rx_pol_inv;
-
- u8 hw_pidx; /* physical port index */
- u32 port; /* IB port number and index into dd->pports - 1 */
-
- u8 delay_mult;
-
- /* used to override LED behavior */
- u8 led_override; /* Substituted for normal value, if non-zero */
- u16 led_override_timeoff; /* delta to next timer event */
- u8 led_override_vals[2]; /* Alternates per blink-frame */
- u8 led_override_phase; /* Just counts, LSB picks from vals[] */
- atomic_t led_override_timer_active;
- /* Used to flash LEDs in override mode */
- struct timer_list led_override_timer;
- struct xmit_wait cong_stats;
- struct timer_list symerr_clear_timer;
-
- /* Synchronize access between driver writes and sysfs reads */
- spinlock_t cc_shadow_lock
- ____cacheline_aligned_in_smp;
-
- /* Shadow copy of the congestion control table */
- struct cc_table_shadow *ccti_entries_shadow;
-
- /* Shadow copy of the congestion control entries */
- struct ib_cc_congestion_setting_attr_shadow *congestion_entries_shadow;
-
- /* List of congestion control table entries */
- struct ib_cc_table_entry_shadow *ccti_entries;
-
- /* 16 congestion entries with each entry corresponding to a SL */
- struct ib_cc_congestion_entry_shadow *congestion_entries;
-
- /* Maximum number of congestion control entries that the agent expects
- * the manager to send.
- */
- u16 cc_supported_table_entries;
-
- /* Total number of congestion control table entries */
- u16 total_cct_entry;
-
- /* Bit map identifying service level */
- u16 cc_sl_control_map;
-
- /* maximum congestion control table index */
- u16 ccti_limit;
-
- /* CA's max number of 64 entry units in the congestion control table */
- u8 cc_max_table_entries;
-};
-
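-/*
- * Editorial sketch (not part of the original header): the shadow-register
- * pattern described in the comment block above, shown for p_sendctrl.
- * The chip register is write-only, so an update manipulates the shadow
- * under sendctrl_lock (a qib_devdata field defined further below) and
- * then writes the whole shadow back.  write_port_sendctrl() is a
- * hypothetical stand-in for the chip-specific register write.
- */
-static inline void example_set_sendctrl_bit(struct qib_pportdata *ppd,
-					    unsigned long bit)
-{
-	struct qib_devdata *dd = ppd->dd;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dd->sendctrl_lock, flags);
-	set_bit(bit, &ppd->p_sendctrl);	/* shadow holds the intended state */
-	write_port_sendctrl(ppd, ppd->p_sendctrl); /* hypothetical accessor */
-	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-}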
-/* Observers. Not to be taken lightly, possibly not to ship. */
-/*
- * If a diag read or write is to (bottom <= offset <= top),
- * the "hook" is called, allowing, e.g. shadows to be
- * updated in sync with the driver. struct diag_observer
- * is the "visible" part.
- */
-struct diag_observer;
-
-typedef int (*diag_hook) (struct qib_devdata *dd,
- const struct diag_observer *op,
- u32 offs, u64 *data, u64 mask, int only_32);
-
-struct diag_observer {
- diag_hook hook;
- u32 bottom;
- u32 top;
-};
-
-extern int qib_register_observer(struct qib_devdata *dd,
- const struct diag_observer *op);
-
-/* Only declared here, not defined. Private to diags */
-struct diag_observer_list_elt;
-
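-/*
- * Editorial sketch (not from the original header): a minimal observer for
- * one 8-byte register window.  The hook runs when a diag read or write
- * falls inside [bottom, top]; the body and the return-value convention
- * shown here are assumptions (0 is assumed to let the access proceed),
- * and the offsets are hypothetical.
- */
-static int example_diag_hook(struct qib_devdata *dd,
-			     const struct diag_observer *op,
-			     u32 offs, u64 *data, u64 mask, int only_32)
-{
-	/* e.g. mirror a diag write into a driver-side shadow copy */
-	return 0;
-}
-
-static const struct diag_observer example_observer = {
-	.hook	= example_diag_hook,
-	.bottom	= 0x100,	/* hypothetical register offsets */
-	.top	= 0x107,
-};
-/* registered once per device: qib_register_observer(dd, &example_observer); */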
-/* device data struct now contains only "general per-device" info.
- * fields related to a physical IB port are in a qib_pportdata struct,
- * (described above), while fields only used by a particular chip-type are in
- * a qib_chipdata struct, whose contents are opaque to this file.
- */
-struct qib_devdata {
- struct qib_ibdev verbs_dev; /* must be first */
- struct list_head list;
- /* pointers to related structs for this device */
- /* pci access data structure */
- struct pci_dev *pcidev;
- struct cdev *user_cdev;
- struct cdev *diag_cdev;
- struct device *user_device;
- struct device *diag_device;
-
- /* mem-mapped pointer to base of chip regs */
- u64 __iomem *kregbase;
- /* end of mem-mapped chip space excluding sendbuf and user regs */
- u64 __iomem *kregend;
- /* physical address of chip for io_remap, etc. */
- resource_size_t physaddr;
- /* qib_cfgctxts pointers */
- struct qib_ctxtdata **rcd; /* Receive Context Data */
-
- /* qib_pportdata, points to array of (physical) port-specific
- * data structs, indexed by pidx (0..n-1)
- */
- struct qib_pportdata *pport;
- struct qib_chip_specific *cspec; /* chip-specific */
-
- /* kvirt address of 1st 2k pio buffer */
- void __iomem *pio2kbase;
- /* kvirt address of 1st 4k pio buffer */
- void __iomem *pio4kbase;
- /* mem-mapped pointer to base of PIO buffers (if using WC PAT) */
- void __iomem *piobase;
- /* mem-mapped pointer to base of user chip regs (if using WC PAT) */
- u64 __iomem *userbase;
- void __iomem *piovl15base; /* base of VL15 buffers, if not WC */
- /*
- * points to area where PIOavail registers will be DMA'ed.
- * Has to be on a page of its own, because the page will be
- * mapped into user program space. This copy is *ONLY* ever
- * written by DMA, not by the driver! Need a copy per device
- * when we get to multiple devices
- */
- volatile __le64 *pioavailregs_dma; /* DMA'ed by chip */
- /* physical address where updates occur */
- dma_addr_t pioavailregs_phys;
-
- /* device-specific implementations of functions needed by
- * common code. Contrary to previous consensus, we can't
- * really just point to a device-specific table, because we
- * may need to "bend", e.g. *_f_put_tid
- */
- /* fallback to alternate interrupt type if possible */
- int (*f_intr_fallback)(struct qib_devdata *);
- /* hard reset chip */
- int (*f_reset)(struct qib_devdata *);
- void (*f_quiet_serdes)(struct qib_pportdata *);
- int (*f_bringup_serdes)(struct qib_pportdata *);
- int (*f_early_init)(struct qib_devdata *);
- void (*f_clear_tids)(struct qib_devdata *, struct qib_ctxtdata *);
- void (*f_put_tid)(struct qib_devdata *, u64 __iomem*,
- u32, unsigned long);
- void (*f_cleanup)(struct qib_devdata *);
- void (*f_setextled)(struct qib_pportdata *, u32);
- /* fill out chip-specific fields */
- int (*f_get_base_info)(struct qib_ctxtdata *, struct qib_base_info *);
- /* free irq */
- void (*f_free_irq)(struct qib_devdata *);
- struct qib_message_header *(*f_get_msgheader)
- (struct qib_devdata *, __le32 *);
- void (*f_config_ctxts)(struct qib_devdata *);
- int (*f_get_ib_cfg)(struct qib_pportdata *, int);
- int (*f_set_ib_cfg)(struct qib_pportdata *, int, u32);
- int (*f_set_ib_loopback)(struct qib_pportdata *, const char *);
- int (*f_get_ib_table)(struct qib_pportdata *, int, void *);
- int (*f_set_ib_table)(struct qib_pportdata *, int, void *);
- u32 (*f_iblink_state)(u64);
- u8 (*f_ibphys_portstate)(u64);
- void (*f_xgxs_reset)(struct qib_pportdata *);
- /* per chip actions needed for IB Link up/down changes */
- int (*f_ib_updown)(struct qib_pportdata *, int, u64);
- u32 __iomem *(*f_getsendbuf)(struct qib_pportdata *, u64, u32 *);
- /* Read/modify/write of GPIO pins (potentially chip-specific) */
- int (*f_gpio_mod)(struct qib_devdata *dd, u32 out, u32 dir,
- u32 mask);
- /* Enable writes to config EEPROM (if supported) */
- int (*f_eeprom_wen)(struct qib_devdata *dd, int wen);
- /*
- * modify rcvctrl shadow[s] and write to appropriate chip-regs.
- * see above QIB_RCVCTRL_xxx_ENB/DIS for operations.
- * (ctxt == -1) means "all contexts", only meaningful for
- * clearing. Could remove if chip_spec shutdown properly done.
- */
- void (*f_rcvctrl)(struct qib_pportdata *, unsigned int op,
- int ctxt);
- /* Read/modify/write sendctrl appropriately for op and port. */
- void (*f_sendctrl)(struct qib_pportdata *, u32 op);
- void (*f_set_intr_state)(struct qib_devdata *, u32);
- void (*f_set_armlaunch)(struct qib_devdata *, u32);
- void (*f_wantpiobuf_intr)(struct qib_devdata *, u32);
- int (*f_late_initreg)(struct qib_devdata *);
- int (*f_init_sdma_regs)(struct qib_pportdata *);
- u16 (*f_sdma_gethead)(struct qib_pportdata *);
- int (*f_sdma_busy)(struct qib_pportdata *);
- void (*f_sdma_update_tail)(struct qib_pportdata *, u16);
- void (*f_sdma_set_desc_cnt)(struct qib_pportdata *, unsigned);
- void (*f_sdma_sendctrl)(struct qib_pportdata *, unsigned);
- void (*f_sdma_hw_clean_up)(struct qib_pportdata *);
- void (*f_sdma_hw_start_up)(struct qib_pportdata *);
- void (*f_sdma_init_early)(struct qib_pportdata *);
- void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
- void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32, u32);
- u32 (*f_hdrqempty)(struct qib_ctxtdata *);
- u64 (*f_portcntr)(struct qib_pportdata *, u32);
- u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
- u64 **);
- u32 (*f_read_portcntrs)(struct qib_devdata *, loff_t, u32,
- char **, u64 **);
- u32 (*f_setpbc_control)(struct qib_pportdata *, u32, u8, u8);
- void (*f_initvl15_bufs)(struct qib_devdata *);
- void (*f_init_ctxt)(struct qib_ctxtdata *);
- void (*f_txchk_change)(struct qib_devdata *, u32, u32, u32,
- struct qib_ctxtdata *);
- void (*f_writescratch)(struct qib_devdata *, u32);
- int (*f_tempsense_rd)(struct qib_devdata *, int regnum);
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- int (*f_notify_dca)(struct qib_devdata *, unsigned long event);
-#endif
-
- char *boardname; /* human readable board info */
-
- /* template for writing TIDs */
- u64 tidtemplate;
- /* value to write to free TIDs */
- u64 tidinvalid;
-
- /* number of registers used for pioavail */
- u32 pioavregs;
- /* device (not port) flags, basically device capabilities */
- u32 flags;
- /* last buffer for user use */
- u32 lastctxt_piobuf;
-
- /* reset value */
- u64 z_int_counter;
- /* percpu intcounter */
- u64 __percpu *int_counter;
-
- /* pio bufs allocated per ctxt */
- u32 pbufsctxt;
- /* if remainder on bufs/ctxt, ctxts < extrabuf get 1 extra */
- u32 ctxts_extrabuf;
- /*
- * number of ctxts configured as max; zero means use the number the
- * chip supports; configuring fewer gives more pio bufs/ctxt, etc.
- */
- u32 cfgctxts;
- /*
- * number of ctxts available for PSM open
- */
- u32 freectxts;
-
- /*
- * hint that we should update pioavailshadow before
- * looking for a PIO buffer
- */
- u32 upd_pio_shadow;
-
- /* internal debugging stats */
- u32 maxpkts_call;
- u32 avgpkts_call;
- u64 nopiobufs;
-
- /* PCI Vendor ID (here for NodeInfo) */
- u16 vendorid;
- /* PCI Device ID (here for NodeInfo) */
- u16 deviceid;
- /* for write combining settings */
- int wc_cookie;
- unsigned long wc_base;
- unsigned long wc_len;
-
- /* shadow copy of struct page *'s for exp tid pages */
- struct page **pageshadow;
- /* shadow copy of dma handles for exp tid pages */
- dma_addr_t *physshadow;
- u64 __iomem *egrtidbase;
- spinlock_t sendctrl_lock; /* protect changes to sendctrl shadow */
- /* protects rcd array and user-context counts (interrupt vs. free paths) */
- spinlock_t uctxt_lock; /* rcd and user context changes */
- /*
- * per unit status, see also portdata statusp
- * mapped readonly into user processes so they can get unit and
- * IB link status cheaply
- */
- u64 *devstatusp;
- char *freezemsg; /* freeze msg if hw error put chip in freeze */
- u32 freezelen; /* max length of freezemsg */
- /* timer used to prevent stats overflow, error throttling, etc. */
- struct timer_list stats_timer;
-
- /* timer to verify interrupts work, and fallback if possible */
- struct timer_list intrchk_timer;
- unsigned long ureg_align; /* user register alignment */
-
- /*
- * Protects pioavailshadow, pioavailkernel, pio_need_disarm, and
- * pio_writing.
- */
- spinlock_t pioavail_lock;
- /*
- * index of last buffer to optimize search for next
- */
- u32 last_pio;
- /*
- * min kernel pio buffer to optimize search
- */
- u32 min_kernel_pio;
- /*
- * Shadow copies of registers; size indicates read access size.
- * Most of them are readonly, but some are write-only registers,
- * where we manipulate the bits in the shadow copy, and then write
- * the shadow copy to qlogic_ib.
- *
- * We deliberately make most of these 32 bits, since they have
- * restricted range. For any that we read, we want to generate 32
- * bit accesses, since Opteron will generate 2 separate 32 bit HT
- * transactions for a 64 bit read, and we want to avoid unnecessary
- * bus transactions.
- */
-
- /* This is the 64 bit group */
-
- unsigned long pioavailshadow[6];
- /* bitmap of send buffers available for the kernel to use with PIO. */
- unsigned long pioavailkernel[6];
- /* bitmap of send buffers which need to be disarmed. */
- unsigned long pio_need_disarm[3];
- /* bitmap of send buffers which are being written to. */
- unsigned long pio_writing[3];
- /* kr_revision shadow */
- u64 revision;
- /* Base GUID for device (from eeprom, network order) */
- __be64 base_guid;
-
- /*
- * kr_sendpiobufbase value (chip offset of pio buffers), and the
- * base of the 2KB buffers (user processes only use 2K)
- */
- u64 piobufbase;
- u32 pio2k_bufbase;
-
- /* these are the "32 bit" regs */
-
- /* number of GUIDs in the flash for this interface */
- u32 nguid;
- /*
- * the following two are 32-bit bitmasks, but {test,clear,set}_bit
- * all expect bit fields to be "unsigned long"
- */
- unsigned long rcvctrl; /* shadow per device rcvctrl */
- unsigned long sendctrl; /* shadow per device sendctrl */
-
- /* value we put in kr_rcvhdrcnt */
- u32 rcvhdrcnt;
- /* value we put in kr_rcvhdrsize */
- u32 rcvhdrsize;
- /* value we put in kr_rcvhdrentsize */
- u32 rcvhdrentsize;
- /* kr_ctxtcnt value */
- u32 ctxtcnt;
- /* kr_pagealign value */
- u32 palign;
- /* number of "2KB" PIO buffers */
- u32 piobcnt2k;
- /* size in bytes of "2KB" PIO buffers */
- u32 piosize2k;
- /* max usable size in dwords of a "2KB" PIO buffer before going "4KB" */
- u32 piosize2kmax_dwords;
- /* number of "4KB" PIO buffers */
- u32 piobcnt4k;
- /* size in bytes of "4KB" PIO buffers */
- u32 piosize4k;
- /* kr_rcvegrbase value */
- u32 rcvegrbase;
- /* kr_rcvtidbase value */
- u32 rcvtidbase;
- /* kr_rcvtidcnt value */
- u32 rcvtidcnt;
- /* kr_userregbase */
- u32 uregbase;
- /* shadow the control register contents */
- u32 control;
-
- /* chip address space used by 4k pio buffers */
- u32 align4k;
- /* size of each rcvegrbuffer */
- u16 rcvegrbufsize;
- /* log2 of above */
- u16 rcvegrbufsize_shift;
- /* localbus width (1, 2, 4, 8, 16, 32) from config space */
- u32 lbus_width;
- /* localbus speed in MHz */
- u32 lbus_speed;
- int unit; /* unit # of this chip */
-
- /* start of CHIP_SPEC move to chipspec, but need code changes */
- /* low and high portions of MSI capability/vector */
- u32 msi_lo;
- /* saved after PCIe init for restore after reset */
- u32 msi_hi;
- /* MSI data (vector) saved for restore */
- u16 msi_data;
- /* so we can rewrite it after a chip reset */
- u32 pcibar0;
- /* so we can rewrite it after a chip reset */
- u32 pcibar1;
- u64 rhdrhead_intr_off;
-
- /*
- * ASCII serial number, from flash; large enough for the original
- * all-digit strings and the longer QLogic serial number format
- */
- u8 serial[16];
- /* human readable board version */
- u8 boardversion[96];
- u8 lbus_info[32]; /* human readable localbus info */
- /* chip major rev, from qib_revision */
- u8 majrev;
- /* chip minor rev, from qib_revision */
- u8 minrev;
-
- /* Misc small ints */
- /* Number of physical ports available */
- u8 num_pports;
- /* Lowest context number which can be used by user processes */
- u8 first_user_ctxt;
- u8 n_krcv_queues;
- u8 qpn_mask;
- u8 skip_kctxt_mask;
-
- u16 rhf_offset; /* offset of RHF within receive header entry */
-
- /*
- * GPIO pins for twsi-connected devices, and device code for eeprom
- */
- u8 gpio_sda_num;
- u8 gpio_scl_num;
- u8 twsi_eeprom_dev;
- u8 board_atten;
-
- /* Support (including locks) for EEPROM logging of errors and time */
- /* control access to actual counters, timer */
- spinlock_t eep_st_lock;
- /* control high-level access to EEPROM */
- struct mutex eep_lock;
- uint64_t traffic_wds;
- struct qib_diag_client *diag_client;
- spinlock_t qib_diag_trans_lock; /* protect diag observer ops */
- struct diag_observer_list_elt *diag_observer_list;
-
- u8 psxmitwait_supported;
- /* cycle length of PS* counters in HW (in picoseconds) */
- u16 psxmitwait_check_rate;
- /* high-volume overflow errors deferred to tasklet */
- struct tasklet_struct error_tasklet;
-
- int assigned_node_id; /* NUMA node closest to HCA */
-};
-
-/* hol_state values */
-#define QIB_HOL_UP 0
-#define QIB_HOL_INIT 1
-
-#define QIB_SDMA_SENDCTRL_OP_ENABLE (1U << 0)
-#define QIB_SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
-#define QIB_SDMA_SENDCTRL_OP_HALT (1U << 2)
-#define QIB_SDMA_SENDCTRL_OP_CLEANUP (1U << 3)
-#define QIB_SDMA_SENDCTRL_OP_DRAIN (1U << 4)
-
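-/*
- * Editorial sketch: the OP bits above are OR'ed together and passed to the
- * chip-specific f_sdma_sendctrl hook.  The particular combination below is
- * an illustrative assumption drawn from the flag names, not from the
- * original header.
- */
-static inline void example_sdma_halt_for_cleanup(struct qib_pportdata *ppd)
-{
-	ppd->dd->f_sdma_sendctrl(ppd, QIB_SDMA_SENDCTRL_OP_HALT |
-				      QIB_SDMA_SENDCTRL_OP_CLEANUP);
-}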
-/* operation types for f_txchk_change() */
-#define TXCHK_CHG_TYPE_DIS1 3
-#define TXCHK_CHG_TYPE_ENAB1 2
-#define TXCHK_CHG_TYPE_KERN 1
-#define TXCHK_CHG_TYPE_USER 0
-
-#define QIB_CHASE_TIME msecs_to_jiffies(145)
-#define QIB_CHASE_DIS_TIME msecs_to_jiffies(160)
-
-/* Private data for file operations */
-struct qib_filedata {
- struct qib_ctxtdata *rcd;
- unsigned subctxt;
- unsigned tidcursor;
- struct qib_user_sdma_queue *pq;
- int rec_cpu_num; /* for cpu affinity; -1 if none */
-};
-
-extern struct xarray qib_dev_table;
-extern struct qib_devdata *qib_lookup(int unit);
-extern u32 qib_cpulist_count;
-extern unsigned long *qib_cpulist;
-extern unsigned qib_cc_table_size;
-
-int qib_init(struct qib_devdata *, int);
-int init_chip_wc_pat(struct qib_devdata *dd, u32);
-int qib_enable_wc(struct qib_devdata *dd);
-void qib_disable_wc(struct qib_devdata *dd);
-int qib_count_units(int *npresentp, int *nupp);
-int qib_count_active_units(void);
-
-int qib_cdev_init(int minor, const char *name,
- const struct file_operations *fops,
- struct cdev **cdevp, struct device **devp);
-void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp);
-int qib_dev_init(void);
-void qib_dev_cleanup(void);
-
-int qib_diag_add(struct qib_devdata *);
-void qib_diag_remove(struct qib_devdata *);
-void qib_handle_e_ibstatuschanged(struct qib_pportdata *, u64);
-void qib_sdma_update_tail(struct qib_pportdata *, u16); /* hold sdma_lock */
-
-int qib_decode_err(struct qib_devdata *dd, char *buf, size_t blen, u64 err);
-void qib_bad_intrstatus(struct qib_devdata *);
-void qib_handle_urcv(struct qib_devdata *, u64);
-
-/* clean up any per-chip chip-specific stuff */
-void qib_chip_cleanup(struct qib_devdata *);
-/* clean up any chip type-specific stuff */
-void qib_chip_done(void);
-
-/* check to see if we have to force ordering for write combining */
-int qib_unordered_wc(void);
-void qib_pio_copy(void __iomem *to, const void *from, size_t count);
-
-void qib_disarm_piobufs(struct qib_devdata *, unsigned, unsigned);
-int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *);
-void qib_disarm_piobufs_set(struct qib_devdata *, unsigned long *, unsigned);
-void qib_cancel_sends(struct qib_pportdata *);
-
-int qib_create_rcvhdrq(struct qib_devdata *, struct qib_ctxtdata *);
-int qib_setup_eagerbufs(struct qib_ctxtdata *);
-void qib_set_ctxtcnt(struct qib_devdata *);
-int qib_create_ctxts(struct qib_devdata *dd);
-struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32, int);
-int qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8);
-void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *);
-
-u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *);
-int qib_reset_device(int);
-int qib_wait_linkstate(struct qib_pportdata *, u32, int);
-int qib_set_linkstate(struct qib_pportdata *, u8);
-int qib_set_mtu(struct qib_pportdata *, u16);
-int qib_set_lid(struct qib_pportdata *, u32, u8);
-void qib_hol_down(struct qib_pportdata *);
-void qib_hol_init(struct qib_pportdata *);
-void qib_hol_up(struct qib_pportdata *);
-void qib_hol_event(struct timer_list *);
-void qib_disable_after_error(struct qib_devdata *);
-int qib_set_uevent_bits(struct qib_pportdata *, const int);
-
-/* for use in system calls, where we want to know device type, etc. */
-#define ctxt_fp(fp) \
- (((struct qib_filedata *)(fp)->private_data)->rcd)
-#define subctxt_fp(fp) \
- (((struct qib_filedata *)(fp)->private_data)->subctxt)
-#define tidcursor_fp(fp) \
- (((struct qib_filedata *)(fp)->private_data)->tidcursor)
-#define user_sdma_queue_fp(fp) \
- (((struct qib_filedata *)(fp)->private_data)->pq)
-
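-/*
- * Editorial sketch: how the accessors above are typically used from a file
- * operation.  The handler itself is hypothetical; only the private_data
- * layout comes from struct qib_filedata above.
- */
-static ssize_t example_fop_read(struct file *fp, char __user *buf,
-				size_t count, loff_t *off)
-{
-	struct qib_ctxtdata *rcd = ctxt_fp(fp);
-	unsigned int subctxt = subctxt_fp(fp);
-
-	(void)rcd;
-	(void)subctxt;
-	return 0;	/* a real handler would copy received data to buf */
-}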
-static inline struct qib_devdata *dd_from_ppd(struct qib_pportdata *ppd)
-{
- return ppd->dd;
-}
-
-static inline struct qib_devdata *dd_from_dev(struct qib_ibdev *dev)
-{
- return container_of(dev, struct qib_devdata, verbs_dev);
-}
-
-static inline struct qib_devdata *dd_from_ibdev(struct ib_device *ibdev)
-{
- return dd_from_dev(to_idev(ibdev));
-}
-
-static inline struct qib_pportdata *ppd_from_ibp(struct qib_ibport *ibp)
-{
- return container_of(ibp, struct qib_pportdata, ibport_data);
-}
-
-static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u32 port)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- u32 pidx = port - 1; /* IB numbers ports from 1, hardware from 0 */
-
- WARN_ON(pidx >= dd->num_pports);
- return &dd->pport[pidx].ibport_data;
-}
-
-/*
- * values for dd->flags (_device_ related flags) and
- */
-#define QIB_HAS_LINK_LATENCY 0x1 /* supports link latency (IB 1.2) */
-#define QIB_INITTED 0x2 /* chip and driver up and initted */
-#define QIB_DOING_RESET 0x4 /* in the middle of doing chip reset */
-#define QIB_PRESENT 0x8 /* chip accesses can be done */
-#define QIB_PIO_FLUSH_WC 0x10 /* Needs Write combining flush for PIO */
-#define QIB_HAS_THRESH_UPDATE 0x40
-#define QIB_HAS_SDMA_TIMEOUT 0x80
-#define QIB_USE_SPCL_TRIG 0x100 /* SpecialTrigger launch enabled */
-#define QIB_NODMA_RTAIL 0x200 /* rcvhdrtail register DMA not used */
-#define QIB_HAS_INTX 0x800 /* Supports INTx interrupts */
-#define QIB_HAS_SEND_DMA 0x1000 /* Supports Send DMA */
-#define QIB_HAS_VLSUPP 0x2000 /* Supports multiple VLs; PBC different */
-#define QIB_HAS_HDRSUPP 0x4000 /* Supports header suppression */
-#define QIB_BADINTR 0x8000 /* severe interrupt problems */
-#define QIB_DCA_ENABLED 0x10000 /* Direct Cache Access enabled */
-#define QIB_HAS_QSFP 0x20000 /* device (card instance) has QSFP */
-#define QIB_SHUTDOWN 0x40000 /* device is shutting down */
-
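-/*
- * Editorial note: capability tests check these bits directly, e.g.
- *
- *	if (dd->flags & QIB_HAS_SEND_DMA)
- *		qib_setup_sdma(ppd);
- *
- * (an illustrative pairing; qib_setup_sdma() is declared further below).
- */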
-/*
- * values for ppd->lflags (_ib_port_ related flags)
- */
-#define QIBL_LINKV 0x1 /* IB link state valid */
-#define QIBL_LINKDOWN 0x8 /* IB link is down */
-#define QIBL_LINKINIT 0x10 /* IB link level is up */
-#define QIBL_LINKARMED 0x20 /* IB link is ARMED */
-#define QIBL_LINKACTIVE 0x40 /* IB link is ACTIVE */
-/* leave a gap for more IB-link state */
-#define QIBL_IB_AUTONEG_INPROG 0x1000 /* non-IBTA DDR/QDR neg active */
-#define QIBL_IB_AUTONEG_FAILED 0x2000 /* non-IBTA DDR/QDR neg failed */
-#define QIBL_IB_LINK_DISABLED 0x4000 /* Linkdown-disable forced,
- * Do not try to bring up */
-#define QIBL_IB_FORCE_NOTIFY 0x8000 /* force notify on next ib change */
-
-/* IB dword length mask in PBC (lower 11 bits); same for all chips */
-#define QIB_PBC_LENGTH_MASK ((1 << 11) - 1)
-
-
-/* ctxt_flag bit offsets */
- /* waiting for a packet to arrive */
-#define QIB_CTXT_WAITING_RCV 2
- /* master has not finished initializing */
-#define QIB_CTXT_MASTER_UNINIT 4
- /* waiting for an urgent packet to arrive */
-#define QIB_CTXT_WAITING_URG 5
-
-/* free up any allocated data when a context is closed */
-void qib_free_data(struct qib_ctxtdata *dd);
-void qib_chg_pioavailkernel(struct qib_devdata *, unsigned, unsigned,
- u32, struct qib_ctxtdata *);
-struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *,
- const struct pci_device_id *);
-struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *,
- const struct pci_device_id *);
-struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *,
- const struct pci_device_id *);
-void qib_free_devdata(struct qib_devdata *);
-struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra);
-
-#define QIB_TWSI_NO_DEV 0xFF
-/* Below qib_twsi_ functions must be called with eep_lock held */
-int qib_twsi_reset(struct qib_devdata *dd);
-int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
- int len);
-int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
- const void *buffer, int len);
-void qib_get_eeprom_info(struct qib_devdata *);
-void qib_dump_lookup_output_queue(struct qib_devdata *);
-void qib_force_pio_avail_update(struct qib_devdata *);
-void qib_clear_symerror_on_linkup(struct timer_list *t);
-
-/*
- * Set LED override, only the two LSBs have "public" meaning, but
- * any non-zero value substitutes them for the Link and LinkTrain
- * LED states.
- */
-#define QIB_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
-#define QIB_LED_LOG 2 /* Logical (link) YELLOW LED */
-void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val);
-
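-/*
- * Editorial note: an illustrative call, inferred from the comment above,
- * would be qib_set_led_override(ppd, QIB_LED_PHYS | QIB_LED_LOG) to blink
- * both LEDs; writing 0 presumably restores normal LED behavior.
- */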
-/* send dma routines */
-int qib_setup_sdma(struct qib_pportdata *);
-void qib_teardown_sdma(struct qib_pportdata *);
-void __qib_sdma_intr(struct qib_pportdata *);
-void qib_sdma_intr(struct qib_pportdata *);
-void qib_user_sdma_send_desc(struct qib_pportdata *dd,
- struct list_head *pktlist);
-int qib_sdma_verbs_send(struct qib_pportdata *, struct rvt_sge_state *,
- u32, struct qib_verbs_txreq *);
-/* ppd->sdma_lock should be locked before calling this. */
-int qib_sdma_make_progress(struct qib_pportdata *dd);
-
-/* must be called with ppd->sdma_lock held */
-static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd)
-{
- return ppd->sdma_descq_cnt -
- (ppd->sdma_descq_added - ppd->sdma_descq_removed) - 1;
-}
-
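-/*
- * Editorial note: sdma_descq_added - sdma_descq_removed is the count of
- * descriptors currently in the ring; one slot is kept permanently free so
- * that a full ring is never mistaken for an empty one (head == tail).
- */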
-static inline int __qib_sdma_running(struct qib_pportdata *ppd)
-{
- return ppd->sdma_state.current_state == qib_sdma_state_s99_running;
-}
-int qib_sdma_running(struct qib_pportdata *);
-void dump_sdma_state(struct qib_pportdata *ppd);
-void __qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
-void qib_sdma_process_event(struct qib_pportdata *, enum qib_sdma_events);
-
-/*
- * number of words used for protocol header if not set by qib_userinit()
- */
-#define QIB_DFLT_RCVHDRSIZE 9
-
-/*
- * We need to be able to handle an IB header of at least 24 dwords.
- * We need the rcvhdrq large enough to handle largest IB header, but
- * still have room for a 2KB MTU standard IB packet.
- * Additionally, some processor/memory controller combinations
- * benefit quite strongly from having the DMA'ed data be cacheline
- * aligned and a cacheline multiple, so we set the size to 32 dwords
- * (2 64-byte primary cachelines for pretty much all processors of
- * interest). The alignment hurts nothing, other than using somewhat
- * more memory.
- */
-#define QIB_RCVHDR_ENTSIZE 32
-
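-/*
- * Editorial note: 32 dwords * 4 bytes = 128 bytes per entry, i.e. exactly
- * two 64-byte cachelines, matching the rationale above.
- */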
-int qib_get_user_pages(unsigned long, size_t, struct page **);
-void qib_release_user_pages(struct page **, size_t);
-int qib_eeprom_read(struct qib_devdata *, u8, void *, int);
-int qib_eeprom_write(struct qib_devdata *, u8, const void *, int);
-u32 __iomem *qib_getsendbuf_range(struct qib_devdata *, u32 *, u32, u32);
-void qib_sendbuf_done(struct qib_devdata *, unsigned);
-
-static inline void qib_clear_rcvhdrtail(const struct qib_ctxtdata *rcd)
-{
- *((u64 *) rcd->rcvhdrtail_kvaddr) = 0ULL;
-}
-
-static inline u32 qib_get_rcvhdrtail(const struct qib_ctxtdata *rcd)
-{
- /*
- * volatile because it's a DMA target from the chip, the routine is
- * inlined, and we don't want register caching or reordering.
- */
- return (u32) le64_to_cpu(
- *((volatile __le64 *)rcd->rcvhdrtail_kvaddr)); /* DMA'ed */
-}
-
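-/*
- * Editorial sketch: a receive path polls the DMA'ed tail and compares it
- * against the driver's head index.  The head value is passed in here
- * because its storage location is not shown in this header; the parameter
- * is an assumption made for illustration.
- */
-static inline int example_rcvhdrq_pending(const struct qib_ctxtdata *rcd,
-					  u32 head)
-{
-	return qib_get_rcvhdrtail(rcd) != head;
-}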
-/*
- * sysfs interface.
- */
-
-extern const struct attribute_group qib_attr_group;
-extern const struct attribute_group *qib_attr_port_groups[];
-
-int qib_device_create(struct qib_devdata *);
-void qib_device_remove(struct qib_devdata *);
-
-/* Hook for sysfs read of QSFP */
-extern int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len);
-
-int __init qib_init_qibfs(void);
-int __exit qib_exit_qibfs(void);
-
-int qibfs_add(struct qib_devdata *);
-int qibfs_remove(struct qib_devdata *);
-
-int qib_pcie_init(struct pci_dev *, const struct pci_device_id *);
-int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
- const struct pci_device_id *);
-void qib_pcie_ddcleanup(struct qib_devdata *);
-int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent);
-void qib_free_irq(struct qib_devdata *dd);
-int qib_reinit_intr(struct qib_devdata *dd);
-void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *);
-void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8);
-/* interrupts for device */
-u64 qib_int_counter(struct qib_devdata *);
-/* interrupt for all devices */
-u64 qib_sps_ints(void);
-
-/*
- * dma_addr wrappers - all 0's invalid for hw
- */
-int qib_map_page(struct pci_dev *d, struct page *p, dma_addr_t *daddr);
-struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi);
-
-/*
- * Flush write combining store buffers (if present) and perform a write
- * barrier.
- */
-static inline void qib_flush_wc(void)
-{
-#if defined(CONFIG_X86_64)
- asm volatile("sfence" : : : "memory");
-#else
- wmb(); /* no reorder around wc flush */
-#endif
-}
-
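-/*
- * Editorial sketch: a PIO send copies into write-combining space and then
- * flushes, so the chip observes a complete, ordered store stream.
- * 'piobuf' and 'count' are assumptions; count is in whatever units
- * qib_pio_copy() expects (its declaration above leaves them unspecified).
- */
-static inline void example_pio_send(void __iomem *piobuf,
-				    const void *from, size_t count)
-{
-	qib_pio_copy(piobuf, from, count);	/* copy into WC space */
-	qib_flush_wc();				/* flush WC buffers + barrier */
-}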
-/* global module parameter variables */
-extern unsigned qib_ibmtu;
-extern ushort qib_cfgctxts;
-extern ushort qib_num_cfg_vls;
-extern ushort qib_mini_init; /* If set, do few (ideally 0) writes to chip */
-extern unsigned qib_n_krcv_queues;
-extern unsigned qib_sdma_fetch_arb;
-extern unsigned qib_compat_ddr_negotiate;
-extern int qib_special_trigger;
-extern unsigned qib_numa_aware;
-
-extern struct mutex qib_mutex;
-
-/* Number of seconds before our card status check... */
-#define STATUS_TIMEOUT 60
-
-#define QIB_DRV_NAME "ib_qib"
-#define QIB_USER_MINOR_BASE 0
-#define QIB_TRACE_MINOR 127
-#define QIB_DIAGPKT_MINOR 128
-#define QIB_DIAG_MINOR_BASE 129
-#define QIB_NMINORS 255
-
-#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
-#define PCI_VENDOR_ID_QLOGIC 0x1077
-#define PCI_DEVICE_ID_QLOGIC_IB_6120 0x10
-#define PCI_DEVICE_ID_QLOGIC_IB_7220 0x7220
-#define PCI_DEVICE_ID_QLOGIC_IB_7322 0x7322
-
-/*
- * qib_early_err is used (only!) to print early errors before devdata is
- * allocated, or when dd->pcidev may not be valid, and at the tail end of
- * cleanup when devdata may have been freed, etc. qib_dev_porterr is
- * the same as qib_dev_err, but is used when the message really needs
- * the IB port# to be definitive as to what's happening.
- * All of these go to the trace log, and the trace log entry is done
- * first to avoid possible serial port delays from printk.
- */
-#define qib_early_err(dev, fmt, ...) \
- dev_err(dev, fmt, ##__VA_ARGS__)
-
-#define qib_dev_err(dd, fmt, ...) \
- dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
- rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
-
-#define qib_dev_warn(dd, fmt, ...) \
- dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
- rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
-
-#define qib_dev_porterr(dd, port, fmt, ...) \
- dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
- rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (dd)->unit, (port), \
- ##__VA_ARGS__)
-
-#define qib_devinfo(pcidev, fmt, ...) \
- dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__)
-
-/*
- * this is used for formatting hw error messages...
- */
-struct qib_hwerror_msgs {
- u64 mask;
- const char *msg;
- size_t sz;
-};
-
-#define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b }
-
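-/*
- * Editorial sketch: error tables pair a mask with a message and are decoded
- * in bulk by qib_format_hwerrors() (declared just below).  The mask bits
- * and messages here are hypothetical.
- */
-static const struct qib_hwerror_msgs example_hwerror_msgs[] = {
-	QLOGIC_IB_HWE_MSG(1ULL << 63, "IBCBus parity"),
-	QLOGIC_IB_HWE_MSG(1ULL << 54, "PowerOnBISTFailed"),
-};
-/*
- * decoded as, e.g.:
- *	qib_format_hwerrors(hwerrs, example_hwerror_msgs,
- *			    ARRAY_SIZE(example_hwerror_msgs), msg, sizeof(msg));
- */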
-/* in qib_intr.c... */
-void qib_format_hwerrors(u64 hwerrs,
- const struct qib_hwerror_msgs *hwerrmsgs,
- size_t nhwerrmsgs, char *msg, size_t lmsg);
-
-void qib_stop_send_queue(struct rvt_qp *qp);
-void qib_quiesce_qp(struct rvt_qp *qp);
-void qib_flush_qp_waiters(struct rvt_qp *qp);
-int qib_mtu_to_path_mtu(u32 mtu);
-u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu);
-void qib_notify_error_qp(struct rvt_qp *qp);
-int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
- struct ib_qp_attr *attr);
-
-#endif /* _QIB_KERNEL_H */
diff --git a/drivers/infiniband/hw/qib/qib_6120_regs.h b/drivers/infiniband/hw/qib/qib_6120_regs.h
deleted file mode 100644
index e16cb6f7de2c..000000000000
--- a/drivers/infiniband/hw/qib/qib_6120_regs.h
+++ /dev/null
@@ -1,977 +0,0 @@
-/*
- * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
-
-#define QIB_6120_Revision_OFFS 0x0
-#define QIB_6120_Revision_R_Simulator_LSB 0x3F
-#define QIB_6120_Revision_R_Simulator_RMASK 0x1
-#define QIB_6120_Revision_Reserved_LSB 0x28
-#define QIB_6120_Revision_Reserved_RMASK 0x7FFFFF
-#define QIB_6120_Revision_BoardID_LSB 0x20
-#define QIB_6120_Revision_BoardID_RMASK 0xFF
-#define QIB_6120_Revision_R_SW_LSB 0x18
-#define QIB_6120_Revision_R_SW_RMASK 0xFF
-#define QIB_6120_Revision_R_Arch_LSB 0x10
-#define QIB_6120_Revision_R_Arch_RMASK 0xFF
-#define QIB_6120_Revision_R_ChipRevMajor_LSB 0x8
-#define QIB_6120_Revision_R_ChipRevMajor_RMASK 0xFF
-#define QIB_6120_Revision_R_ChipRevMinor_LSB 0x0
-#define QIB_6120_Revision_R_ChipRevMinor_RMASK 0xFF
-
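-/*
- * Editorial note (not part of the generated file): each field is described
- * by its LSB bit position and a right-justified mask (RMASK), so extraction
- * is a shift-and-mask, e.g.:
- *
- *	majrev = (revision >> QIB_6120_Revision_R_ChipRevMajor_LSB) &
- *		 QIB_6120_Revision_R_ChipRevMajor_RMASK;
- */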
-#define QIB_6120_Control_OFFS 0x8
-#define QIB_6120_Control_TxLatency_LSB 0x4
-#define QIB_6120_Control_TxLatency_RMASK 0x1
-#define QIB_6120_Control_PCIERetryBufDiagEn_LSB 0x3
-#define QIB_6120_Control_PCIERetryBufDiagEn_RMASK 0x1
-#define QIB_6120_Control_LinkEn_LSB 0x2
-#define QIB_6120_Control_LinkEn_RMASK 0x1
-#define QIB_6120_Control_FreezeMode_LSB 0x1
-#define QIB_6120_Control_FreezeMode_RMASK 0x1
-#define QIB_6120_Control_SyncReset_LSB 0x0
-#define QIB_6120_Control_SyncReset_RMASK 0x1
-
-#define QIB_6120_PageAlign_OFFS 0x10
-
-#define QIB_6120_PortCnt_OFFS 0x18
-
-#define QIB_6120_SendRegBase_OFFS 0x30
-
-#define QIB_6120_UserRegBase_OFFS 0x38
-
-#define QIB_6120_CntrRegBase_OFFS 0x40
-
-#define QIB_6120_Scratch_OFFS 0x48
-#define QIB_6120_Scratch_TopHalf_LSB 0x20
-#define QIB_6120_Scratch_TopHalf_RMASK 0xFFFFFFFF
-#define QIB_6120_Scratch_BottomHalf_LSB 0x0
-#define QIB_6120_Scratch_BottomHalf_RMASK 0xFFFFFFFF
-
-#define QIB_6120_IntBlocked_OFFS 0x60
-#define QIB_6120_IntBlocked_ErrorIntBlocked_LSB 0x1F
-#define QIB_6120_IntBlocked_ErrorIntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_PioSetIntBlocked_LSB 0x1E
-#define QIB_6120_IntBlocked_PioSetIntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_PioBufAvailIntBlocked_LSB 0x1D
-#define QIB_6120_IntBlocked_PioBufAvailIntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_assertGPIOIntBlocked_LSB 0x1C
-#define QIB_6120_IntBlocked_assertGPIOIntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_Reserved_LSB 0xF
-#define QIB_6120_IntBlocked_Reserved_RMASK 0x1FFF
-#define QIB_6120_IntBlocked_RcvAvail4IntBlocked_LSB 0x10
-#define QIB_6120_IntBlocked_RcvAvail4IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvAvail3IntBlocked_LSB 0xF
-#define QIB_6120_IntBlocked_RcvAvail3IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvAvail2IntBlocked_LSB 0xE
-#define QIB_6120_IntBlocked_RcvAvail2IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvAvail1IntBlocked_LSB 0xD
-#define QIB_6120_IntBlocked_RcvAvail1IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvAvail0IntBlocked_LSB 0xC
-#define QIB_6120_IntBlocked_RcvAvail0IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_Reserved1_LSB 0x5
-#define QIB_6120_IntBlocked_Reserved1_RMASK 0x7F
-#define QIB_6120_IntBlocked_RcvUrg4IntBlocked_LSB 0x4
-#define QIB_6120_IntBlocked_RcvUrg4IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvUrg3IntBlocked_LSB 0x3
-#define QIB_6120_IntBlocked_RcvUrg3IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvUrg2IntBlocked_LSB 0x2
-#define QIB_6120_IntBlocked_RcvUrg2IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvUrg1IntBlocked_LSB 0x1
-#define QIB_6120_IntBlocked_RcvUrg1IntBlocked_RMASK 0x1
-#define QIB_6120_IntBlocked_RcvUrg0IntBlocked_LSB 0x0
-#define QIB_6120_IntBlocked_RcvUrg0IntBlocked_RMASK 0x1
-
-#define QIB_6120_IntMask_OFFS 0x68
-#define QIB_6120_IntMask_ErrorIntMask_LSB 0x1F
-#define QIB_6120_IntMask_ErrorIntMask_RMASK 0x1
-#define QIB_6120_IntMask_PioSetIntMask_LSB 0x1E
-#define QIB_6120_IntMask_PioSetIntMask_RMASK 0x1
-#define QIB_6120_IntMask_PioBufAvailIntMask_LSB 0x1D
-#define QIB_6120_IntMask_PioBufAvailIntMask_RMASK 0x1
-#define QIB_6120_IntMask_assertGPIOIntMask_LSB 0x1C
-#define QIB_6120_IntMask_assertGPIOIntMask_RMASK 0x1
-#define QIB_6120_IntMask_Reserved_LSB 0x11
-#define QIB_6120_IntMask_Reserved_RMASK 0x7FF
-#define QIB_6120_IntMask_RcvAvail4IntMask_LSB 0x10
-#define QIB_6120_IntMask_RcvAvail4IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvAvail3IntMask_LSB 0xF
-#define QIB_6120_IntMask_RcvAvail3IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvAvail2IntMask_LSB 0xE
-#define QIB_6120_IntMask_RcvAvail2IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvAvail1IntMask_LSB 0xD
-#define QIB_6120_IntMask_RcvAvail1IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvAvail0IntMask_LSB 0xC
-#define QIB_6120_IntMask_RcvAvail0IntMask_RMASK 0x1
-#define QIB_6120_IntMask_Reserved1_LSB 0x5
-#define QIB_6120_IntMask_Reserved1_RMASK 0x7F
-#define QIB_6120_IntMask_RcvUrg4IntMask_LSB 0x4
-#define QIB_6120_IntMask_RcvUrg4IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvUrg3IntMask_LSB 0x3
-#define QIB_6120_IntMask_RcvUrg3IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvUrg2IntMask_LSB 0x2
-#define QIB_6120_IntMask_RcvUrg2IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvUrg1IntMask_LSB 0x1
-#define QIB_6120_IntMask_RcvUrg1IntMask_RMASK 0x1
-#define QIB_6120_IntMask_RcvUrg0IntMask_LSB 0x0
-#define QIB_6120_IntMask_RcvUrg0IntMask_RMASK 0x1
-
-#define QIB_6120_IntStatus_OFFS 0x70
-#define QIB_6120_IntStatus_Error_LSB 0x1F
-#define QIB_6120_IntStatus_Error_RMASK 0x1
-#define QIB_6120_IntStatus_PioSent_LSB 0x1E
-#define QIB_6120_IntStatus_PioSent_RMASK 0x1
-#define QIB_6120_IntStatus_PioBufAvail_LSB 0x1D
-#define QIB_6120_IntStatus_PioBufAvail_RMASK 0x1
-#define QIB_6120_IntStatus_assertGPIO_LSB 0x1C
-#define QIB_6120_IntStatus_assertGPIO_RMASK 0x1
-#define QIB_6120_IntStatus_Reserved_LSB 0xF
-#define QIB_6120_IntStatus_Reserved_RMASK 0x1FFF
-#define QIB_6120_IntStatus_RcvAvail4_LSB 0x10
-#define QIB_6120_IntStatus_RcvAvail4_RMASK 0x1
-#define QIB_6120_IntStatus_RcvAvail3_LSB 0xF
-#define QIB_6120_IntStatus_RcvAvail3_RMASK 0x1
-#define QIB_6120_IntStatus_RcvAvail2_LSB 0xE
-#define QIB_6120_IntStatus_RcvAvail2_RMASK 0x1
-#define QIB_6120_IntStatus_RcvAvail1_LSB 0xD
-#define QIB_6120_IntStatus_RcvAvail1_RMASK 0x1
-#define QIB_6120_IntStatus_RcvAvail0_LSB 0xC
-#define QIB_6120_IntStatus_RcvAvail0_RMASK 0x1
-#define QIB_6120_IntStatus_Reserved1_LSB 0x5
-#define QIB_6120_IntStatus_Reserved1_RMASK 0x7F
-#define QIB_6120_IntStatus_RcvUrg4_LSB 0x4
-#define QIB_6120_IntStatus_RcvUrg4_RMASK 0x1
-#define QIB_6120_IntStatus_RcvUrg3_LSB 0x3
-#define QIB_6120_IntStatus_RcvUrg3_RMASK 0x1
-#define QIB_6120_IntStatus_RcvUrg2_LSB 0x2
-#define QIB_6120_IntStatus_RcvUrg2_RMASK 0x1
-#define QIB_6120_IntStatus_RcvUrg1_LSB 0x1
-#define QIB_6120_IntStatus_RcvUrg1_RMASK 0x1
-#define QIB_6120_IntStatus_RcvUrg0_LSB 0x0
-#define QIB_6120_IntStatus_RcvUrg0_RMASK 0x1
-
-#define QIB_6120_IntClear_OFFS 0x78
-#define QIB_6120_IntClear_ErrorIntClear_LSB 0x1F
-#define QIB_6120_IntClear_ErrorIntClear_RMASK 0x1
-#define QIB_6120_IntClear_PioSetIntClear_LSB 0x1E
-#define QIB_6120_IntClear_PioSetIntClear_RMASK 0x1
-#define QIB_6120_IntClear_PioBufAvailIntClear_LSB 0x1D
-#define QIB_6120_IntClear_PioBufAvailIntClear_RMASK 0x1
-#define QIB_6120_IntClear_assertGPIOIntClear_LSB 0x1C
-#define QIB_6120_IntClear_assertGPIOIntClear_RMASK 0x1
-#define QIB_6120_IntClear_Reserved_LSB 0xF
-#define QIB_6120_IntClear_Reserved_RMASK 0x1FFF
-#define QIB_6120_IntClear_RcvAvail4IntClear_LSB 0x10
-#define QIB_6120_IntClear_RcvAvail4IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvAvail3IntClear_LSB 0xF
-#define QIB_6120_IntClear_RcvAvail3IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvAvail2IntClear_LSB 0xE
-#define QIB_6120_IntClear_RcvAvail2IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvAvail1IntClear_LSB 0xD
-#define QIB_6120_IntClear_RcvAvail1IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvAvail0IntClear_LSB 0xC
-#define QIB_6120_IntClear_RcvAvail0IntClear_RMASK 0x1
-#define QIB_6120_IntClear_Reserved1_LSB 0x5
-#define QIB_6120_IntClear_Reserved1_RMASK 0x7F
-#define QIB_6120_IntClear_RcvUrg4IntClear_LSB 0x4
-#define QIB_6120_IntClear_RcvUrg4IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvUrg3IntClear_LSB 0x3
-#define QIB_6120_IntClear_RcvUrg3IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvUrg2IntClear_LSB 0x2
-#define QIB_6120_IntClear_RcvUrg2IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvUrg1IntClear_LSB 0x1
-#define QIB_6120_IntClear_RcvUrg1IntClear_RMASK 0x1
-#define QIB_6120_IntClear_RcvUrg0IntClear_LSB 0x0
-#define QIB_6120_IntClear_RcvUrg0IntClear_RMASK 0x1
-
-#define QIB_6120_ErrMask_OFFS 0x80
-#define QIB_6120_ErrMask_Reserved_LSB 0x34
-#define QIB_6120_ErrMask_Reserved_RMASK 0xFFF
-#define QIB_6120_ErrMask_HardwareErrMask_LSB 0x33
-#define QIB_6120_ErrMask_HardwareErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_ResetNegatedMask_LSB 0x32
-#define QIB_6120_ErrMask_ResetNegatedMask_RMASK 0x1
-#define QIB_6120_ErrMask_InvalidAddrErrMask_LSB 0x31
-#define QIB_6120_ErrMask_InvalidAddrErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_IBStatusChangedMask_LSB 0x30
-#define QIB_6120_ErrMask_IBStatusChangedMask_RMASK 0x1
-#define QIB_6120_ErrMask_Reserved1_LSB 0x26
-#define QIB_6120_ErrMask_Reserved1_RMASK 0x3FF
-#define QIB_6120_ErrMask_SendUnsupportedVLErrMask_LSB 0x25
-#define QIB_6120_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24
-#define QIB_6120_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendPioArmLaunchErrMask_LSB 0x23
-#define QIB_6120_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendDroppedDataPktErrMask_LSB 0x22
-#define QIB_6120_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21
-#define QIB_6120_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendPktLenErrMask_LSB 0x20
-#define QIB_6120_ErrMask_SendPktLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendUnderRunErrMask_LSB 0x1F
-#define QIB_6120_ErrMask_SendUnderRunErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendMaxPktLenErrMask_LSB 0x1E
-#define QIB_6120_ErrMask_SendMaxPktLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_SendMinPktLenErrMask_LSB 0x1D
-#define QIB_6120_ErrMask_SendMinPktLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_Reserved2_LSB 0x12
-#define QIB_6120_ErrMask_Reserved2_RMASK 0x7FF
-#define QIB_6120_ErrMask_RcvIBLostLinkErrMask_LSB 0x11
-#define QIB_6120_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvHdrErrMask_LSB 0x10
-#define QIB_6120_ErrMask_RcvHdrErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvHdrLenErrMask_LSB 0xF
-#define QIB_6120_ErrMask_RcvHdrLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvBadTidErrMask_LSB 0xE
-#define QIB_6120_ErrMask_RcvBadTidErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvHdrFullErrMask_LSB 0xD
-#define QIB_6120_ErrMask_RcvHdrFullErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvEgrFullErrMask_LSB 0xC
-#define QIB_6120_ErrMask_RcvEgrFullErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvBadVersionErrMask_LSB 0xB
-#define QIB_6120_ErrMask_RcvBadVersionErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvIBFlowErrMask_LSB 0xA
-#define QIB_6120_ErrMask_RcvIBFlowErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvEBPErrMask_LSB 0x9
-#define QIB_6120_ErrMask_RcvEBPErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8
-#define QIB_6120_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7
-#define QIB_6120_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvShortPktLenErrMask_LSB 0x6
-#define QIB_6120_ErrMask_RcvShortPktLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvLongPktLenErrMask_LSB 0x5
-#define QIB_6120_ErrMask_RcvLongPktLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvMaxPktLenErrMask_LSB 0x4
-#define QIB_6120_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvMinPktLenErrMask_LSB 0x3
-#define QIB_6120_ErrMask_RcvMinPktLenErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvICRCErrMask_LSB 0x2
-#define QIB_6120_ErrMask_RcvICRCErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvVCRCErrMask_LSB 0x1
-#define QIB_6120_ErrMask_RcvVCRCErrMask_RMASK 0x1
-#define QIB_6120_ErrMask_RcvFormatErrMask_LSB 0x0
-#define QIB_6120_ErrMask_RcvFormatErrMask_RMASK 0x1
-
-#define QIB_6120_ErrStatus_OFFS 0x88
-#define QIB_6120_ErrStatus_Reserved_LSB 0x34
-#define QIB_6120_ErrStatus_Reserved_RMASK 0xFFF
-#define QIB_6120_ErrStatus_HardwareErr_LSB 0x33
-#define QIB_6120_ErrStatus_HardwareErr_RMASK 0x1
-#define QIB_6120_ErrStatus_ResetNegated_LSB 0x32
-#define QIB_6120_ErrStatus_ResetNegated_RMASK 0x1
-#define QIB_6120_ErrStatus_InvalidAddrErr_LSB 0x31
-#define QIB_6120_ErrStatus_InvalidAddrErr_RMASK 0x1
-#define QIB_6120_ErrStatus_IBStatusChanged_LSB 0x30
-#define QIB_6120_ErrStatus_IBStatusChanged_RMASK 0x1
-#define QIB_6120_ErrStatus_Reserved1_LSB 0x26
-#define QIB_6120_ErrStatus_Reserved1_RMASK 0x3FF
-#define QIB_6120_ErrStatus_SendUnsupportedVLErr_LSB 0x25
-#define QIB_6120_ErrStatus_SendUnsupportedVLErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24
-#define QIB_6120_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendPioArmLaunchErr_LSB 0x23
-#define QIB_6120_ErrStatus_SendPioArmLaunchErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendDroppedDataPktErr_LSB 0x22
-#define QIB_6120_ErrStatus_SendDroppedDataPktErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendDroppedSmpPktErr_LSB 0x21
-#define QIB_6120_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendPktLenErr_LSB 0x20
-#define QIB_6120_ErrStatus_SendPktLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendUnderRunErr_LSB 0x1F
-#define QIB_6120_ErrStatus_SendUnderRunErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendMaxPktLenErr_LSB 0x1E
-#define QIB_6120_ErrStatus_SendMaxPktLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_SendMinPktLenErr_LSB 0x1D
-#define QIB_6120_ErrStatus_SendMinPktLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_Reserved2_LSB 0x12
-#define QIB_6120_ErrStatus_Reserved2_RMASK 0x7FF
-#define QIB_6120_ErrStatus_RcvIBLostLinkErr_LSB 0x11
-#define QIB_6120_ErrStatus_RcvIBLostLinkErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvHdrErr_LSB 0x10
-#define QIB_6120_ErrStatus_RcvHdrErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvHdrLenErr_LSB 0xF
-#define QIB_6120_ErrStatus_RcvHdrLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvBadTidErr_LSB 0xE
-#define QIB_6120_ErrStatus_RcvBadTidErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvHdrFullErr_LSB 0xD
-#define QIB_6120_ErrStatus_RcvHdrFullErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvEgrFullErr_LSB 0xC
-#define QIB_6120_ErrStatus_RcvEgrFullErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvBadVersionErr_LSB 0xB
-#define QIB_6120_ErrStatus_RcvBadVersionErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvIBFlowErr_LSB 0xA
-#define QIB_6120_ErrStatus_RcvIBFlowErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvEBPErr_LSB 0x9
-#define QIB_6120_ErrStatus_RcvEBPErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvUnsupportedVLErr_LSB 0x8
-#define QIB_6120_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvUnexpectedCharErr_LSB 0x7
-#define QIB_6120_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvShortPktLenErr_LSB 0x6
-#define QIB_6120_ErrStatus_RcvShortPktLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvLongPktLenErr_LSB 0x5
-#define QIB_6120_ErrStatus_RcvLongPktLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvMaxPktLenErr_LSB 0x4
-#define QIB_6120_ErrStatus_RcvMaxPktLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvMinPktLenErr_LSB 0x3
-#define QIB_6120_ErrStatus_RcvMinPktLenErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvICRCErr_LSB 0x2
-#define QIB_6120_ErrStatus_RcvICRCErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvVCRCErr_LSB 0x1
-#define QIB_6120_ErrStatus_RcvVCRCErr_RMASK 0x1
-#define QIB_6120_ErrStatus_RcvFormatErr_LSB 0x0
-#define QIB_6120_ErrStatus_RcvFormatErr_RMASK 0x1
-
-#define QIB_6120_ErrClear_OFFS 0x90
-#define QIB_6120_ErrClear_Reserved_LSB 0x34
-#define QIB_6120_ErrClear_Reserved_RMASK 0xFFF
-#define QIB_6120_ErrClear_HardwareErrClear_LSB 0x33
-#define QIB_6120_ErrClear_HardwareErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_ResetNegatedClear_LSB 0x32
-#define QIB_6120_ErrClear_ResetNegatedClear_RMASK 0x1
-#define QIB_6120_ErrClear_InvalidAddrErrClear_LSB 0x31
-#define QIB_6120_ErrClear_InvalidAddrErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_IBStatusChangedClear_LSB 0x30
-#define QIB_6120_ErrClear_IBStatusChangedClear_RMASK 0x1
-#define QIB_6120_ErrClear_Reserved1_LSB 0x26
-#define QIB_6120_ErrClear_Reserved1_RMASK 0x3FF
-#define QIB_6120_ErrClear_SendUnsupportedVLErrClear_LSB 0x25
-#define QIB_6120_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24
-#define QIB_6120_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendPioArmLaunchErrClear_LSB 0x23
-#define QIB_6120_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendDroppedDataPktErrClear_LSB 0x22
-#define QIB_6120_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21
-#define QIB_6120_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendPktLenErrClear_LSB 0x20
-#define QIB_6120_ErrClear_SendPktLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendUnderRunErrClear_LSB 0x1F
-#define QIB_6120_ErrClear_SendUnderRunErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendMaxPktLenErrClear_LSB 0x1E
-#define QIB_6120_ErrClear_SendMaxPktLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_SendMinPktLenErrClear_LSB 0x1D
-#define QIB_6120_ErrClear_SendMinPktLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_Reserved2_LSB 0x12
-#define QIB_6120_ErrClear_Reserved2_RMASK 0x7FF
-#define QIB_6120_ErrClear_RcvIBLostLinkErrClear_LSB 0x11
-#define QIB_6120_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvHdrErrClear_LSB 0x10
-#define QIB_6120_ErrClear_RcvHdrErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvHdrLenErrClear_LSB 0xF
-#define QIB_6120_ErrClear_RcvHdrLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvBadTidErrClear_LSB 0xE
-#define QIB_6120_ErrClear_RcvBadTidErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvHdrFullErrClear_LSB 0xD
-#define QIB_6120_ErrClear_RcvHdrFullErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvEgrFullErrClear_LSB 0xC
-#define QIB_6120_ErrClear_RcvEgrFullErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvBadVersionErrClear_LSB 0xB
-#define QIB_6120_ErrClear_RcvBadVersionErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvIBFlowErrClear_LSB 0xA
-#define QIB_6120_ErrClear_RcvIBFlowErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvEBPErrClear_LSB 0x9
-#define QIB_6120_ErrClear_RcvEBPErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8
-#define QIB_6120_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7
-#define QIB_6120_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvShortPktLenErrClear_LSB 0x6
-#define QIB_6120_ErrClear_RcvShortPktLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvLongPktLenErrClear_LSB 0x5
-#define QIB_6120_ErrClear_RcvLongPktLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvMaxPktLenErrClear_LSB 0x4
-#define QIB_6120_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvMinPktLenErrClear_LSB 0x3
-#define QIB_6120_ErrClear_RcvMinPktLenErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvICRCErrClear_LSB 0x2
-#define QIB_6120_ErrClear_RcvICRCErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvVCRCErrClear_LSB 0x1
-#define QIB_6120_ErrClear_RcvVCRCErrClear_RMASK 0x1
-#define QIB_6120_ErrClear_RcvFormatErrClear_LSB 0x0
-#define QIB_6120_ErrClear_RcvFormatErrClear_RMASK 0x1
-
-#define QIB_6120_HwErrMask_OFFS 0x98
-#define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F
-#define QIB_6120_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1
-#define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E
-#define QIB_6120_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1
-#define QIB_6120_HwErrMask_Reserved_LSB 0x3D
-#define QIB_6120_HwErrMask_Reserved_RMASK 0x1
-#define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C
-#define QIB_6120_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1
-#define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x3B
-#define QIB_6120_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1
-#define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x3A
-#define QIB_6120_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1
-#define QIB_6120_HwErrMask_Reserved1_LSB 0x39
-#define QIB_6120_HwErrMask_Reserved1_RMASK 0x1
-#define QIB_6120_HwErrMask_IBPLLrfSlipMask_LSB 0x38
-#define QIB_6120_HwErrMask_IBPLLrfSlipMask_RMASK 0x1
-#define QIB_6120_HwErrMask_IBPLLfbSlipMask_LSB 0x37
-#define QIB_6120_HwErrMask_IBPLLfbSlipMask_RMASK 0x1
-#define QIB_6120_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
-#define QIB_6120_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
-#define QIB_6120_HwErrMask_Reserved2_LSB 0x33
-#define QIB_6120_HwErrMask_Reserved2_RMASK 0x7
-#define QIB_6120_HwErrMask_RXEMemParityErrMask_LSB 0x2C
-#define QIB_6120_HwErrMask_RXEMemParityErrMask_RMASK 0x7F
-#define QIB_6120_HwErrMask_TXEMemParityErrMask_LSB 0x28
-#define QIB_6120_HwErrMask_TXEMemParityErrMask_RMASK 0xF
-#define QIB_6120_HwErrMask_Reserved3_LSB 0x22
-#define QIB_6120_HwErrMask_Reserved3_RMASK 0x3F
-#define QIB_6120_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
-#define QIB_6120_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
-#define QIB_6120_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
-#define QIB_6120_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
-#define QIB_6120_HwErrMask_PoisonedTLPMask_LSB 0x1D
-#define QIB_6120_HwErrMask_PoisonedTLPMask_RMASK 0x1
-#define QIB_6120_HwErrMask_Reserved4_LSB 0x6
-#define QIB_6120_HwErrMask_Reserved4_RMASK 0x7FFFFF
-#define QIB_6120_HwErrMask_PCIeMemParityErrMask_LSB 0x0
-#define QIB_6120_HwErrMask_PCIeMemParityErrMask_RMASK 0x3F
-
-#define QIB_6120_HwErrStatus_OFFS 0xA0
-#define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F
-#define QIB_6120_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1
-#define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E
-#define QIB_6120_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1
-#define QIB_6120_HwErrStatus_Reserved_LSB 0x3D
-#define QIB_6120_HwErrStatus_Reserved_RMASK 0x1
-#define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C
-#define QIB_6120_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1
-#define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x3B
-#define QIB_6120_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1
-#define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x3A
-#define QIB_6120_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1
-#define QIB_6120_HwErrStatus_Reserved1_LSB 0x39
-#define QIB_6120_HwErrStatus_Reserved1_RMASK 0x1
-#define QIB_6120_HwErrStatus_IBPLLrfSlip_LSB 0x38
-#define QIB_6120_HwErrStatus_IBPLLrfSlip_RMASK 0x1
-#define QIB_6120_HwErrStatus_IBPLLfbSlip_LSB 0x37
-#define QIB_6120_HwErrStatus_IBPLLfbSlip_RMASK 0x1
-#define QIB_6120_HwErrStatus_PowerOnBISTFailed_LSB 0x36
-#define QIB_6120_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
-#define QIB_6120_HwErrStatus_Reserved2_LSB 0x33
-#define QIB_6120_HwErrStatus_Reserved2_RMASK 0x7
-#define QIB_6120_HwErrStatus_RXEMemParity_LSB 0x2C
-#define QIB_6120_HwErrStatus_RXEMemParity_RMASK 0x7F
-#define QIB_6120_HwErrStatus_TXEMemParity_LSB 0x28
-#define QIB_6120_HwErrStatus_TXEMemParity_RMASK 0xF
-#define QIB_6120_HwErrStatus_Reserved3_LSB 0x22
-#define QIB_6120_HwErrStatus_Reserved3_RMASK 0x3F
-#define QIB_6120_HwErrStatus_PCIeBusParity_LSB 0x1F
-#define QIB_6120_HwErrStatus_PCIeBusParity_RMASK 0x7
-#define QIB_6120_HwErrStatus_PcieCplTimeout_LSB 0x1E
-#define QIB_6120_HwErrStatus_PcieCplTimeout_RMASK 0x1
-#define QIB_6120_HwErrStatus_PoisenedTLP_LSB 0x1D
-#define QIB_6120_HwErrStatus_PoisenedTLP_RMASK 0x1
-#define QIB_6120_HwErrStatus_Reserved4_LSB 0x6
-#define QIB_6120_HwErrStatus_Reserved4_RMASK 0x7FFFFF
-#define QIB_6120_HwErrStatus_PCIeMemParity_LSB 0x0
-#define QIB_6120_HwErrStatus_PCIeMemParity_RMASK 0x3F
-
-#define QIB_6120_HwErrClear_OFFS 0xA8
-#define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F
-#define QIB_6120_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1
-#define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E
-#define QIB_6120_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1
-#define QIB_6120_HwErrClear_Reserved_LSB 0x3D
-#define QIB_6120_HwErrClear_Reserved_RMASK 0x1
-#define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C
-#define QIB_6120_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1
-#define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x3B
-#define QIB_6120_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1
-#define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x3A
-#define QIB_6120_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1
-#define QIB_6120_HwErrClear_Reserved1_LSB 0x39
-#define QIB_6120_HwErrClear_Reserved1_RMASK 0x1
-#define QIB_6120_HwErrClear_IBPLLrfSlipClear_LSB 0x38
-#define QIB_6120_HwErrClear_IBPLLrfSlipClear_RMASK 0x1
-#define QIB_6120_HwErrClear_IBPLLfbSlipClear_LSB 0x37
-#define QIB_6120_HwErrClear_IBPLLfbSlipClear_RMASK 0x1
-#define QIB_6120_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
-#define QIB_6120_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
-#define QIB_6120_HwErrClear_Reserved2_LSB 0x33
-#define QIB_6120_HwErrClear_Reserved2_RMASK 0x7
-#define QIB_6120_HwErrClear_RXEMemParityClear_LSB 0x2C
-#define QIB_6120_HwErrClear_RXEMemParityClear_RMASK 0x7F
-#define QIB_6120_HwErrClear_TXEMemParityClear_LSB 0x28
-#define QIB_6120_HwErrClear_TXEMemParityClear_RMASK 0xF
-#define QIB_6120_HwErrClear_Reserved3_LSB 0x22
-#define QIB_6120_HwErrClear_Reserved3_RMASK 0x3F
-#define QIB_6120_HwErrClear_PCIeBusParityClr_LSB 0x1F
-#define QIB_6120_HwErrClear_PCIeBusParityClr_RMASK 0x7
-#define QIB_6120_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
-#define QIB_6120_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
-#define QIB_6120_HwErrClear_PoisonedTLPClear_LSB 0x1D
-#define QIB_6120_HwErrClear_PoisonedTLPClear_RMASK 0x1
-#define QIB_6120_HwErrClear_Reserved4_LSB 0x6
-#define QIB_6120_HwErrClear_Reserved4_RMASK 0x7FFFFF
-#define QIB_6120_HwErrClear_PCIeMemParityClr_LSB 0x0
-#define QIB_6120_HwErrClear_PCIeMemParityClr_RMASK 0x3F
-
-#define QIB_6120_HwDiagCtrl_OFFS 0xB0
-#define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F
-#define QIB_6120_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1
-#define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E
-#define QIB_6120_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1
-#define QIB_6120_HwDiagCtrl_CounterWrEnable_LSB 0x3D
-#define QIB_6120_HwDiagCtrl_CounterWrEnable_RMASK 0x1
-#define QIB_6120_HwDiagCtrl_CounterDisable_LSB 0x3C
-#define QIB_6120_HwDiagCtrl_CounterDisable_RMASK 0x1
-#define QIB_6120_HwDiagCtrl_Reserved_LSB 0x33
-#define QIB_6120_HwDiagCtrl_Reserved_RMASK 0x1FF
-#define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C
-#define QIB_6120_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F
-#define QIB_6120_HwDiagCtrl_ForceTxMemParityErr_LSB 0x28
-#define QIB_6120_HwDiagCtrl_ForceTxMemParityErr_RMASK 0xF
-#define QIB_6120_HwDiagCtrl_Reserved1_LSB 0x23
-#define QIB_6120_HwDiagCtrl_Reserved1_RMASK 0x1F
-#define QIB_6120_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
-#define QIB_6120_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
-#define QIB_6120_HwDiagCtrl_Reserved2_LSB 0x6
-#define QIB_6120_HwDiagCtrl_Reserved2_RMASK 0x1FFFFFF
-#define QIB_6120_HwDiagCtrl_forcePCIeMemParity_LSB 0x0
-#define QIB_6120_HwDiagCtrl_forcePCIeMemParity_RMASK 0x3F
-
-#define QIB_6120_IBCStatus_OFFS 0xC0
-#define QIB_6120_IBCStatus_TxCreditOk_LSB 0x1F
-#define QIB_6120_IBCStatus_TxCreditOk_RMASK 0x1
-#define QIB_6120_IBCStatus_TxReady_LSB 0x1E
-#define QIB_6120_IBCStatus_TxReady_RMASK 0x1
-#define QIB_6120_IBCStatus_Reserved_LSB 0x7
-#define QIB_6120_IBCStatus_Reserved_RMASK 0x7FFFFF
-#define QIB_6120_IBCStatus_LinkState_LSB 0x4
-#define QIB_6120_IBCStatus_LinkState_RMASK 0x7
-#define QIB_6120_IBCStatus_LinkTrainingState_LSB 0x0
-#define QIB_6120_IBCStatus_LinkTrainingState_RMASK 0xF
-
-#define QIB_6120_IBCCtrl_OFFS 0xC8
-#define QIB_6120_IBCCtrl_Loopback_LSB 0x3F
-#define QIB_6120_IBCCtrl_Loopback_RMASK 0x1
-#define QIB_6120_IBCCtrl_LinkDownDefaultState_LSB 0x3E
-#define QIB_6120_IBCCtrl_LinkDownDefaultState_RMASK 0x1
-#define QIB_6120_IBCCtrl_Reserved_LSB 0x2B
-#define QIB_6120_IBCCtrl_Reserved_RMASK 0x7FFFF
-#define QIB_6120_IBCCtrl_CreditScale_LSB 0x28
-#define QIB_6120_IBCCtrl_CreditScale_RMASK 0x7
-#define QIB_6120_IBCCtrl_OverrunThreshold_LSB 0x24
-#define QIB_6120_IBCCtrl_OverrunThreshold_RMASK 0xF
-#define QIB_6120_IBCCtrl_PhyerrThreshold_LSB 0x20
-#define QIB_6120_IBCCtrl_PhyerrThreshold_RMASK 0xF
-#define QIB_6120_IBCCtrl_Reserved1_LSB 0x1F
-#define QIB_6120_IBCCtrl_Reserved1_RMASK 0x1
-#define QIB_6120_IBCCtrl_MaxPktLen_LSB 0x14
-#define QIB_6120_IBCCtrl_MaxPktLen_RMASK 0x7FF
-#define QIB_6120_IBCCtrl_LinkCmd_LSB 0x12
-#define QIB_6120_IBCCtrl_LinkCmd_RMASK 0x3
-#define QIB_6120_IBCCtrl_LinkInitCmd_LSB 0x10
-#define QIB_6120_IBCCtrl_LinkInitCmd_RMASK 0x3
-#define QIB_6120_IBCCtrl_FlowCtrlWaterMark_LSB 0x8
-#define QIB_6120_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF
-#define QIB_6120_IBCCtrl_FlowCtrlPeriod_LSB 0x0
-#define QIB_6120_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF
-
-#define QIB_6120_EXTStatus_OFFS 0xD0
-#define QIB_6120_EXTStatus_GPIOIn_LSB 0x30
-#define QIB_6120_EXTStatus_GPIOIn_RMASK 0xFFFF
-#define QIB_6120_EXTStatus_Reserved_LSB 0x20
-#define QIB_6120_EXTStatus_Reserved_RMASK 0xFFFF
-#define QIB_6120_EXTStatus_Reserved1_LSB 0x10
-#define QIB_6120_EXTStatus_Reserved1_RMASK 0xFFFF
-#define QIB_6120_EXTStatus_MemBISTFoundErr_LSB 0xF
-#define QIB_6120_EXTStatus_MemBISTFoundErr_RMASK 0x1
-#define QIB_6120_EXTStatus_MemBISTEndTest_LSB 0xE
-#define QIB_6120_EXTStatus_MemBISTEndTest_RMASK 0x1
-#define QIB_6120_EXTStatus_Reserved2_LSB 0x0
-#define QIB_6120_EXTStatus_Reserved2_RMASK 0x3FFF
-
-#define QIB_6120_EXTCtrl_OFFS 0xD8
-#define QIB_6120_EXTCtrl_GPIOOe_LSB 0x30
-#define QIB_6120_EXTCtrl_GPIOOe_RMASK 0xFFFF
-#define QIB_6120_EXTCtrl_GPIOInvert_LSB 0x20
-#define QIB_6120_EXTCtrl_GPIOInvert_RMASK 0xFFFF
-#define QIB_6120_EXTCtrl_Reserved_LSB 0x4
-#define QIB_6120_EXTCtrl_Reserved_RMASK 0xFFFFFFF
-#define QIB_6120_EXTCtrl_LEDPriPortGreenOn_LSB 0x3
-#define QIB_6120_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1
-#define QIB_6120_EXTCtrl_LEDPriPortYellowOn_LSB 0x2
-#define QIB_6120_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1
-#define QIB_6120_EXTCtrl_LEDGblOkGreenOn_LSB 0x1
-#define QIB_6120_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1
-#define QIB_6120_EXTCtrl_LEDGblErrRedOff_LSB 0x0
-#define QIB_6120_EXTCtrl_LEDGblErrRedOff_RMASK 0x1
-
-#define QIB_6120_GPIOOut_OFFS 0xE0
-
-#define QIB_6120_GPIOMask_OFFS 0xE8
-
-#define QIB_6120_GPIOStatus_OFFS 0xF0
-
-#define QIB_6120_GPIOClear_OFFS 0xF8
-
-#define QIB_6120_RcvCtrl_OFFS 0x100
-#define QIB_6120_RcvCtrl_TailUpd_LSB 0x1F
-#define QIB_6120_RcvCtrl_TailUpd_RMASK 0x1
-#define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_LSB 0x1E
-#define QIB_6120_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1
-#define QIB_6120_RcvCtrl_Reserved_LSB 0x15
-#define QIB_6120_RcvCtrl_Reserved_RMASK 0x1FF
-#define QIB_6120_RcvCtrl_IntrAvail_LSB 0x10
-#define QIB_6120_RcvCtrl_IntrAvail_RMASK 0x1F
-#define QIB_6120_RcvCtrl_Reserved1_LSB 0x9
-#define QIB_6120_RcvCtrl_Reserved1_RMASK 0x7F
-#define QIB_6120_RcvCtrl_Reserved2_LSB 0x5
-#define QIB_6120_RcvCtrl_Reserved2_RMASK 0xF
-#define QIB_6120_RcvCtrl_PortEnable_LSB 0x0
-#define QIB_6120_RcvCtrl_PortEnable_RMASK 0x1F
-
-#define QIB_6120_RcvBTHQP_OFFS 0x108
-#define QIB_6120_RcvBTHQP_BTHQP_Mask_LSB 0x1E
-#define QIB_6120_RcvBTHQP_BTHQP_Mask_RMASK 0x3
-#define QIB_6120_RcvBTHQP_Reserved_LSB 0x18
-#define QIB_6120_RcvBTHQP_Reserved_RMASK 0x3F
-#define QIB_6120_RcvBTHQP_RcvBTHQP_LSB 0x0
-#define QIB_6120_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF
-
-#define QIB_6120_RcvHdrSize_OFFS 0x110
-
-#define QIB_6120_RcvHdrCnt_OFFS 0x118
-
-#define QIB_6120_RcvHdrEntSize_OFFS 0x120
-
-#define QIB_6120_RcvTIDBase_OFFS 0x128
-
-#define QIB_6120_RcvTIDCnt_OFFS 0x130
-
-#define QIB_6120_RcvEgrBase_OFFS 0x138
-
-#define QIB_6120_RcvEgrCnt_OFFS 0x140
-
-#define QIB_6120_RcvBufBase_OFFS 0x148
-
-#define QIB_6120_RcvBufSize_OFFS 0x150
-
-#define QIB_6120_RxIntMemBase_OFFS 0x158
-
-#define QIB_6120_RxIntMemSize_OFFS 0x160
-
-#define QIB_6120_RcvPartitionKey_OFFS 0x168
-
-#define QIB_6120_RcvPktLEDCnt_OFFS 0x178
-#define QIB_6120_RcvPktLEDCnt_ONperiod_LSB 0x20
-#define QIB_6120_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF
-#define QIB_6120_RcvPktLEDCnt_OFFperiod_LSB 0x0
-#define QIB_6120_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF
-
-#define QIB_6120_SendCtrl_OFFS 0x1C0
-#define QIB_6120_SendCtrl_Disarm_LSB 0x1F
-#define QIB_6120_SendCtrl_Disarm_RMASK 0x1
-#define QIB_6120_SendCtrl_Reserved_LSB 0x17
-#define QIB_6120_SendCtrl_Reserved_RMASK 0xFF
-#define QIB_6120_SendCtrl_DisarmPIOBuf_LSB 0x10
-#define QIB_6120_SendCtrl_DisarmPIOBuf_RMASK 0x7F
-#define QIB_6120_SendCtrl_Reserved1_LSB 0x4
-#define QIB_6120_SendCtrl_Reserved1_RMASK 0xFFF
-#define QIB_6120_SendCtrl_PIOEnable_LSB 0x3
-#define QIB_6120_SendCtrl_PIOEnable_RMASK 0x1
-#define QIB_6120_SendCtrl_PIOBufAvailUpd_LSB 0x2
-#define QIB_6120_SendCtrl_PIOBufAvailUpd_RMASK 0x1
-#define QIB_6120_SendCtrl_PIOIntBufAvail_LSB 0x1
-#define QIB_6120_SendCtrl_PIOIntBufAvail_RMASK 0x1
-#define QIB_6120_SendCtrl_Abort_LSB 0x0
-#define QIB_6120_SendCtrl_Abort_RMASK 0x1
-
-#define QIB_6120_SendPIOBufBase_OFFS 0x1C8
-#define QIB_6120_SendPIOBufBase_Reserved_LSB 0x35
-#define QIB_6120_SendPIOBufBase_Reserved_RMASK 0x7FF
-#define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_LSB 0x20
-#define QIB_6120_SendPIOBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
-#define QIB_6120_SendPIOBufBase_Reserved1_LSB 0x15
-#define QIB_6120_SendPIOBufBase_Reserved1_RMASK 0x7FF
-#define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_LSB 0x0
-#define QIB_6120_SendPIOBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
-
-#define QIB_6120_SendPIOSize_OFFS 0x1D0
-#define QIB_6120_SendPIOSize_Reserved_LSB 0x2D
-#define QIB_6120_SendPIOSize_Reserved_RMASK 0xFFFFF
-#define QIB_6120_SendPIOSize_Size_LargePIO_LSB 0x20
-#define QIB_6120_SendPIOSize_Size_LargePIO_RMASK 0x1FFF
-#define QIB_6120_SendPIOSize_Reserved1_LSB 0xC
-#define QIB_6120_SendPIOSize_Reserved1_RMASK 0xFFFFF
-#define QIB_6120_SendPIOSize_Size_SmallPIO_LSB 0x0
-#define QIB_6120_SendPIOSize_Size_SmallPIO_RMASK 0xFFF
-
-#define QIB_6120_SendPIOBufCnt_OFFS 0x1D8
-#define QIB_6120_SendPIOBufCnt_Reserved_LSB 0x24
-#define QIB_6120_SendPIOBufCnt_Reserved_RMASK 0xFFFFFFF
-#define QIB_6120_SendPIOBufCnt_Num_LargePIO_LSB 0x20
-#define QIB_6120_SendPIOBufCnt_Num_LargePIO_RMASK 0xF
-#define QIB_6120_SendPIOBufCnt_Reserved1_LSB 0x9
-#define QIB_6120_SendPIOBufCnt_Reserved1_RMASK 0x7FFFFF
-#define QIB_6120_SendPIOBufCnt_Num_SmallPIO_LSB 0x0
-#define QIB_6120_SendPIOBufCnt_Num_SmallPIO_RMASK 0x1FF
-
-#define QIB_6120_SendPIOAvailAddr_OFFS 0x1E0
-#define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_LSB 0x6
-#define QIB_6120_SendPIOAvailAddr_SendPIOAvailAddr_RMASK 0x3FFFFFFFF
-#define QIB_6120_SendPIOAvailAddr_Reserved_LSB 0x0
-#define QIB_6120_SendPIOAvailAddr_Reserved_RMASK 0x3F
-
-#define QIB_6120_SendBufErr0_OFFS 0x240
-#define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_LSB 0x0
-#define QIB_6120_SendBufErr0_SendBufErrPIO_63_0_RMASK 0x0
-
-#define QIB_6120_RcvHdrAddr0_OFFS 0x280
-#define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2
-#define QIB_6120_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF
-#define QIB_6120_RcvHdrAddr0_Reserved_LSB 0x0
-#define QIB_6120_RcvHdrAddr0_Reserved_RMASK 0x3
-
-#define QIB_6120_RcvHdrTailAddr0_OFFS 0x300
-#define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2
-#define QIB_6120_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF
-#define QIB_6120_RcvHdrTailAddr0_Reserved_LSB 0x0
-#define QIB_6120_RcvHdrTailAddr0_Reserved_RMASK 0x3
-
-#define QIB_6120_SerdesCfg0_OFFS 0x3C0
-#define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_LSB 0x3F
-#define QIB_6120_SerdesCfg0_DisableIBTxIdleDetect_RMASK 0x1
-#define QIB_6120_SerdesCfg0_Reserved_LSB 0x38
-#define QIB_6120_SerdesCfg0_Reserved_RMASK 0x7F
-#define QIB_6120_SerdesCfg0_RxEqCtl_LSB 0x36
-#define QIB_6120_SerdesCfg0_RxEqCtl_RMASK 0x3
-#define QIB_6120_SerdesCfg0_TxTermAdj_LSB 0x34
-#define QIB_6120_SerdesCfg0_TxTermAdj_RMASK 0x3
-#define QIB_6120_SerdesCfg0_RxTermAdj_LSB 0x32
-#define QIB_6120_SerdesCfg0_RxTermAdj_RMASK 0x3
-#define QIB_6120_SerdesCfg0_TermAdj1_LSB 0x31
-#define QIB_6120_SerdesCfg0_TermAdj1_RMASK 0x1
-#define QIB_6120_SerdesCfg0_TermAdj0_LSB 0x30
-#define QIB_6120_SerdesCfg0_TermAdj0_RMASK 0x1
-#define QIB_6120_SerdesCfg0_LPBKA_LSB 0x2F
-#define QIB_6120_SerdesCfg0_LPBKA_RMASK 0x1
-#define QIB_6120_SerdesCfg0_LPBKB_LSB 0x2E
-#define QIB_6120_SerdesCfg0_LPBKB_RMASK 0x1
-#define QIB_6120_SerdesCfg0_LPBKC_LSB 0x2D
-#define QIB_6120_SerdesCfg0_LPBKC_RMASK 0x1
-#define QIB_6120_SerdesCfg0_LPBKD_LSB 0x2C
-#define QIB_6120_SerdesCfg0_LPBKD_RMASK 0x1
-#define QIB_6120_SerdesCfg0_PW_LSB 0x2B
-#define QIB_6120_SerdesCfg0_PW_RMASK 0x1
-#define QIB_6120_SerdesCfg0_RefSel_LSB 0x29
-#define QIB_6120_SerdesCfg0_RefSel_RMASK 0x3
-#define QIB_6120_SerdesCfg0_ParReset_LSB 0x28
-#define QIB_6120_SerdesCfg0_ParReset_RMASK 0x1
-#define QIB_6120_SerdesCfg0_ParLPBK_LSB 0x27
-#define QIB_6120_SerdesCfg0_ParLPBK_RMASK 0x1
-#define QIB_6120_SerdesCfg0_OffsetEn_LSB 0x26
-#define QIB_6120_SerdesCfg0_OffsetEn_RMASK 0x1
-#define QIB_6120_SerdesCfg0_Offset_LSB 0x1E
-#define QIB_6120_SerdesCfg0_Offset_RMASK 0xFF
-#define QIB_6120_SerdesCfg0_L2PwrDn_LSB 0x1D
-#define QIB_6120_SerdesCfg0_L2PwrDn_RMASK 0x1
-#define QIB_6120_SerdesCfg0_ResetPLL_LSB 0x1C
-#define QIB_6120_SerdesCfg0_ResetPLL_RMASK 0x1
-#define QIB_6120_SerdesCfg0_RxTermEnX_LSB 0x18
-#define QIB_6120_SerdesCfg0_RxTermEnX_RMASK 0xF
-#define QIB_6120_SerdesCfg0_BeaconTxEnX_LSB 0x14
-#define QIB_6120_SerdesCfg0_BeaconTxEnX_RMASK 0xF
-#define QIB_6120_SerdesCfg0_RxDetEnX_LSB 0x10
-#define QIB_6120_SerdesCfg0_RxDetEnX_RMASK 0xF
-#define QIB_6120_SerdesCfg0_TxIdleEnX_LSB 0xC
-#define QIB_6120_SerdesCfg0_TxIdleEnX_RMASK 0xF
-#define QIB_6120_SerdesCfg0_RxIdleEnX_LSB 0x8
-#define QIB_6120_SerdesCfg0_RxIdleEnX_RMASK 0xF
-#define QIB_6120_SerdesCfg0_L1PwrDnA_LSB 0x7
-#define QIB_6120_SerdesCfg0_L1PwrDnA_RMASK 0x1
-#define QIB_6120_SerdesCfg0_L1PwrDnB_LSB 0x6
-#define QIB_6120_SerdesCfg0_L1PwrDnB_RMASK 0x1
-#define QIB_6120_SerdesCfg0_L1PwrDnC_LSB 0x5
-#define QIB_6120_SerdesCfg0_L1PwrDnC_RMASK 0x1
-#define QIB_6120_SerdesCfg0_L1PwrDnD_LSB 0x4
-#define QIB_6120_SerdesCfg0_L1PwrDnD_RMASK 0x1
-#define QIB_6120_SerdesCfg0_ResetA_LSB 0x3
-#define QIB_6120_SerdesCfg0_ResetA_RMASK 0x1
-#define QIB_6120_SerdesCfg0_ResetB_LSB 0x2
-#define QIB_6120_SerdesCfg0_ResetB_RMASK 0x1
-#define QIB_6120_SerdesCfg0_ResetC_LSB 0x1
-#define QIB_6120_SerdesCfg0_ResetC_RMASK 0x1
-#define QIB_6120_SerdesCfg0_ResetD_LSB 0x0
-#define QIB_6120_SerdesCfg0_ResetD_RMASK 0x1
-
-#define QIB_6120_SerdesStat_OFFS 0x3D0
-#define QIB_6120_SerdesStat_Reserved_LSB 0xC
-#define QIB_6120_SerdesStat_Reserved_RMASK 0xFFFFFFFFFFFFF
-#define QIB_6120_SerdesStat_BeaconDetA_LSB 0xB
-#define QIB_6120_SerdesStat_BeaconDetA_RMASK 0x1
-#define QIB_6120_SerdesStat_BeaconDetB_LSB 0xA
-#define QIB_6120_SerdesStat_BeaconDetB_RMASK 0x1
-#define QIB_6120_SerdesStat_BeaconDetC_LSB 0x9
-#define QIB_6120_SerdesStat_BeaconDetC_RMASK 0x1
-#define QIB_6120_SerdesStat_BeaconDetD_LSB 0x8
-#define QIB_6120_SerdesStat_BeaconDetD_RMASK 0x1
-#define QIB_6120_SerdesStat_RxDetA_LSB 0x7
-#define QIB_6120_SerdesStat_RxDetA_RMASK 0x1
-#define QIB_6120_SerdesStat_RxDetB_LSB 0x6
-#define QIB_6120_SerdesStat_RxDetB_RMASK 0x1
-#define QIB_6120_SerdesStat_RxDetC_LSB 0x5
-#define QIB_6120_SerdesStat_RxDetC_RMASK 0x1
-#define QIB_6120_SerdesStat_RxDetD_LSB 0x4
-#define QIB_6120_SerdesStat_RxDetD_RMASK 0x1
-#define QIB_6120_SerdesStat_TxIdleDetA_LSB 0x3
-#define QIB_6120_SerdesStat_TxIdleDetA_RMASK 0x1
-#define QIB_6120_SerdesStat_TxIdleDetB_LSB 0x2
-#define QIB_6120_SerdesStat_TxIdleDetB_RMASK 0x1
-#define QIB_6120_SerdesStat_TxIdleDetC_LSB 0x1
-#define QIB_6120_SerdesStat_TxIdleDetC_RMASK 0x1
-#define QIB_6120_SerdesStat_TxIdleDetD_LSB 0x0
-#define QIB_6120_SerdesStat_TxIdleDetD_RMASK 0x1
-
-#define QIB_6120_XGXSCfg_OFFS 0x3D8
-#define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_LSB 0x3F
-#define QIB_6120_XGXSCfg_ArmLaunchErrorDisable_RMASK 0x1
-#define QIB_6120_XGXSCfg_Reserved_LSB 0x17
-#define QIB_6120_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFF
-#define QIB_6120_XGXSCfg_polarity_inv_LSB 0x13
-#define QIB_6120_XGXSCfg_polarity_inv_RMASK 0xF
-#define QIB_6120_XGXSCfg_link_sync_mask_LSB 0x9
-#define QIB_6120_XGXSCfg_link_sync_mask_RMASK 0x3FF
-#define QIB_6120_XGXSCfg_port_addr_LSB 0x4
-#define QIB_6120_XGXSCfg_port_addr_RMASK 0x1F
-#define QIB_6120_XGXSCfg_mdd_30_LSB 0x3
-#define QIB_6120_XGXSCfg_mdd_30_RMASK 0x1
-#define QIB_6120_XGXSCfg_xcv_resetn_LSB 0x2
-#define QIB_6120_XGXSCfg_xcv_resetn_RMASK 0x1
-#define QIB_6120_XGXSCfg_Reserved1_LSB 0x1
-#define QIB_6120_XGXSCfg_Reserved1_RMASK 0x1
-#define QIB_6120_XGXSCfg_tx_rx_resetn_LSB 0x0
-#define QIB_6120_XGXSCfg_tx_rx_resetn_RMASK 0x1
-
-#define QIB_6120_LBIntCnt_OFFS 0x12000
-
-#define QIB_6120_LBFlowStallCnt_OFFS 0x12008
-
-#define QIB_6120_TxUnsupVLErrCnt_OFFS 0x12018
-
-#define QIB_6120_TxDataPktCnt_OFFS 0x12020
-
-#define QIB_6120_TxFlowPktCnt_OFFS 0x12028
-
-#define QIB_6120_TxDwordCnt_OFFS 0x12030
-
-#define QIB_6120_TxLenErrCnt_OFFS 0x12038
-
-#define QIB_6120_TxMaxMinLenErrCnt_OFFS 0x12040
-
-#define QIB_6120_TxUnderrunCnt_OFFS 0x12048
-
-#define QIB_6120_TxFlowStallCnt_OFFS 0x12050
-
-#define QIB_6120_TxDroppedPktCnt_OFFS 0x12058
-
-#define QIB_6120_RxDroppedPktCnt_OFFS 0x12060
-
-#define QIB_6120_RxDataPktCnt_OFFS 0x12068
-
-#define QIB_6120_RxFlowPktCnt_OFFS 0x12070
-
-#define QIB_6120_RxDwordCnt_OFFS 0x12078
-
-#define QIB_6120_RxLenErrCnt_OFFS 0x12080
-
-#define QIB_6120_RxMaxMinLenErrCnt_OFFS 0x12088
-
-#define QIB_6120_RxICRCErrCnt_OFFS 0x12090
-
-#define QIB_6120_RxVCRCErrCnt_OFFS 0x12098
-
-#define QIB_6120_RxFlowCtrlErrCnt_OFFS 0x120A0
-
-#define QIB_6120_RxBadFormatCnt_OFFS 0x120A8
-
-#define QIB_6120_RxLinkProblemCnt_OFFS 0x120B0
-
-#define QIB_6120_RxEBPCnt_OFFS 0x120B8
-
-#define QIB_6120_RxLPCRCErrCnt_OFFS 0x120C0
-
-#define QIB_6120_RxBufOvflCnt_OFFS 0x120C8
-
-#define QIB_6120_RxTIDFullErrCnt_OFFS 0x120D0
-
-#define QIB_6120_RxTIDValidErrCnt_OFFS 0x120D8
-
-#define QIB_6120_RxPKeyMismatchCnt_OFFS 0x120E0
-
-#define QIB_6120_RxP0HdrEgrOvflCnt_OFFS 0x120E8
-
-#define QIB_6120_IBStatusChangeCnt_OFFS 0x12140
-
-#define QIB_6120_IBLinkErrRecoveryCnt_OFFS 0x12148
-
-#define QIB_6120_IBLinkDownedCnt_OFFS 0x12150
-
-#define QIB_6120_IBSymbolErrCnt_OFFS 0x12158
-
-#define QIB_6120_PcieRetryBufDiagQwordCnt_OFFS 0x12170
-
-#define QIB_6120_RcvEgrArray0_OFFS 0x14000
-
-#define QIB_6120_RcvTIDArray0_OFFS 0x54000
-
-#define QIB_6120_PIOLaunchFIFO_OFFS 0x64000
-
-#define QIB_6120_SendPIOpbcCache_OFFS 0x64800
-
-#define QIB_6120_RcvBuf1_OFFS 0x72000
-
-#define QIB_6120_RcvBuf2_OFFS 0x75000
-
-#define QIB_6120_RcvFlags_OFFS 0x77000
-
-#define QIB_6120_RcvLookupBuf1_OFFS 0x79000
-
-#define QIB_6120_RcvDMABuf_OFFS 0x7B000
-
-#define QIB_6120_MiscRXEIntMem_OFFS 0x7C000
-
-#define QIB_6120_PCIERcvBuf_OFFS 0x80000
-
-#define QIB_6120_PCIERetryBuf_OFFS 0x82000
-
-#define QIB_6120_PCIERcvBufRdToWrAddr_OFFS 0x84000
-
-#define QIB_6120_PIOBuf0_MA_OFFS 0x100000
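
The generated _LSB/_RMASK pairs above are consumed by the chip-specific C files through small token-pasting helpers: a field is read by shifting the 64-bit register value right by its LSB and masking with RMASK. A minimal sketch of that pattern, assuming kernel types from <linux/types.h>; the SYM_* names mirror the driver's convention, and ibc_link_state() is a hypothetical wrapper for illustration, not the exact kernel code:

#include <linux/types.h>

/* Paste register and field names into the generated macro names */
#define SYM_LSB(regname, fldname) (QIB_6120_##regname##_##fldname##_LSB)
#define SYM_RMASK(regname, fldname) \
	((u64)QIB_6120_##regname##_##fldname##_RMASK)
/* Shift the field down to bit 0, then mask off its neighbors */
#define SYM_FIELD(value, regname, fldname) \
	(((value) >> SYM_LSB(regname, fldname)) & \
	 SYM_RMASK(regname, fldname))

static inline u32 ibc_link_state(u64 ibcstatus)
{
	/* LinkState is bits [6:4] of IBCStatus (LSB 0x4, RMASK 0x7) */
	return (u32)SYM_FIELD(ibcstatus, IBCStatus, LinkState);
}
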
diff --git a/drivers/infiniband/hw/qib/qib_7220.h b/drivers/infiniband/hw/qib/qib_7220.h
deleted file mode 100644
index 9ecaab6232e3..000000000000
--- a/drivers/infiniband/hw/qib/qib_7220.h
+++ /dev/null
@@ -1,149 +0,0 @@
-#ifndef _QIB_7220_H
-#define _QIB_7220_H
-/*
- * Copyright (c) 2007, 2009, 2010 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/* grab register-defs auto-generated by HW */
-#include "qib_7220_regs.h"
-
-/* The number of eager receive TIDs for context zero. */
-#define IBA7220_KRCVEGRCNT 2048U
-
-#define IB_7220_LT_STATE_CFGRCVFCFG 0x09
-#define IB_7220_LT_STATE_CFGWAITRMT 0x0a
-#define IB_7220_LT_STATE_TXREVLANES 0x0d
-#define IB_7220_LT_STATE_CFGENH 0x10
-
-struct qib_chip_specific {
- u64 __iomem *cregbase;
- u64 *cntrs;
- u64 *portcntrs;
- spinlock_t sdepb_lock; /* serdes EPB bus */
- spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
- spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
- u64 hwerrmask;
- u64 errormask;
- u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
- u64 gpio_mask; /* shadow the gpio mask register */
- u64 extctrl; /* shadow the gpio output enable, etc... */
- u32 ncntrs;
- u32 nportcntrs;
- u32 cntrnamelen;
- u32 portcntrnamelen;
- u32 numctxts;
- u32 rcvegrcnt;
- u32 autoneg_tries;
- u32 serdes_first_init_done;
- u32 sdmabufcnt;
- u32 lastbuf_for_pio;
- u32 updthresh; /* current AvailUpdThld */
- u32 updthresh_dflt; /* default AvailUpdThld */
- u8 presets_needed;
- u8 relock_timer_active;
- char emsgbuf[128];
- char sdmamsgbuf[192];
- char bitsmsgbuf[64];
- struct timer_list relock_timer;
- unsigned int relock_interval; /* in jiffies */
- struct qib_devdata *dd;
-};
-
-struct qib_chippport_specific {
- struct qib_pportdata pportdata;
- wait_queue_head_t autoneg_wait;
- struct delayed_work autoneg_work;
- struct timer_list chase_timer;
- /*
- * These 5 fields are used to establish deltas for IB symbol
- * errors and link recovery errors. They can be reported on
- * some chips during link negotiation prior to INIT, and with
- * DDR when faking DDR negotiations with non-IBTA switches.
- * The chip counters are adjusted at driver unload if there is
- * a non-zero delta.
- */
- u64 ibdeltainprog;
- u64 ibsymdelta;
- u64 ibsymsnap;
- u64 iblnkerrdelta;
- u64 iblnkerrsnap;
- u64 ibcctrl; /* kr_ibcctrl shadow */
- u64 ibcddrctrl; /* kr_ibcddrctrl shadow */
- unsigned long chase_end;
- u32 last_delay_mult;
-};
-
-/*
- * This header file provides the declarations and common definitions
- * for (mostly) manipulating the SerDes blocks within the IBA7220.
- * The functions declared here should only be called from within
- * other 7220-related files such as qib_iba7220.c or qib_sd7220.c.
- */
-int qib_sd7220_presets(struct qib_devdata *dd);
-int qib_sd7220_init(struct qib_devdata *dd);
-void qib_sd7220_clr_ibpar(struct qib_devdata *);
-/*
- * Values for the sdnum parameter select one of the two SerDes
- * sections used for PCIe, or the single SerDes used for IB, which
- * is the only one currently used.
- */
-#define IB_7220_SERDES 2
-
-static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
- const u16 regno)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return -1;
- return readl((u32 __iomem *)&dd->kregbase[regno]);
-}
-
-static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
- const u16 regno)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return -1;
-
- return readq(&dd->kregbase[regno]);
-}
-
-static inline void qib_write_kreg(const struct qib_devdata *dd,
- const u16 regno, u64 value)
-{
- if (dd->kregbase)
- writeq(value, &dd->kregbase[regno]);
-}
-
-void set_7220_relock_poll(struct qib_devdata *, int);
-void shutdown_7220_relock_poll(struct qib_devdata *);
-void toggle_7220_rclkrls(struct qib_devdata *);
-
-
-#endif /* _QIB_7220_H */
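
Note that the accessors above index kregbase in 64-bit words (readq(&dd->kregbase[regno])), while the generated _OFFS constants in the register headers are byte offsets, so callers convert by dividing by sizeof(u64). A minimal sketch of that conversion, following the KREG_IDX shape the chip files use; read_7220_control() is a hypothetical wrapper for illustration:

/* Convert a generated byte offset into a u64-word register index */
#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))

static inline u64 read_7220_control(const struct qib_devdata *dd)
{
	/* QIB_7220_Control_OFFS is 0x8, i.e. regno 1 in u64 units */
	return qib_read_kreg64(dd, KREG_IDX(Control));
}
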
diff --git a/drivers/infiniband/hw/qib/qib_7220_regs.h b/drivers/infiniband/hw/qib/qib_7220_regs.h
deleted file mode 100644
index 0da5bb750e52..000000000000
--- a/drivers/infiniband/hw/qib/qib_7220_regs.h
+++ /dev/null
@@ -1,1496 +0,0 @@
-/*
- * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
-
-#define QIB_7220_Revision_OFFS 0x0
-#define QIB_7220_Revision_R_Simulator_LSB 0x3F
-#define QIB_7220_Revision_R_Simulator_RMASK 0x1
-#define QIB_7220_Revision_R_Emulation_LSB 0x3E
-#define QIB_7220_Revision_R_Emulation_RMASK 0x1
-#define QIB_7220_Revision_R_Emulation_Revcode_LSB 0x28
-#define QIB_7220_Revision_R_Emulation_Revcode_RMASK 0x3FFFFF
-#define QIB_7220_Revision_BoardID_LSB 0x20
-#define QIB_7220_Revision_BoardID_RMASK 0xFF
-#define QIB_7220_Revision_R_SW_LSB 0x18
-#define QIB_7220_Revision_R_SW_RMASK 0xFF
-#define QIB_7220_Revision_R_Arch_LSB 0x10
-#define QIB_7220_Revision_R_Arch_RMASK 0xFF
-#define QIB_7220_Revision_R_ChipRevMajor_LSB 0x8
-#define QIB_7220_Revision_R_ChipRevMajor_RMASK 0xFF
-#define QIB_7220_Revision_R_ChipRevMinor_LSB 0x0
-#define QIB_7220_Revision_R_ChipRevMinor_RMASK 0xFF
-
-#define QIB_7220_Control_OFFS 0x8
-#define QIB_7220_Control_SyncResetExceptPcieIRAMRST_LSB 0x7
-#define QIB_7220_Control_SyncResetExceptPcieIRAMRST_RMASK 0x1
-#define QIB_7220_Control_PCIECplQDiagEn_LSB 0x6
-#define QIB_7220_Control_PCIECplQDiagEn_RMASK 0x1
-#define QIB_7220_Control_Reserved_LSB 0x5
-#define QIB_7220_Control_Reserved_RMASK 0x1
-#define QIB_7220_Control_TxLatency_LSB 0x4
-#define QIB_7220_Control_TxLatency_RMASK 0x1
-#define QIB_7220_Control_PCIERetryBufDiagEn_LSB 0x3
-#define QIB_7220_Control_PCIERetryBufDiagEn_RMASK 0x1
-#define QIB_7220_Control_LinkEn_LSB 0x2
-#define QIB_7220_Control_LinkEn_RMASK 0x1
-#define QIB_7220_Control_FreezeMode_LSB 0x1
-#define QIB_7220_Control_FreezeMode_RMASK 0x1
-#define QIB_7220_Control_SyncReset_LSB 0x0
-#define QIB_7220_Control_SyncReset_RMASK 0x1
-
-#define QIB_7220_PageAlign_OFFS 0x10
-
-#define QIB_7220_PortCnt_OFFS 0x18
-
-#define QIB_7220_SendRegBase_OFFS 0x30
-
-#define QIB_7220_UserRegBase_OFFS 0x38
-
-#define QIB_7220_CntrRegBase_OFFS 0x40
-
-#define QIB_7220_Scratch_OFFS 0x48
-
-#define QIB_7220_IntMask_OFFS 0x68
-#define QIB_7220_IntMask_SDmaIntMask_LSB 0x3F
-#define QIB_7220_IntMask_SDmaIntMask_RMASK 0x1
-#define QIB_7220_IntMask_SDmaDisabledMasked_LSB 0x3E
-#define QIB_7220_IntMask_SDmaDisabledMasked_RMASK 0x1
-#define QIB_7220_IntMask_Reserved_LSB 0x31
-#define QIB_7220_IntMask_Reserved_RMASK 0x1FFF
-#define QIB_7220_IntMask_RcvUrg16IntMask_LSB 0x30
-#define QIB_7220_IntMask_RcvUrg16IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg15IntMask_LSB 0x2F
-#define QIB_7220_IntMask_RcvUrg15IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg14IntMask_LSB 0x2E
-#define QIB_7220_IntMask_RcvUrg14IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg13IntMask_LSB 0x2D
-#define QIB_7220_IntMask_RcvUrg13IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg12IntMask_LSB 0x2C
-#define QIB_7220_IntMask_RcvUrg12IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg11IntMask_LSB 0x2B
-#define QIB_7220_IntMask_RcvUrg11IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg10IntMask_LSB 0x2A
-#define QIB_7220_IntMask_RcvUrg10IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg9IntMask_LSB 0x29
-#define QIB_7220_IntMask_RcvUrg9IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg8IntMask_LSB 0x28
-#define QIB_7220_IntMask_RcvUrg8IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg7IntMask_LSB 0x27
-#define QIB_7220_IntMask_RcvUrg7IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg6IntMask_LSB 0x26
-#define QIB_7220_IntMask_RcvUrg6IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg5IntMask_LSB 0x25
-#define QIB_7220_IntMask_RcvUrg5IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg4IntMask_LSB 0x24
-#define QIB_7220_IntMask_RcvUrg4IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg3IntMask_LSB 0x23
-#define QIB_7220_IntMask_RcvUrg3IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg2IntMask_LSB 0x22
-#define QIB_7220_IntMask_RcvUrg2IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg1IntMask_LSB 0x21
-#define QIB_7220_IntMask_RcvUrg1IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvUrg0IntMask_LSB 0x20
-#define QIB_7220_IntMask_RcvUrg0IntMask_RMASK 0x1
-#define QIB_7220_IntMask_ErrorIntMask_LSB 0x1F
-#define QIB_7220_IntMask_ErrorIntMask_RMASK 0x1
-#define QIB_7220_IntMask_PioSetIntMask_LSB 0x1E
-#define QIB_7220_IntMask_PioSetIntMask_RMASK 0x1
-#define QIB_7220_IntMask_PioBufAvailIntMask_LSB 0x1D
-#define QIB_7220_IntMask_PioBufAvailIntMask_RMASK 0x1
-#define QIB_7220_IntMask_assertGPIOIntMask_LSB 0x1C
-#define QIB_7220_IntMask_assertGPIOIntMask_RMASK 0x1
-#define QIB_7220_IntMask_IBSerdesTrimDoneIntMask_LSB 0x1B
-#define QIB_7220_IntMask_IBSerdesTrimDoneIntMask_RMASK 0x1
-#define QIB_7220_IntMask_JIntMask_LSB 0x1A
-#define QIB_7220_IntMask_JIntMask_RMASK 0x1
-#define QIB_7220_IntMask_Reserved1_LSB 0x11
-#define QIB_7220_IntMask_Reserved1_RMASK 0x1FF
-#define QIB_7220_IntMask_RcvAvail16IntMask_LSB 0x10
-#define QIB_7220_IntMask_RcvAvail16IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail15IntMask_LSB 0xF
-#define QIB_7220_IntMask_RcvAvail15IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail14IntMask_LSB 0xE
-#define QIB_7220_IntMask_RcvAvail14IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail13IntMask_LSB 0xD
-#define QIB_7220_IntMask_RcvAvail13IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail12IntMask_LSB 0xC
-#define QIB_7220_IntMask_RcvAvail12IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail11IntMask_LSB 0xB
-#define QIB_7220_IntMask_RcvAvail11IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail10IntMask_LSB 0xA
-#define QIB_7220_IntMask_RcvAvail10IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail9IntMask_LSB 0x9
-#define QIB_7220_IntMask_RcvAvail9IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail8IntMask_LSB 0x8
-#define QIB_7220_IntMask_RcvAvail8IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail7IntMask_LSB 0x7
-#define QIB_7220_IntMask_RcvAvail7IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail6IntMask_LSB 0x6
-#define QIB_7220_IntMask_RcvAvail6IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail5IntMask_LSB 0x5
-#define QIB_7220_IntMask_RcvAvail5IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail4IntMask_LSB 0x4
-#define QIB_7220_IntMask_RcvAvail4IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail3IntMask_LSB 0x3
-#define QIB_7220_IntMask_RcvAvail3IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail2IntMask_LSB 0x2
-#define QIB_7220_IntMask_RcvAvail2IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail1IntMask_LSB 0x1
-#define QIB_7220_IntMask_RcvAvail1IntMask_RMASK 0x1
-#define QIB_7220_IntMask_RcvAvail0IntMask_LSB 0x0
-#define QIB_7220_IntMask_RcvAvail0IntMask_RMASK 0x1
-
-#define QIB_7220_IntStatus_OFFS 0x70
-#define QIB_7220_IntStatus_SDmaInt_LSB 0x3F
-#define QIB_7220_IntStatus_SDmaInt_RMASK 0x1
-#define QIB_7220_IntStatus_SDmaDisabled_LSB 0x3E
-#define QIB_7220_IntStatus_SDmaDisabled_RMASK 0x1
-#define QIB_7220_IntStatus_Reserved_LSB 0x31
-#define QIB_7220_IntStatus_Reserved_RMASK 0x1FFF
-#define QIB_7220_IntStatus_RcvUrg16_LSB 0x30
-#define QIB_7220_IntStatus_RcvUrg16_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg15_LSB 0x2F
-#define QIB_7220_IntStatus_RcvUrg15_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg14_LSB 0x2E
-#define QIB_7220_IntStatus_RcvUrg14_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg13_LSB 0x2D
-#define QIB_7220_IntStatus_RcvUrg13_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg12_LSB 0x2C
-#define QIB_7220_IntStatus_RcvUrg12_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg11_LSB 0x2B
-#define QIB_7220_IntStatus_RcvUrg11_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg10_LSB 0x2A
-#define QIB_7220_IntStatus_RcvUrg10_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg9_LSB 0x29
-#define QIB_7220_IntStatus_RcvUrg9_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg8_LSB 0x28
-#define QIB_7220_IntStatus_RcvUrg8_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg7_LSB 0x27
-#define QIB_7220_IntStatus_RcvUrg7_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg6_LSB 0x26
-#define QIB_7220_IntStatus_RcvUrg6_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg5_LSB 0x25
-#define QIB_7220_IntStatus_RcvUrg5_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg4_LSB 0x24
-#define QIB_7220_IntStatus_RcvUrg4_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg3_LSB 0x23
-#define QIB_7220_IntStatus_RcvUrg3_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg2_LSB 0x22
-#define QIB_7220_IntStatus_RcvUrg2_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg1_LSB 0x21
-#define QIB_7220_IntStatus_RcvUrg1_RMASK 0x1
-#define QIB_7220_IntStatus_RcvUrg0_LSB 0x20
-#define QIB_7220_IntStatus_RcvUrg0_RMASK 0x1
-#define QIB_7220_IntStatus_Error_LSB 0x1F
-#define QIB_7220_IntStatus_Error_RMASK 0x1
-#define QIB_7220_IntStatus_PioSent_LSB 0x1E
-#define QIB_7220_IntStatus_PioSent_RMASK 0x1
-#define QIB_7220_IntStatus_PioBufAvail_LSB 0x1D
-#define QIB_7220_IntStatus_PioBufAvail_RMASK 0x1
-#define QIB_7220_IntStatus_assertGPIO_LSB 0x1C
-#define QIB_7220_IntStatus_assertGPIO_RMASK 0x1
-#define QIB_7220_IntStatus_IBSerdesTrimDone_LSB 0x1B
-#define QIB_7220_IntStatus_IBSerdesTrimDone_RMASK 0x1
-#define QIB_7220_IntStatus_JInt_LSB 0x1A
-#define QIB_7220_IntStatus_JInt_RMASK 0x1
-#define QIB_7220_IntStatus_Reserved1_LSB 0x11
-#define QIB_7220_IntStatus_Reserved1_RMASK 0x1FF
-#define QIB_7220_IntStatus_RcvAvail16_LSB 0x10
-#define QIB_7220_IntStatus_RcvAvail16_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail15_LSB 0xF
-#define QIB_7220_IntStatus_RcvAvail15_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail14_LSB 0xE
-#define QIB_7220_IntStatus_RcvAvail14_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail13_LSB 0xD
-#define QIB_7220_IntStatus_RcvAvail13_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail12_LSB 0xC
-#define QIB_7220_IntStatus_RcvAvail12_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail11_LSB 0xB
-#define QIB_7220_IntStatus_RcvAvail11_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail10_LSB 0xA
-#define QIB_7220_IntStatus_RcvAvail10_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail9_LSB 0x9
-#define QIB_7220_IntStatus_RcvAvail9_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail8_LSB 0x8
-#define QIB_7220_IntStatus_RcvAvail8_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail7_LSB 0x7
-#define QIB_7220_IntStatus_RcvAvail7_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail6_LSB 0x6
-#define QIB_7220_IntStatus_RcvAvail6_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail5_LSB 0x5
-#define QIB_7220_IntStatus_RcvAvail5_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail4_LSB 0x4
-#define QIB_7220_IntStatus_RcvAvail4_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail3_LSB 0x3
-#define QIB_7220_IntStatus_RcvAvail3_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail2_LSB 0x2
-#define QIB_7220_IntStatus_RcvAvail2_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail1_LSB 0x1
-#define QIB_7220_IntStatus_RcvAvail1_RMASK 0x1
-#define QIB_7220_IntStatus_RcvAvail0_LSB 0x0
-#define QIB_7220_IntStatus_RcvAvail0_RMASK 0x1
-
-#define QIB_7220_IntClear_OFFS 0x78
-#define QIB_7220_IntClear_SDmaIntClear_LSB 0x3F
-#define QIB_7220_IntClear_SDmaIntClear_RMASK 0x1
-#define QIB_7220_IntClear_SDmaDisabledClear_LSB 0x3E
-#define QIB_7220_IntClear_SDmaDisabledClear_RMASK 0x1
-#define QIB_7220_IntClear_Reserved_LSB 0x31
-#define QIB_7220_IntClear_Reserved_RMASK 0x1FFF
-#define QIB_7220_IntClear_RcvUrg16IntClear_LSB 0x30
-#define QIB_7220_IntClear_RcvUrg16IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg15IntClear_LSB 0x2F
-#define QIB_7220_IntClear_RcvUrg15IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg14IntClear_LSB 0x2E
-#define QIB_7220_IntClear_RcvUrg14IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg13IntClear_LSB 0x2D
-#define QIB_7220_IntClear_RcvUrg13IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg12IntClear_LSB 0x2C
-#define QIB_7220_IntClear_RcvUrg12IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg11IntClear_LSB 0x2B
-#define QIB_7220_IntClear_RcvUrg11IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg10IntClear_LSB 0x2A
-#define QIB_7220_IntClear_RcvUrg10IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg9IntClear_LSB 0x29
-#define QIB_7220_IntClear_RcvUrg9IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg8IntClear_LSB 0x28
-#define QIB_7220_IntClear_RcvUrg8IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg7IntClear_LSB 0x27
-#define QIB_7220_IntClear_RcvUrg7IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg6IntClear_LSB 0x26
-#define QIB_7220_IntClear_RcvUrg6IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg5IntClear_LSB 0x25
-#define QIB_7220_IntClear_RcvUrg5IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg4IntClear_LSB 0x24
-#define QIB_7220_IntClear_RcvUrg4IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg3IntClear_LSB 0x23
-#define QIB_7220_IntClear_RcvUrg3IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg2IntClear_LSB 0x22
-#define QIB_7220_IntClear_RcvUrg2IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg1IntClear_LSB 0x21
-#define QIB_7220_IntClear_RcvUrg1IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvUrg0IntClear_LSB 0x20
-#define QIB_7220_IntClear_RcvUrg0IntClear_RMASK 0x1
-#define QIB_7220_IntClear_ErrorIntClear_LSB 0x1F
-#define QIB_7220_IntClear_ErrorIntClear_RMASK 0x1
-#define QIB_7220_IntClear_PioSetIntClear_LSB 0x1E
-#define QIB_7220_IntClear_PioSetIntClear_RMASK 0x1
-#define QIB_7220_IntClear_PioBufAvailIntClear_LSB 0x1D
-#define QIB_7220_IntClear_PioBufAvailIntClear_RMASK 0x1
-#define QIB_7220_IntClear_assertGPIOIntClear_LSB 0x1C
-#define QIB_7220_IntClear_assertGPIOIntClear_RMASK 0x1
-#define QIB_7220_IntClear_IBSerdesTrimDoneClear_LSB 0x1B
-#define QIB_7220_IntClear_IBSerdesTrimDoneClear_RMASK 0x1
-#define QIB_7220_IntClear_JIntClear_LSB 0x1A
-#define QIB_7220_IntClear_JIntClear_RMASK 0x1
-#define QIB_7220_IntClear_Reserved1_LSB 0x11
-#define QIB_7220_IntClear_Reserved1_RMASK 0x1FF
-#define QIB_7220_IntClear_RcvAvail16IntClear_LSB 0x10
-#define QIB_7220_IntClear_RcvAvail16IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail15IntClear_LSB 0xF
-#define QIB_7220_IntClear_RcvAvail15IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail14IntClear_LSB 0xE
-#define QIB_7220_IntClear_RcvAvail14IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail13IntClear_LSB 0xD
-#define QIB_7220_IntClear_RcvAvail13IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail12IntClear_LSB 0xC
-#define QIB_7220_IntClear_RcvAvail12IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail11IntClear_LSB 0xB
-#define QIB_7220_IntClear_RcvAvail11IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail10IntClear_LSB 0xA
-#define QIB_7220_IntClear_RcvAvail10IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail9IntClear_LSB 0x9
-#define QIB_7220_IntClear_RcvAvail9IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail8IntClear_LSB 0x8
-#define QIB_7220_IntClear_RcvAvail8IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail7IntClear_LSB 0x7
-#define QIB_7220_IntClear_RcvAvail7IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail6IntClear_LSB 0x6
-#define QIB_7220_IntClear_RcvAvail6IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail5IntClear_LSB 0x5
-#define QIB_7220_IntClear_RcvAvail5IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail4IntClear_LSB 0x4
-#define QIB_7220_IntClear_RcvAvail4IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail3IntClear_LSB 0x3
-#define QIB_7220_IntClear_RcvAvail3IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail2IntClear_LSB 0x2
-#define QIB_7220_IntClear_RcvAvail2IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail1IntClear_LSB 0x1
-#define QIB_7220_IntClear_RcvAvail1IntClear_RMASK 0x1
-#define QIB_7220_IntClear_RcvAvail0IntClear_LSB 0x0
-#define QIB_7220_IntClear_RcvAvail0IntClear_RMASK 0x1
-
-#define QIB_7220_ErrMask_OFFS 0x80
-#define QIB_7220_ErrMask_Reserved_LSB 0x36
-#define QIB_7220_ErrMask_Reserved_RMASK 0x3FF
-#define QIB_7220_ErrMask_InvalidEEPCmdMask_LSB 0x35
-#define QIB_7220_ErrMask_InvalidEEPCmdMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaDescAddrMisalignErrMask_LSB 0x34
-#define QIB_7220_ErrMask_SDmaDescAddrMisalignErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_HardwareErrMask_LSB 0x33
-#define QIB_7220_ErrMask_HardwareErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_ResetNegatedMask_LSB 0x32
-#define QIB_7220_ErrMask_ResetNegatedMask_RMASK 0x1
-#define QIB_7220_ErrMask_InvalidAddrErrMask_LSB 0x31
-#define QIB_7220_ErrMask_InvalidAddrErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_IBStatusChangedMask_LSB 0x30
-#define QIB_7220_ErrMask_IBStatusChangedMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaUnexpDataErrMask_LSB 0x2F
-#define QIB_7220_ErrMask_SDmaUnexpDataErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaMissingDwErrMask_LSB 0x2E
-#define QIB_7220_ErrMask_SDmaMissingDwErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaDwEnErrMask_LSB 0x2D
-#define QIB_7220_ErrMask_SDmaDwEnErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaRpyTagErrMask_LSB 0x2C
-#define QIB_7220_ErrMask_SDmaRpyTagErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDma1stDescErrMask_LSB 0x2B
-#define QIB_7220_ErrMask_SDma1stDescErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaBaseErrMask_LSB 0x2A
-#define QIB_7220_ErrMask_SDmaBaseErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaTailOutOfBoundErrMask_LSB 0x29
-#define QIB_7220_ErrMask_SDmaTailOutOfBoundErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaOutOfBoundErrMask_LSB 0x28
-#define QIB_7220_ErrMask_SDmaOutOfBoundErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaGenMismatchErrMask_LSB 0x27
-#define QIB_7220_ErrMask_SDmaGenMismatchErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendBufMisuseErrMask_LSB 0x26
-#define QIB_7220_ErrMask_SendBufMisuseErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendUnsupportedVLErrMask_LSB 0x25
-#define QIB_7220_ErrMask_SendUnsupportedVLErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendUnexpectedPktNumErrMask_LSB 0x24
-#define QIB_7220_ErrMask_SendUnexpectedPktNumErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendPioArmLaunchErrMask_LSB 0x23
-#define QIB_7220_ErrMask_SendPioArmLaunchErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendDroppedDataPktErrMask_LSB 0x22
-#define QIB_7220_ErrMask_SendDroppedDataPktErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendDroppedSmpPktErrMask_LSB 0x21
-#define QIB_7220_ErrMask_SendDroppedSmpPktErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendPktLenErrMask_LSB 0x20
-#define QIB_7220_ErrMask_SendPktLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendUnderRunErrMask_LSB 0x1F
-#define QIB_7220_ErrMask_SendUnderRunErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendMaxPktLenErrMask_LSB 0x1E
-#define QIB_7220_ErrMask_SendMaxPktLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendMinPktLenErrMask_LSB 0x1D
-#define QIB_7220_ErrMask_SendMinPktLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SDmaDisabledErrMask_LSB 0x1C
-#define QIB_7220_ErrMask_SDmaDisabledErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_SendSpecialTriggerErrMask_LSB 0x1B
-#define QIB_7220_ErrMask_SendSpecialTriggerErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_Reserved1_LSB 0x12
-#define QIB_7220_ErrMask_Reserved1_RMASK 0x1FF
-#define QIB_7220_ErrMask_RcvIBLostLinkErrMask_LSB 0x11
-#define QIB_7220_ErrMask_RcvIBLostLinkErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvHdrErrMask_LSB 0x10
-#define QIB_7220_ErrMask_RcvHdrErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvHdrLenErrMask_LSB 0xF
-#define QIB_7220_ErrMask_RcvHdrLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvBadTidErrMask_LSB 0xE
-#define QIB_7220_ErrMask_RcvBadTidErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvHdrFullErrMask_LSB 0xD
-#define QIB_7220_ErrMask_RcvHdrFullErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvEgrFullErrMask_LSB 0xC
-#define QIB_7220_ErrMask_RcvEgrFullErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvBadVersionErrMask_LSB 0xB
-#define QIB_7220_ErrMask_RcvBadVersionErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvIBFlowErrMask_LSB 0xA
-#define QIB_7220_ErrMask_RcvIBFlowErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvEBPErrMask_LSB 0x9
-#define QIB_7220_ErrMask_RcvEBPErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvUnsupportedVLErrMask_LSB 0x8
-#define QIB_7220_ErrMask_RcvUnsupportedVLErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvUnexpectedCharErrMask_LSB 0x7
-#define QIB_7220_ErrMask_RcvUnexpectedCharErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvShortPktLenErrMask_LSB 0x6
-#define QIB_7220_ErrMask_RcvShortPktLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvLongPktLenErrMask_LSB 0x5
-#define QIB_7220_ErrMask_RcvLongPktLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvMaxPktLenErrMask_LSB 0x4
-#define QIB_7220_ErrMask_RcvMaxPktLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvMinPktLenErrMask_LSB 0x3
-#define QIB_7220_ErrMask_RcvMinPktLenErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvICRCErrMask_LSB 0x2
-#define QIB_7220_ErrMask_RcvICRCErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvVCRCErrMask_LSB 0x1
-#define QIB_7220_ErrMask_RcvVCRCErrMask_RMASK 0x1
-#define QIB_7220_ErrMask_RcvFormatErrMask_LSB 0x0
-#define QIB_7220_ErrMask_RcvFormatErrMask_RMASK 0x1
-
-#define QIB_7220_ErrStatus_OFFS 0x88
-#define QIB_7220_ErrStatus_Reserved_LSB 0x36
-#define QIB_7220_ErrStatus_Reserved_RMASK 0x3FF
-#define QIB_7220_ErrStatus_InvalidEEPCmdErr_LSB 0x35
-#define QIB_7220_ErrStatus_InvalidEEPCmdErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaDescAddrMisalignErr_LSB 0x34
-#define QIB_7220_ErrStatus_SDmaDescAddrMisalignErr_RMASK 0x1
-#define QIB_7220_ErrStatus_HardwareErr_LSB 0x33
-#define QIB_7220_ErrStatus_HardwareErr_RMASK 0x1
-#define QIB_7220_ErrStatus_ResetNegated_LSB 0x32
-#define QIB_7220_ErrStatus_ResetNegated_RMASK 0x1
-#define QIB_7220_ErrStatus_InvalidAddrErr_LSB 0x31
-#define QIB_7220_ErrStatus_InvalidAddrErr_RMASK 0x1
-#define QIB_7220_ErrStatus_IBStatusChanged_LSB 0x30
-#define QIB_7220_ErrStatus_IBStatusChanged_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaUnexpDataErr_LSB 0x2F
-#define QIB_7220_ErrStatus_SDmaUnexpDataErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaMissingDwErr_LSB 0x2E
-#define QIB_7220_ErrStatus_SDmaMissingDwErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaDwEnErr_LSB 0x2D
-#define QIB_7220_ErrStatus_SDmaDwEnErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaRpyTagErr_LSB 0x2C
-#define QIB_7220_ErrStatus_SDmaRpyTagErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDma1stDescErr_LSB 0x2B
-#define QIB_7220_ErrStatus_SDma1stDescErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaBaseErr_LSB 0x2A
-#define QIB_7220_ErrStatus_SDmaBaseErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaTailOutOfBoundErr_LSB 0x29
-#define QIB_7220_ErrStatus_SDmaTailOutOfBoundErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaOutOfBoundErr_LSB 0x28
-#define QIB_7220_ErrStatus_SDmaOutOfBoundErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaGenMismatchErr_LSB 0x27
-#define QIB_7220_ErrStatus_SDmaGenMismatchErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendBufMisuseErr_LSB 0x26
-#define QIB_7220_ErrStatus_SendBufMisuseErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendUnsupportedVLErr_LSB 0x25
-#define QIB_7220_ErrStatus_SendUnsupportedVLErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendUnexpectedPktNumErr_LSB 0x24
-#define QIB_7220_ErrStatus_SendUnexpectedPktNumErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendPioArmLaunchErr_LSB 0x23
-#define QIB_7220_ErrStatus_SendPioArmLaunchErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendDroppedDataPktErr_LSB 0x22
-#define QIB_7220_ErrStatus_SendDroppedDataPktErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendDroppedSmpPktErr_LSB 0x21
-#define QIB_7220_ErrStatus_SendDroppedSmpPktErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendPktLenErr_LSB 0x20
-#define QIB_7220_ErrStatus_SendPktLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendUnderRunErr_LSB 0x1F
-#define QIB_7220_ErrStatus_SendUnderRunErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendMaxPktLenErr_LSB 0x1E
-#define QIB_7220_ErrStatus_SendMaxPktLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendMinPktLenErr_LSB 0x1D
-#define QIB_7220_ErrStatus_SendMinPktLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SDmaDisabledErr_LSB 0x1C
-#define QIB_7220_ErrStatus_SDmaDisabledErr_RMASK 0x1
-#define QIB_7220_ErrStatus_SendSpecialTriggerErr_LSB 0x1B
-#define QIB_7220_ErrStatus_SendSpecialTriggerErr_RMASK 0x1
-#define QIB_7220_ErrStatus_Reserved1_LSB 0x12
-#define QIB_7220_ErrStatus_Reserved1_RMASK 0x1FF
-#define QIB_7220_ErrStatus_RcvIBLostLinkErr_LSB 0x11
-#define QIB_7220_ErrStatus_RcvIBLostLinkErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvHdrErr_LSB 0x10
-#define QIB_7220_ErrStatus_RcvHdrErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvHdrLenErr_LSB 0xF
-#define QIB_7220_ErrStatus_RcvHdrLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvBadTidErr_LSB 0xE
-#define QIB_7220_ErrStatus_RcvBadTidErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvHdrFullErr_LSB 0xD
-#define QIB_7220_ErrStatus_RcvHdrFullErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvEgrFullErr_LSB 0xC
-#define QIB_7220_ErrStatus_RcvEgrFullErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvBadVersionErr_LSB 0xB
-#define QIB_7220_ErrStatus_RcvBadVersionErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvIBFlowErr_LSB 0xA
-#define QIB_7220_ErrStatus_RcvIBFlowErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvEBPErr_LSB 0x9
-#define QIB_7220_ErrStatus_RcvEBPErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvUnsupportedVLErr_LSB 0x8
-#define QIB_7220_ErrStatus_RcvUnsupportedVLErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvUnexpectedCharErr_LSB 0x7
-#define QIB_7220_ErrStatus_RcvUnexpectedCharErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvShortPktLenErr_LSB 0x6
-#define QIB_7220_ErrStatus_RcvShortPktLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvLongPktLenErr_LSB 0x5
-#define QIB_7220_ErrStatus_RcvLongPktLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvMaxPktLenErr_LSB 0x4
-#define QIB_7220_ErrStatus_RcvMaxPktLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvMinPktLenErr_LSB 0x3
-#define QIB_7220_ErrStatus_RcvMinPktLenErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvICRCErr_LSB 0x2
-#define QIB_7220_ErrStatus_RcvICRCErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvVCRCErr_LSB 0x1
-#define QIB_7220_ErrStatus_RcvVCRCErr_RMASK 0x1
-#define QIB_7220_ErrStatus_RcvFormatErr_LSB 0x0
-#define QIB_7220_ErrStatus_RcvFormatErr_RMASK 0x1
-
-#define QIB_7220_ErrClear_OFFS 0x90
-#define QIB_7220_ErrClear_Reserved_LSB 0x36
-#define QIB_7220_ErrClear_Reserved_RMASK 0x3FF
-#define QIB_7220_ErrClear_InvalidEEPCmdErrClear_LSB 0x35
-#define QIB_7220_ErrClear_InvalidEEPCmdErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaDescAddrMisalignErrClear_LSB 0x34
-#define QIB_7220_ErrClear_SDmaDescAddrMisalignErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_HardwareErrClear_LSB 0x33
-#define QIB_7220_ErrClear_HardwareErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_ResetNegatedClear_LSB 0x32
-#define QIB_7220_ErrClear_ResetNegatedClear_RMASK 0x1
-#define QIB_7220_ErrClear_InvalidAddrErrClear_LSB 0x31
-#define QIB_7220_ErrClear_InvalidAddrErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_IBStatusChangedClear_LSB 0x30
-#define QIB_7220_ErrClear_IBStatusChangedClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaUnexpDataErrClear_LSB 0x2F
-#define QIB_7220_ErrClear_SDmaUnexpDataErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaMissingDwErrClear_LSB 0x2E
-#define QIB_7220_ErrClear_SDmaMissingDwErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaDwEnErrClear_LSB 0x2D
-#define QIB_7220_ErrClear_SDmaDwEnErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaRpyTagErrClear_LSB 0x2C
-#define QIB_7220_ErrClear_SDmaRpyTagErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDma1stDescErrClear_LSB 0x2B
-#define QIB_7220_ErrClear_SDma1stDescErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaBaseErrClear_LSB 0x2A
-#define QIB_7220_ErrClear_SDmaBaseErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaTailOutOfBoundErrClear_LSB 0x29
-#define QIB_7220_ErrClear_SDmaTailOutOfBoundErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaOutOfBoundErrClear_LSB 0x28
-#define QIB_7220_ErrClear_SDmaOutOfBoundErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaGenMismatchErrClear_LSB 0x27
-#define QIB_7220_ErrClear_SDmaGenMismatchErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendBufMisuseErrClear_LSB 0x26
-#define QIB_7220_ErrClear_SendBufMisuseErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendUnsupportedVLErrClear_LSB 0x25
-#define QIB_7220_ErrClear_SendUnsupportedVLErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendUnexpectedPktNumErrClear_LSB 0x24
-#define QIB_7220_ErrClear_SendUnexpectedPktNumErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendPioArmLaunchErrClear_LSB 0x23
-#define QIB_7220_ErrClear_SendPioArmLaunchErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendDroppedDataPktErrClear_LSB 0x22
-#define QIB_7220_ErrClear_SendDroppedDataPktErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendDroppedSmpPktErrClear_LSB 0x21
-#define QIB_7220_ErrClear_SendDroppedSmpPktErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendPktLenErrClear_LSB 0x20
-#define QIB_7220_ErrClear_SendPktLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendUnderRunErrClear_LSB 0x1F
-#define QIB_7220_ErrClear_SendUnderRunErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendMaxPktLenErrClear_LSB 0x1E
-#define QIB_7220_ErrClear_SendMaxPktLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendMinPktLenErrClear_LSB 0x1D
-#define QIB_7220_ErrClear_SendMinPktLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SDmaDisabledErrClear_LSB 0x1C
-#define QIB_7220_ErrClear_SDmaDisabledErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_SendSpecialTriggerErrClear_LSB 0x1B
-#define QIB_7220_ErrClear_SendSpecialTriggerErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_Reserved1_LSB 0x12
-#define QIB_7220_ErrClear_Reserved1_RMASK 0x1FF
-#define QIB_7220_ErrClear_RcvIBLostLinkErrClear_LSB 0x11
-#define QIB_7220_ErrClear_RcvIBLostLinkErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvHdrErrClear_LSB 0x10
-#define QIB_7220_ErrClear_RcvHdrErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvHdrLenErrClear_LSB 0xF
-#define QIB_7220_ErrClear_RcvHdrLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvBadTidErrClear_LSB 0xE
-#define QIB_7220_ErrClear_RcvBadTidErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvHdrFullErrClear_LSB 0xD
-#define QIB_7220_ErrClear_RcvHdrFullErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvEgrFullErrClear_LSB 0xC
-#define QIB_7220_ErrClear_RcvEgrFullErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvBadVersionErrClear_LSB 0xB
-#define QIB_7220_ErrClear_RcvBadVersionErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvIBFlowErrClear_LSB 0xA
-#define QIB_7220_ErrClear_RcvIBFlowErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvEBPErrClear_LSB 0x9
-#define QIB_7220_ErrClear_RcvEBPErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvUnsupportedVLErrClear_LSB 0x8
-#define QIB_7220_ErrClear_RcvUnsupportedVLErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvUnexpectedCharErrClear_LSB 0x7
-#define QIB_7220_ErrClear_RcvUnexpectedCharErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvShortPktLenErrClear_LSB 0x6
-#define QIB_7220_ErrClear_RcvShortPktLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvLongPktLenErrClear_LSB 0x5
-#define QIB_7220_ErrClear_RcvLongPktLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvMaxPktLenErrClear_LSB 0x4
-#define QIB_7220_ErrClear_RcvMaxPktLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvMinPktLenErrClear_LSB 0x3
-#define QIB_7220_ErrClear_RcvMinPktLenErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvICRCErrClear_LSB 0x2
-#define QIB_7220_ErrClear_RcvICRCErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvVCRCErrClear_LSB 0x1
-#define QIB_7220_ErrClear_RcvVCRCErrClear_RMASK 0x1
-#define QIB_7220_ErrClear_RcvFormatErrClear_LSB 0x0
-#define QIB_7220_ErrClear_RcvFormatErrClear_RMASK 0x1
-
-#define QIB_7220_HwErrMask_OFFS 0x98
-#define QIB_7220_HwErrMask_IBCBusFromSPCParityErrMask_LSB 0x3F
-#define QIB_7220_HwErrMask_IBCBusFromSPCParityErrMask_RMASK 0x1
-#define QIB_7220_HwErrMask_IBCBusToSPCParityErrMask_LSB 0x3E
-#define QIB_7220_HwErrMask_IBCBusToSPCParityErrMask_RMASK 0x1
-#define QIB_7220_HwErrMask_Clk_uC_PLLNotLockedMask_LSB 0x3D
-#define QIB_7220_HwErrMask_Clk_uC_PLLNotLockedMask_RMASK 0x1
-#define QIB_7220_HwErrMask_IBSerdesPClkNotDetectMask_LSB 0x3C
-#define QIB_7220_HwErrMask_IBSerdesPClkNotDetectMask_RMASK 0x1
-#define QIB_7220_HwErrMask_PCIESerdesQ3PClkNotDetectMask_LSB 0x3B
-#define QIB_7220_HwErrMask_PCIESerdesQ3PClkNotDetectMask_RMASK 0x1
-#define QIB_7220_HwErrMask_PCIESerdesQ2PClkNotDetectMask_LSB 0x3A
-#define QIB_7220_HwErrMask_PCIESerdesQ2PClkNotDetectMask_RMASK 0x1
-#define QIB_7220_HwErrMask_PCIESerdesQ1PClkNotDetectMask_LSB 0x39
-#define QIB_7220_HwErrMask_PCIESerdesQ1PClkNotDetectMask_RMASK 0x1
-#define QIB_7220_HwErrMask_PCIESerdesQ0PClkNotDetectMask_LSB 0x38
-#define QIB_7220_HwErrMask_PCIESerdesQ0PClkNotDetectMask_RMASK 0x1
-#define QIB_7220_HwErrMask_Reserved_LSB 0x37
-#define QIB_7220_HwErrMask_Reserved_RMASK 0x1
-#define QIB_7220_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
-#define QIB_7220_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
-#define QIB_7220_HwErrMask_Reserved1_LSB 0x33
-#define QIB_7220_HwErrMask_Reserved1_RMASK 0x7
-#define QIB_7220_HwErrMask_RXEMemParityErrMask_LSB 0x2C
-#define QIB_7220_HwErrMask_RXEMemParityErrMask_RMASK 0x7F
-#define QIB_7220_HwErrMask_TXEMemParityErrMask_LSB 0x28
-#define QIB_7220_HwErrMask_TXEMemParityErrMask_RMASK 0xF
-#define QIB_7220_HwErrMask_DDSRXEQMemoryParityErrMask_LSB 0x27
-#define QIB_7220_HwErrMask_DDSRXEQMemoryParityErrMask_RMASK 0x1
-#define QIB_7220_HwErrMask_IB_uC_MemoryParityErrMask_LSB 0x26
-#define QIB_7220_HwErrMask_IB_uC_MemoryParityErrMask_RMASK 0x1
-#define QIB_7220_HwErrMask_PCIEOct1_uC_MemoryParityErrMask_LSB 0x25
-#define QIB_7220_HwErrMask_PCIEOct1_uC_MemoryParityErrMask_RMASK 0x1
-#define QIB_7220_HwErrMask_PCIEOct0_uC_MemoryParityErrMask_LSB 0x24
-#define QIB_7220_HwErrMask_PCIEOct0_uC_MemoryParityErrMask_RMASK 0x1
-#define QIB_7220_HwErrMask_Reserved2_LSB 0x22
-#define QIB_7220_HwErrMask_Reserved2_RMASK 0x3
-#define QIB_7220_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
-#define QIB_7220_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
-#define QIB_7220_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
-#define QIB_7220_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
-#define QIB_7220_HwErrMask_PoisonedTLPMask_LSB 0x1D
-#define QIB_7220_HwErrMask_PoisonedTLPMask_RMASK 0x1
-#define QIB_7220_HwErrMask_SDmaMemReadErrMask_LSB 0x1C
-#define QIB_7220_HwErrMask_SDmaMemReadErrMask_RMASK 0x1
-#define QIB_7220_HwErrMask_Reserved3_LSB 0x8
-#define QIB_7220_HwErrMask_Reserved3_RMASK 0xFFFFF
-#define QIB_7220_HwErrMask_PCIeMemParityErrMask_LSB 0x0
-#define QIB_7220_HwErrMask_PCIeMemParityErrMask_RMASK 0xFF
-
-#define QIB_7220_HwErrStatus_OFFS 0xA0
-#define QIB_7220_HwErrStatus_IBCBusFromSPCParityErr_LSB 0x3F
-#define QIB_7220_HwErrStatus_IBCBusFromSPCParityErr_RMASK 0x1
-#define QIB_7220_HwErrStatus_IBCBusToSPCParityErr_LSB 0x3E
-#define QIB_7220_HwErrStatus_IBCBusToSPCParityErr_RMASK 0x1
-#define QIB_7220_HwErrStatus_Clk_uC_PLLNotLocked_LSB 0x3D
-#define QIB_7220_HwErrStatus_Clk_uC_PLLNotLocked_RMASK 0x1
-#define QIB_7220_HwErrStatus_IBSerdesPClkNotDetect_LSB 0x3C
-#define QIB_7220_HwErrStatus_IBSerdesPClkNotDetect_RMASK 0x1
-#define QIB_7220_HwErrStatus_PCIESerdesQ3PClkNotDetect_LSB 0x3B
-#define QIB_7220_HwErrStatus_PCIESerdesQ3PClkNotDetect_RMASK 0x1
-#define QIB_7220_HwErrStatus_PCIESerdesQ2PClkNotDetect_LSB 0x3A
-#define QIB_7220_HwErrStatus_PCIESerdesQ2PClkNotDetect_RMASK 0x1
-#define QIB_7220_HwErrStatus_PCIESerdesQ1PClkNotDetect_LSB 0x39
-#define QIB_7220_HwErrStatus_PCIESerdesQ1PClkNotDetect_RMASK 0x1
-#define QIB_7220_HwErrStatus_PCIESerdesQ0PClkNotDetect_LSB 0x38
-#define QIB_7220_HwErrStatus_PCIESerdesQ0PClkNotDetect_RMASK 0x1
-#define QIB_7220_HwErrStatus_Reserved_LSB 0x37
-#define QIB_7220_HwErrStatus_Reserved_RMASK 0x1
-#define QIB_7220_HwErrStatus_PowerOnBISTFailed_LSB 0x36
-#define QIB_7220_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
-#define QIB_7220_HwErrStatus_Reserved1_LSB 0x33
-#define QIB_7220_HwErrStatus_Reserved1_RMASK 0x7
-#define QIB_7220_HwErrStatus_RXEMemParity_LSB 0x2C
-#define QIB_7220_HwErrStatus_RXEMemParity_RMASK 0x7F
-#define QIB_7220_HwErrStatus_TXEMemParity_LSB 0x28
-#define QIB_7220_HwErrStatus_TXEMemParity_RMASK 0xF
-#define QIB_7220_HwErrStatus_DDSRXEQMemoryParityErr_LSB 0x27
-#define QIB_7220_HwErrStatus_DDSRXEQMemoryParityErr_RMASK 0x1
-#define QIB_7220_HwErrStatus_IB_uC_MemoryParityErr_LSB 0x26
-#define QIB_7220_HwErrStatus_IB_uC_MemoryParityErr_RMASK 0x1
-#define QIB_7220_HwErrStatus_PCIE_uC_Oct1MemoryParityErr_LSB 0x25
-#define QIB_7220_HwErrStatus_PCIE_uC_Oct1MemoryParityErr_RMASK 0x1
-#define QIB_7220_HwErrStatus_PCIE_uC_Oct0MemoryParityErr_LSB 0x24
-#define QIB_7220_HwErrStatus_PCIE_uC_Oct0MemoryParityErr_RMASK 0x1
-#define QIB_7220_HwErrStatus_Reserved2_LSB 0x22
-#define QIB_7220_HwErrStatus_Reserved2_RMASK 0x3
-#define QIB_7220_HwErrStatus_PCIeBusParity_LSB 0x1F
-#define QIB_7220_HwErrStatus_PCIeBusParity_RMASK 0x7
-#define QIB_7220_HwErrStatus_PcieCplTimeout_LSB 0x1E
-#define QIB_7220_HwErrStatus_PcieCplTimeout_RMASK 0x1
-#define QIB_7220_HwErrStatus_PoisenedTLP_LSB 0x1D
-#define QIB_7220_HwErrStatus_PoisenedTLP_RMASK 0x1
-#define QIB_7220_HwErrStatus_SDmaMemReadErr_LSB 0x1C
-#define QIB_7220_HwErrStatus_SDmaMemReadErr_RMASK 0x1
-#define QIB_7220_HwErrStatus_Reserved3_LSB 0x8
-#define QIB_7220_HwErrStatus_Reserved3_RMASK 0xFFFFF
-#define QIB_7220_HwErrStatus_PCIeMemParity_LSB 0x0
-#define QIB_7220_HwErrStatus_PCIeMemParity_RMASK 0xFF
-
-#define QIB_7220_HwErrClear_OFFS 0xA8
-#define QIB_7220_HwErrClear_IBCBusFromSPCParityErrClear_LSB 0x3F
-#define QIB_7220_HwErrClear_IBCBusFromSPCParityErrClear_RMASK 0x1
-#define QIB_7220_HwErrClear_IBCBusToSPCparityErrClear_LSB 0x3E
-#define QIB_7220_HwErrClear_IBCBusToSPCparityErrClear_RMASK 0x1
-#define QIB_7220_HwErrClear_Clk_uC_PLLNotLockedClear_LSB 0x3D
-#define QIB_7220_HwErrClear_Clk_uC_PLLNotLockedClear_RMASK 0x1
-#define QIB_7220_HwErrClear_IBSerdesPClkNotDetectClear_LSB 0x3C
-#define QIB_7220_HwErrClear_IBSerdesPClkNotDetectClear_RMASK 0x1
-#define QIB_7220_HwErrClear_PCIESerdesQ3PClkNotDetectClear_LSB 0x3B
-#define QIB_7220_HwErrClear_PCIESerdesQ3PClkNotDetectClear_RMASK 0x1
-#define QIB_7220_HwErrClear_PCIESerdesQ2PClkNotDetectClear_LSB 0x3A
-#define QIB_7220_HwErrClear_PCIESerdesQ2PClkNotDetectClear_RMASK 0x1
-#define QIB_7220_HwErrClear_PCIESerdesQ1PClkNotDetectClear_LSB 0x39
-#define QIB_7220_HwErrClear_PCIESerdesQ1PClkNotDetectClear_RMASK 0x1
-#define QIB_7220_HwErrClear_PCIESerdesQ0PClkNotDetectClear_LSB 0x38
-#define QIB_7220_HwErrClear_PCIESerdesQ0PClkNotDetectClear_RMASK 0x1
-#define QIB_7220_HwErrClear_Reserved_LSB 0x37
-#define QIB_7220_HwErrClear_Reserved_RMASK 0x1
-#define QIB_7220_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
-#define QIB_7220_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
-#define QIB_7220_HwErrClear_Reserved1_LSB 0x33
-#define QIB_7220_HwErrClear_Reserved1_RMASK 0x7
-#define QIB_7220_HwErrClear_RXEMemParityClear_LSB 0x2C
-#define QIB_7220_HwErrClear_RXEMemParityClear_RMASK 0x7F
-#define QIB_7220_HwErrClear_TXEMemParityClear_LSB 0x28
-#define QIB_7220_HwErrClear_TXEMemParityClear_RMASK 0xF
-#define QIB_7220_HwErrClear_DDSRXEQMemoryParityErrClear_LSB 0x27
-#define QIB_7220_HwErrClear_DDSRXEQMemoryParityErrClear_RMASK 0x1
-#define QIB_7220_HwErrClear_IB_uC_MemoryParityErrClear_LSB 0x26
-#define QIB_7220_HwErrClear_IB_uC_MemoryParityErrClear_RMASK 0x1
-#define QIB_7220_HwErrClear_PCIE_uC_Oct1MemoryParityErrClear_LSB 0x25
-#define QIB_7220_HwErrClear_PCIE_uC_Oct1MemoryParityErrClear_RMASK 0x1
-#define QIB_7220_HwErrClear_PCIE_uC_Oct0MemoryParityErrClear_LSB 0x24
-#define QIB_7220_HwErrClear_PCIE_uC_Oct0MemoryParityErrClear_RMASK 0x1
-#define QIB_7220_HwErrClear_Reserved2_LSB 0x22
-#define QIB_7220_HwErrClear_Reserved2_RMASK 0x3
-#define QIB_7220_HwErrClear_PCIeBusParityClr_LSB 0x1F
-#define QIB_7220_HwErrClear_PCIeBusParityClr_RMASK 0x7
-#define QIB_7220_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
-#define QIB_7220_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
-#define QIB_7220_HwErrClear_PoisonedTLPClear_LSB 0x1D
-#define QIB_7220_HwErrClear_PoisonedTLPClear_RMASK 0x1
-#define QIB_7220_HwErrClear_SDmaMemReadErrClear_LSB 0x1C
-#define QIB_7220_HwErrClear_SDmaMemReadErrClear_RMASK 0x1
-#define QIB_7220_HwErrClear_Reserved3_LSB 0x8
-#define QIB_7220_HwErrClear_Reserved3_RMASK 0xFFFFF
-#define QIB_7220_HwErrClear_PCIeMemParityClr_LSB 0x0
-#define QIB_7220_HwErrClear_PCIeMemParityClr_RMASK 0xFF
-
-#define QIB_7220_HwDiagCtrl_OFFS 0xB0
-#define QIB_7220_HwDiagCtrl_ForceIBCBusFromSPCParityErr_LSB 0x3F
-#define QIB_7220_HwDiagCtrl_ForceIBCBusFromSPCParityErr_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_ForceIBCBusToSPCParityErr_LSB 0x3E
-#define QIB_7220_HwDiagCtrl_ForceIBCBusToSPCParityErr_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_CounterWrEnable_LSB 0x3D
-#define QIB_7220_HwDiagCtrl_CounterWrEnable_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_CounterDisable_LSB 0x3C
-#define QIB_7220_HwDiagCtrl_CounterDisable_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_Reserved_LSB 0x33
-#define QIB_7220_HwDiagCtrl_Reserved_RMASK 0x1FF
-#define QIB_7220_HwDiagCtrl_ForceRxMemParityErr_LSB 0x2C
-#define QIB_7220_HwDiagCtrl_ForceRxMemParityErr_RMASK 0x7F
-#define QIB_7220_HwDiagCtrl_ForceTxMemparityErr_LSB 0x28
-#define QIB_7220_HwDiagCtrl_ForceTxMemparityErr_RMASK 0xF
-#define QIB_7220_HwDiagCtrl_ForceDDSRXEQMemoryParityErr_LSB 0x27
-#define QIB_7220_HwDiagCtrl_ForceDDSRXEQMemoryParityErr_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_ForceIB_uC_MemoryParityErr_LSB 0x26
-#define QIB_7220_HwDiagCtrl_ForceIB_uC_MemoryParityErr_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct1MemoryParityErr_LSB 0x25
-#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct1MemoryParityErr_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct0MemoryParityErr_LSB 0x24
-#define QIB_7220_HwDiagCtrl_ForcePCIE_uC_Oct0MemoryParityErr_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_Reserved1_LSB 0x23
-#define QIB_7220_HwDiagCtrl_Reserved1_RMASK 0x1
-#define QIB_7220_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
-#define QIB_7220_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
-#define QIB_7220_HwDiagCtrl_Reserved2_LSB 0x8
-#define QIB_7220_HwDiagCtrl_Reserved2_RMASK 0x7FFFFF
-#define QIB_7220_HwDiagCtrl_forcePCIeMemParity_LSB 0x0
-#define QIB_7220_HwDiagCtrl_forcePCIeMemParity_RMASK 0xFF
-
-#define QIB_7220_REG_0000B8_OFFS 0xB8
-
-#define QIB_7220_IBCStatus_OFFS 0xC0
-#define QIB_7220_IBCStatus_TxCreditOk_LSB 0x1F
-#define QIB_7220_IBCStatus_TxCreditOk_RMASK 0x1
-#define QIB_7220_IBCStatus_TxReady_LSB 0x1E
-#define QIB_7220_IBCStatus_TxReady_RMASK 0x1
-#define QIB_7220_IBCStatus_Reserved_LSB 0xE
-#define QIB_7220_IBCStatus_Reserved_RMASK 0xFFFF
-#define QIB_7220_IBCStatus_IBTxLaneReversed_LSB 0xD
-#define QIB_7220_IBCStatus_IBTxLaneReversed_RMASK 0x1
-#define QIB_7220_IBCStatus_IBRxLaneReversed_LSB 0xC
-#define QIB_7220_IBCStatus_IBRxLaneReversed_RMASK 0x1
-#define QIB_7220_IBCStatus_IB_SERDES_TRIM_DONE_LSB 0xB
-#define QIB_7220_IBCStatus_IB_SERDES_TRIM_DONE_RMASK 0x1
-#define QIB_7220_IBCStatus_DDS_RXEQ_FAIL_LSB 0xA
-#define QIB_7220_IBCStatus_DDS_RXEQ_FAIL_RMASK 0x1
-#define QIB_7220_IBCStatus_LinkWidthActive_LSB 0x9
-#define QIB_7220_IBCStatus_LinkWidthActive_RMASK 0x1
-#define QIB_7220_IBCStatus_LinkSpeedActive_LSB 0x8
-#define QIB_7220_IBCStatus_LinkSpeedActive_RMASK 0x1
-#define QIB_7220_IBCStatus_LinkState_LSB 0x5
-#define QIB_7220_IBCStatus_LinkState_RMASK 0x7
-#define QIB_7220_IBCStatus_LinkTrainingState_LSB 0x0
-#define QIB_7220_IBCStatus_LinkTrainingState_RMASK 0x1F
-
-#define QIB_7220_IBCCtrl_OFFS 0xC8
-#define QIB_7220_IBCCtrl_Loopback_LSB 0x3F
-#define QIB_7220_IBCCtrl_Loopback_RMASK 0x1
-#define QIB_7220_IBCCtrl_LinkDownDefaultState_LSB 0x3E
-#define QIB_7220_IBCCtrl_LinkDownDefaultState_RMASK 0x1
-#define QIB_7220_IBCCtrl_Reserved_LSB 0x2B
-#define QIB_7220_IBCCtrl_Reserved_RMASK 0x7FFFF
-#define QIB_7220_IBCCtrl_CreditScale_LSB 0x28
-#define QIB_7220_IBCCtrl_CreditScale_RMASK 0x7
-#define QIB_7220_IBCCtrl_OverrunThreshold_LSB 0x24
-#define QIB_7220_IBCCtrl_OverrunThreshold_RMASK 0xF
-#define QIB_7220_IBCCtrl_PhyerrThreshold_LSB 0x20
-#define QIB_7220_IBCCtrl_PhyerrThreshold_RMASK 0xF
-#define QIB_7220_IBCCtrl_MaxPktLen_LSB 0x15
-#define QIB_7220_IBCCtrl_MaxPktLen_RMASK 0x7FF
-#define QIB_7220_IBCCtrl_LinkCmd_LSB 0x13
-#define QIB_7220_IBCCtrl_LinkCmd_RMASK 0x3
-#define QIB_7220_IBCCtrl_LinkInitCmd_LSB 0x10
-#define QIB_7220_IBCCtrl_LinkInitCmd_RMASK 0x7
-#define QIB_7220_IBCCtrl_FlowCtrlWaterMark_LSB 0x8
-#define QIB_7220_IBCCtrl_FlowCtrlWaterMark_RMASK 0xFF
-#define QIB_7220_IBCCtrl_FlowCtrlPeriod_LSB 0x0
-#define QIB_7220_IBCCtrl_FlowCtrlPeriod_RMASK 0xFF
-
-#define QIB_7220_EXTStatus_OFFS 0xD0
-#define QIB_7220_EXTStatus_GPIOIn_LSB 0x30
-#define QIB_7220_EXTStatus_GPIOIn_RMASK 0xFFFF
-#define QIB_7220_EXTStatus_Reserved_LSB 0x20
-#define QIB_7220_EXTStatus_Reserved_RMASK 0xFFFF
-#define QIB_7220_EXTStatus_Reserved1_LSB 0x10
-#define QIB_7220_EXTStatus_Reserved1_RMASK 0xFFFF
-#define QIB_7220_EXTStatus_MemBISTDisabled_LSB 0xF
-#define QIB_7220_EXTStatus_MemBISTDisabled_RMASK 0x1
-#define QIB_7220_EXTStatus_MemBISTEndTest_LSB 0xE
-#define QIB_7220_EXTStatus_MemBISTEndTest_RMASK 0x1
-#define QIB_7220_EXTStatus_Reserved2_LSB 0x0
-#define QIB_7220_EXTStatus_Reserved2_RMASK 0x3FFF
-
-#define QIB_7220_EXTCtrl_OFFS 0xD8
-#define QIB_7220_EXTCtrl_GPIOOe_LSB 0x30
-#define QIB_7220_EXTCtrl_GPIOOe_RMASK 0xFFFF
-#define QIB_7220_EXTCtrl_GPIOInvert_LSB 0x20
-#define QIB_7220_EXTCtrl_GPIOInvert_RMASK 0xFFFF
-#define QIB_7220_EXTCtrl_Reserved_LSB 0x4
-#define QIB_7220_EXTCtrl_Reserved_RMASK 0xFFFFFFF
-#define QIB_7220_EXTCtrl_LEDPriPortGreenOn_LSB 0x3
-#define QIB_7220_EXTCtrl_LEDPriPortGreenOn_RMASK 0x1
-#define QIB_7220_EXTCtrl_LEDPriPortYellowOn_LSB 0x2
-#define QIB_7220_EXTCtrl_LEDPriPortYellowOn_RMASK 0x1
-#define QIB_7220_EXTCtrl_LEDGblOkGreenOn_LSB 0x1
-#define QIB_7220_EXTCtrl_LEDGblOkGreenOn_RMASK 0x1
-#define QIB_7220_EXTCtrl_LEDGblErrRedOff_LSB 0x0
-#define QIB_7220_EXTCtrl_LEDGblErrRedOff_RMASK 0x1
-
-#define QIB_7220_GPIOOut_OFFS 0xE0
-
-#define QIB_7220_GPIOMask_OFFS 0xE8
-
-#define QIB_7220_GPIOStatus_OFFS 0xF0
-
-#define QIB_7220_GPIOClear_OFFS 0xF8
-
-#define QIB_7220_RcvCtrl_OFFS 0x100
-#define QIB_7220_RcvCtrl_Reserved_LSB 0x27
-#define QIB_7220_RcvCtrl_Reserved_RMASK 0x1FFFFFF
-#define QIB_7220_RcvCtrl_RcvQPMapEnable_LSB 0x26
-#define QIB_7220_RcvCtrl_RcvQPMapEnable_RMASK 0x1
-#define QIB_7220_RcvCtrl_PortCfg_LSB 0x24
-#define QIB_7220_RcvCtrl_PortCfg_RMASK 0x3
-#define QIB_7220_RcvCtrl_TailUpd_LSB 0x23
-#define QIB_7220_RcvCtrl_TailUpd_RMASK 0x1
-#define QIB_7220_RcvCtrl_RcvPartitionKeyDisable_LSB 0x22
-#define QIB_7220_RcvCtrl_RcvPartitionKeyDisable_RMASK 0x1
-#define QIB_7220_RcvCtrl_IntrAvail_LSB 0x11
-#define QIB_7220_RcvCtrl_IntrAvail_RMASK 0x1FFFF
-#define QIB_7220_RcvCtrl_PortEnable_LSB 0x0
-#define QIB_7220_RcvCtrl_PortEnable_RMASK 0x1FFFF
-
-#define QIB_7220_RcvBTHQP_OFFS 0x108
-#define QIB_7220_RcvBTHQP_Reserved_LSB 0x18
-#define QIB_7220_RcvBTHQP_Reserved_RMASK 0xFF
-#define QIB_7220_RcvBTHQP_RcvBTHQP_LSB 0x0
-#define QIB_7220_RcvBTHQP_RcvBTHQP_RMASK 0xFFFFFF
-
-#define QIB_7220_RcvHdrSize_OFFS 0x110
-
-#define QIB_7220_RcvHdrCnt_OFFS 0x118
-
-#define QIB_7220_RcvHdrEntSize_OFFS 0x120
-
-#define QIB_7220_RcvTIDBase_OFFS 0x128
-
-#define QIB_7220_RcvTIDCnt_OFFS 0x130
-
-#define QIB_7220_RcvEgrBase_OFFS 0x138
-
-#define QIB_7220_RcvEgrCnt_OFFS 0x140
-
-#define QIB_7220_RcvBufBase_OFFS 0x148
-
-#define QIB_7220_RcvBufSize_OFFS 0x150
-
-#define QIB_7220_RxIntMemBase_OFFS 0x158
-
-#define QIB_7220_RxIntMemSize_OFFS 0x160
-
-#define QIB_7220_RcvPartitionKey_OFFS 0x168
-
-#define QIB_7220_RcvQPMulticastPort_OFFS 0x170
-#define QIB_7220_RcvQPMulticastPort_Reserved_LSB 0x5
-#define QIB_7220_RcvQPMulticastPort_Reserved_RMASK 0x7FFFFFFFFFFFFFF
-#define QIB_7220_RcvQPMulticastPort_RcvQpMcPort_LSB 0x0
-#define QIB_7220_RcvQPMulticastPort_RcvQpMcPort_RMASK 0x1F
-
-#define QIB_7220_RcvPktLEDCnt_OFFS 0x178
-#define QIB_7220_RcvPktLEDCnt_ONperiod_LSB 0x20
-#define QIB_7220_RcvPktLEDCnt_ONperiod_RMASK 0xFFFFFFFF
-#define QIB_7220_RcvPktLEDCnt_OFFperiod_LSB 0x0
-#define QIB_7220_RcvPktLEDCnt_OFFperiod_RMASK 0xFFFFFFFF
-
-#define QIB_7220_IBCDDRCtrl_OFFS 0x180
-#define QIB_7220_IBCDDRCtrl_IB_DLID_MASK_LSB 0x30
-#define QIB_7220_IBCDDRCtrl_IB_DLID_MASK_RMASK 0xFFFF
-#define QIB_7220_IBCDDRCtrl_IB_DLID_LSB 0x20
-#define QIB_7220_IBCDDRCtrl_IB_DLID_RMASK 0xFFFF
-#define QIB_7220_IBCDDRCtrl_Reserved_LSB 0x1B
-#define QIB_7220_IBCDDRCtrl_Reserved_RMASK 0x1F
-#define QIB_7220_IBCDDRCtrl_HRTBT_REQ_LSB 0x1A
-#define QIB_7220_IBCDDRCtrl_HRTBT_REQ_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_HRTBT_PORT_LSB 0x12
-#define QIB_7220_IBCDDRCtrl_HRTBT_PORT_RMASK 0xFF
-#define QIB_7220_IBCDDRCtrl_HRTBT_AUTO_LSB 0x11
-#define QIB_7220_IBCDDRCtrl_HRTBT_AUTO_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_HRTBT_ENB_LSB 0x10
-#define QIB_7220_IBCDDRCtrl_HRTBT_ENB_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_SD_DDS_LSB 0xC
-#define QIB_7220_IBCDDRCtrl_SD_DDS_RMASK 0xF
-#define QIB_7220_IBCDDRCtrl_SD_DDSV_LSB 0xB
-#define QIB_7220_IBCDDRCtrl_SD_DDSV_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_SD_ADD_ENB_LSB 0xA
-#define QIB_7220_IBCDDRCtrl_SD_ADD_ENB_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_SD_RX_EQUAL_ENABLE_LSB 0x9
-#define QIB_7220_IBCDDRCtrl_SD_RX_EQUAL_ENABLE_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_IB_LANE_REV_SUPPORTED_LSB 0x8
-#define QIB_7220_IBCDDRCtrl_IB_LANE_REV_SUPPORTED_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_IB_POLARITY_REV_SUPP_LSB 0x7
-#define QIB_7220_IBCDDRCtrl_IB_POLARITY_REV_SUPP_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_IB_NUM_CHANNELS_LSB 0x5
-#define QIB_7220_IBCDDRCtrl_IB_NUM_CHANNELS_RMASK 0x3
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_QDR_LSB 0x4
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_QDR_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_DDR_LSB 0x3
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_DDR_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_SDR_LSB 0x2
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_SDR_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_LSB 0x1
-#define QIB_7220_IBCDDRCtrl_SD_SPEED_RMASK 0x1
-#define QIB_7220_IBCDDRCtrl_IB_ENHANCED_MODE_LSB 0x0
-#define QIB_7220_IBCDDRCtrl_IB_ENHANCED_MODE_RMASK 0x1
-
-#define QIB_7220_HRTBT_GUID_OFFS 0x188
-
-#define QIB_7220_IBCDDRCtrl2_OFFS 0x1A0
-#define QIB_7220_IBCDDRCtrl2_IB_BACK_PORCH_LSB 0x5
-#define QIB_7220_IBCDDRCtrl2_IB_BACK_PORCH_RMASK 0x1F
-#define QIB_7220_IBCDDRCtrl2_IB_FRONT_PORCH_LSB 0x0
-#define QIB_7220_IBCDDRCtrl2_IB_FRONT_PORCH_RMASK 0x1F
-
-#define QIB_7220_IBCDDRStatus_OFFS 0x1A8
-#define QIB_7220_IBCDDRStatus_heartbeat_timed_out_LSB 0x24
-#define QIB_7220_IBCDDRStatus_heartbeat_timed_out_RMASK 0x1
-#define QIB_7220_IBCDDRStatus_heartbeat_crosstalk_LSB 0x20
-#define QIB_7220_IBCDDRStatus_heartbeat_crosstalk_RMASK 0xF
-#define QIB_7220_IBCDDRStatus_RxEqLocalDevice_LSB 0x1E
-#define QIB_7220_IBCDDRStatus_RxEqLocalDevice_RMASK 0x3
-#define QIB_7220_IBCDDRStatus_ReqDDSLocalFromRmt_LSB 0x1A
-#define QIB_7220_IBCDDRStatus_ReqDDSLocalFromRmt_RMASK 0xF
-#define QIB_7220_IBCDDRStatus_LinkRoundTripLatency_LSB 0x0
-#define QIB_7220_IBCDDRStatus_LinkRoundTripLatency_RMASK 0x3FFFFFF
-
-#define QIB_7220_JIntReload_OFFS 0x1B0
-#define QIB_7220_JIntReload_J_limit_reload_LSB 0x10
-#define QIB_7220_JIntReload_J_limit_reload_RMASK 0xFFFF
-#define QIB_7220_JIntReload_J_reload_LSB 0x0
-#define QIB_7220_JIntReload_J_reload_RMASK 0xFFFF
-
-#define QIB_7220_IBNCModeCtrl_OFFS 0x1B8
-#define QIB_7220_IBNCModeCtrl_Reserved_LSB 0x1A
-#define QIB_7220_IBNCModeCtrl_Reserved_RMASK 0x3FFFFFFFFF
-#define QIB_7220_IBNCModeCtrl_TSMCode_TS2_LSB 0x11
-#define QIB_7220_IBNCModeCtrl_TSMCode_TS2_RMASK 0x1FF
-#define QIB_7220_IBNCModeCtrl_TSMCode_TS1_LSB 0x8
-#define QIB_7220_IBNCModeCtrl_TSMCode_TS1_RMASK 0x1FF
-#define QIB_7220_IBNCModeCtrl_Reserved1_LSB 0x3
-#define QIB_7220_IBNCModeCtrl_Reserved1_RMASK 0x1F
-#define QIB_7220_IBNCModeCtrl_TSMEnable_ignore_TSM_on_rx_LSB 0x2
-#define QIB_7220_IBNCModeCtrl_TSMEnable_ignore_TSM_on_rx_RMASK 0x1
-#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS2_LSB 0x1
-#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS2_RMASK 0x1
-#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS1_LSB 0x0
-#define QIB_7220_IBNCModeCtrl_TSMEnable_send_TS1_RMASK 0x1
-
-#define QIB_7220_SendCtrl_OFFS 0x1C0
-#define QIB_7220_SendCtrl_Disarm_LSB 0x1F
-#define QIB_7220_SendCtrl_Disarm_RMASK 0x1
-#define QIB_7220_SendCtrl_Reserved_LSB 0x1D
-#define QIB_7220_SendCtrl_Reserved_RMASK 0x3
-#define QIB_7220_SendCtrl_AvailUpdThld_LSB 0x18
-#define QIB_7220_SendCtrl_AvailUpdThld_RMASK 0x1F
-#define QIB_7220_SendCtrl_DisarmPIOBuf_LSB 0x10
-#define QIB_7220_SendCtrl_DisarmPIOBuf_RMASK 0xFF
-#define QIB_7220_SendCtrl_Reserved1_LSB 0xD
-#define QIB_7220_SendCtrl_Reserved1_RMASK 0x7
-#define QIB_7220_SendCtrl_SDmaHalt_LSB 0xC
-#define QIB_7220_SendCtrl_SDmaHalt_RMASK 0x1
-#define QIB_7220_SendCtrl_SDmaEnable_LSB 0xB
-#define QIB_7220_SendCtrl_SDmaEnable_RMASK 0x1
-#define QIB_7220_SendCtrl_SDmaSingleDescriptor_LSB 0xA
-#define QIB_7220_SendCtrl_SDmaSingleDescriptor_RMASK 0x1
-#define QIB_7220_SendCtrl_SDmaIntEnable_LSB 0x9
-#define QIB_7220_SendCtrl_SDmaIntEnable_RMASK 0x1
-#define QIB_7220_SendCtrl_Reserved2_LSB 0x5
-#define QIB_7220_SendCtrl_Reserved2_RMASK 0xF
-#define QIB_7220_SendCtrl_SSpecialTriggerEn_LSB 0x4
-#define QIB_7220_SendCtrl_SSpecialTriggerEn_RMASK 0x1
-#define QIB_7220_SendCtrl_SPioEnable_LSB 0x3
-#define QIB_7220_SendCtrl_SPioEnable_RMASK 0x1
-#define QIB_7220_SendCtrl_SendBufAvailUpd_LSB 0x2
-#define QIB_7220_SendCtrl_SendBufAvailUpd_RMASK 0x1
-#define QIB_7220_SendCtrl_SendIntBufAvail_LSB 0x1
-#define QIB_7220_SendCtrl_SendIntBufAvail_RMASK 0x1
-#define QIB_7220_SendCtrl_Abort_LSB 0x0
-#define QIB_7220_SendCtrl_Abort_RMASK 0x1
-
-#define QIB_7220_SendBufBase_OFFS 0x1C8
-#define QIB_7220_SendBufBase_Reserved_LSB 0x35
-#define QIB_7220_SendBufBase_Reserved_RMASK 0x7FF
-#define QIB_7220_SendBufBase_BaseAddr_LargePIO_LSB 0x20
-#define QIB_7220_SendBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
-#define QIB_7220_SendBufBase_Reserved1_LSB 0x15
-#define QIB_7220_SendBufBase_Reserved1_RMASK 0x7FF
-#define QIB_7220_SendBufBase_BaseAddr_SmallPIO_LSB 0x0
-#define QIB_7220_SendBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
-
-#define QIB_7220_SendBufSize_OFFS 0x1D0
-#define QIB_7220_SendBufSize_Reserved_LSB 0x2D
-#define QIB_7220_SendBufSize_Reserved_RMASK 0xFFFFF
-#define QIB_7220_SendBufSize_Size_LargePIO_LSB 0x20
-#define QIB_7220_SendBufSize_Size_LargePIO_RMASK 0x1FFF
-#define QIB_7220_SendBufSize_Reserved1_LSB 0xC
-#define QIB_7220_SendBufSize_Reserved1_RMASK 0xFFFFF
-#define QIB_7220_SendBufSize_Size_SmallPIO_LSB 0x0
-#define QIB_7220_SendBufSize_Size_SmallPIO_RMASK 0xFFF
-
-#define QIB_7220_SendBufCnt_OFFS 0x1D8
-#define QIB_7220_SendBufCnt_Reserved_LSB 0x24
-#define QIB_7220_SendBufCnt_Reserved_RMASK 0xFFFFFFF
-#define QIB_7220_SendBufCnt_Num_LargeBuffers_LSB 0x20
-#define QIB_7220_SendBufCnt_Num_LargeBuffers_RMASK 0xF
-#define QIB_7220_SendBufCnt_Reserved1_LSB 0x9
-#define QIB_7220_SendBufCnt_Reserved1_RMASK 0x7FFFFF
-#define QIB_7220_SendBufCnt_Num_SmallBuffers_LSB 0x0
-#define QIB_7220_SendBufCnt_Num_SmallBuffers_RMASK 0x1FF
-
-#define QIB_7220_SendBufAvailAddr_OFFS 0x1E0
-#define QIB_7220_SendBufAvailAddr_SendBufAvailAddr_LSB 0x6
-#define QIB_7220_SendBufAvailAddr_SendBufAvailAddr_RMASK 0x3FFFFFFFF
-#define QIB_7220_SendBufAvailAddr_Reserved_LSB 0x0
-#define QIB_7220_SendBufAvailAddr_Reserved_RMASK 0x3F
-
-#define QIB_7220_TxIntMemBase_OFFS 0x1E8
-
-#define QIB_7220_TxIntMemSize_OFFS 0x1F0
-
-#define QIB_7220_SendDmaBase_OFFS 0x1F8
-#define QIB_7220_SendDmaBase_Reserved_LSB 0x30
-#define QIB_7220_SendDmaBase_Reserved_RMASK 0xFFFF
-#define QIB_7220_SendDmaBase_SendDmaBase_LSB 0x0
-#define QIB_7220_SendDmaBase_SendDmaBase_RMASK 0xFFFFFFFFFFFF
-
-#define QIB_7220_SendDmaLenGen_OFFS 0x200
-#define QIB_7220_SendDmaLenGen_Reserved_LSB 0x13
-#define QIB_7220_SendDmaLenGen_Reserved_RMASK 0x1FFFFFFFFFFF
-#define QIB_7220_SendDmaLenGen_Generation_LSB 0x10
-#define QIB_7220_SendDmaLenGen_Generation_MSB 0x12
-#define QIB_7220_SendDmaLenGen_Generation_RMASK 0x7
-#define QIB_7220_SendDmaLenGen_Length_LSB 0x0
-#define QIB_7220_SendDmaLenGen_Length_RMASK 0xFFFF
-
-#define QIB_7220_SendDmaTail_OFFS 0x208
-#define QIB_7220_SendDmaTail_Reserved_LSB 0x10
-#define QIB_7220_SendDmaTail_Reserved_RMASK 0xFFFFFFFFFFFF
-#define QIB_7220_SendDmaTail_SendDmaTail_LSB 0x0
-#define QIB_7220_SendDmaTail_SendDmaTail_RMASK 0xFFFF
-
-#define QIB_7220_SendDmaHead_OFFS 0x210
-#define QIB_7220_SendDmaHead_Reserved_LSB 0x30
-#define QIB_7220_SendDmaHead_Reserved_RMASK 0xFFFF
-#define QIB_7220_SendDmaHead_InternalSendDmaHead_LSB 0x20
-#define QIB_7220_SendDmaHead_InternalSendDmaHead_RMASK 0xFFFF
-#define QIB_7220_SendDmaHead_Reserved1_LSB 0x10
-#define QIB_7220_SendDmaHead_Reserved1_RMASK 0xFFFF
-#define QIB_7220_SendDmaHead_SendDmaHead_LSB 0x0
-#define QIB_7220_SendDmaHead_SendDmaHead_RMASK 0xFFFF
-
-#define QIB_7220_SendDmaHeadAddr_OFFS 0x218
-#define QIB_7220_SendDmaHeadAddr_Reserved_LSB 0x30
-#define QIB_7220_SendDmaHeadAddr_Reserved_RMASK 0xFFFF
-#define QIB_7220_SendDmaHeadAddr_SendDmaHeadAddr_LSB 0x0
-#define QIB_7220_SendDmaHeadAddr_SendDmaHeadAddr_RMASK 0xFFFFFFFFFFFF
-
-#define QIB_7220_SendDmaBufMask0_OFFS 0x220
-#define QIB_7220_SendDmaBufMask0_BufMask_63_0_LSB 0x0
-#define QIB_7220_SendDmaBufMask0_BufMask_63_0_RMASK 0x0
-
-#define QIB_7220_SendDmaStatus_OFFS 0x238
-#define QIB_7220_SendDmaStatus_ScoreBoardDrainInProg_LSB 0x3F
-#define QIB_7220_SendDmaStatus_ScoreBoardDrainInProg_RMASK 0x1
-#define QIB_7220_SendDmaStatus_AbortInProg_LSB 0x3E
-#define QIB_7220_SendDmaStatus_AbortInProg_RMASK 0x1
-#define QIB_7220_SendDmaStatus_InternalSDmaEnable_LSB 0x3D
-#define QIB_7220_SendDmaStatus_InternalSDmaEnable_RMASK 0x1
-#define QIB_7220_SendDmaStatus_ScbDescIndex_13_0_LSB 0x2F
-#define QIB_7220_SendDmaStatus_ScbDescIndex_13_0_RMASK 0x3FFF
-#define QIB_7220_SendDmaStatus_RpyLowAddr_6_0_LSB 0x28
-#define QIB_7220_SendDmaStatus_RpyLowAddr_6_0_RMASK 0x7F
-#define QIB_7220_SendDmaStatus_RpyTag_7_0_LSB 0x20
-#define QIB_7220_SendDmaStatus_RpyTag_7_0_RMASK 0xFF
-#define QIB_7220_SendDmaStatus_ScbFull_LSB 0x1F
-#define QIB_7220_SendDmaStatus_ScbFull_RMASK 0x1
-#define QIB_7220_SendDmaStatus_ScbEmpty_LSB 0x1E
-#define QIB_7220_SendDmaStatus_ScbEmpty_RMASK 0x1
-#define QIB_7220_SendDmaStatus_ScbEntryValid_LSB 0x1D
-#define QIB_7220_SendDmaStatus_ScbEntryValid_RMASK 0x1
-#define QIB_7220_SendDmaStatus_ScbFetchDescFlag_LSB 0x1C
-#define QIB_7220_SendDmaStatus_ScbFetchDescFlag_RMASK 0x1
-#define QIB_7220_SendDmaStatus_SplFifoReadyToGo_LSB 0x1B
-#define QIB_7220_SendDmaStatus_SplFifoReadyToGo_RMASK 0x1
-#define QIB_7220_SendDmaStatus_SplFifoDisarmed_LSB 0x1A
-#define QIB_7220_SendDmaStatus_SplFifoDisarmed_RMASK 0x1
-#define QIB_7220_SendDmaStatus_SplFifoEmpty_LSB 0x19
-#define QIB_7220_SendDmaStatus_SplFifoEmpty_RMASK 0x1
-#define QIB_7220_SendDmaStatus_SplFifoFull_LSB 0x18
-#define QIB_7220_SendDmaStatus_SplFifoFull_RMASK 0x1
-#define QIB_7220_SendDmaStatus_SplFifoBufNum_LSB 0x10
-#define QIB_7220_SendDmaStatus_SplFifoBufNum_RMASK 0xFF
-#define QIB_7220_SendDmaStatus_SplFifoDescIndex_LSB 0x0
-#define QIB_7220_SendDmaStatus_SplFifoDescIndex_RMASK 0xFFFF
-
-#define QIB_7220_SendBufErr0_OFFS 0x240
-#define QIB_7220_SendBufErr0_SendBufErr_63_0_LSB 0x0
-#define QIB_7220_SendBufErr0_SendBufErr_63_0_RMASK 0x0
-
-#define QIB_7220_RcvHdrAddr0_OFFS 0x270
-#define QIB_7220_RcvHdrAddr0_RcvHdrAddr0_LSB 0x2
-#define QIB_7220_RcvHdrAddr0_RcvHdrAddr0_RMASK 0x3FFFFFFFFF
-#define QIB_7220_RcvHdrAddr0_Reserved_LSB 0x0
-#define QIB_7220_RcvHdrAddr0_Reserved_RMASK 0x3
-
-#define QIB_7220_RcvHdrTailAddr0_OFFS 0x300
-#define QIB_7220_RcvHdrTailAddr0_RcvHdrTailAddr0_LSB 0x2
-#define QIB_7220_RcvHdrTailAddr0_RcvHdrTailAddr0_RMASK 0x3FFFFFFFFF
-#define QIB_7220_RcvHdrTailAddr0_Reserved_LSB 0x0
-#define QIB_7220_RcvHdrTailAddr0_Reserved_RMASK 0x3
-
-#define QIB_7220_ibsd_epb_access_ctrl_OFFS 0x3C0
-#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_granted_LSB 0x8
-#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_granted_RMASK 0x1
-#define QIB_7220_ibsd_epb_access_ctrl_Reserved_LSB 0x1
-#define QIB_7220_ibsd_epb_access_ctrl_Reserved_RMASK 0x7F
-#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_LSB 0x0
-#define QIB_7220_ibsd_epb_access_ctrl_sw_ib_epb_req_RMASK 0x1
-
-#define QIB_7220_ibsd_epb_transaction_reg_OFFS 0x3C8
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_rdy_LSB 0x1F
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_rdy_RMASK 0x1
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_req_error_LSB 0x1E
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_req_error_RMASK 0x1
-#define QIB_7220_ibsd_epb_transaction_reg_Reserved_LSB 0x1D
-#define QIB_7220_ibsd_epb_transaction_reg_Reserved_RMASK 0x1
-#define QIB_7220_ibsd_epb_transaction_reg_mem_data_parity_LSB 0x1C
-#define QIB_7220_ibsd_epb_transaction_reg_mem_data_parity_RMASK 0x1
-#define QIB_7220_ibsd_epb_transaction_reg_Reserved1_LSB 0x1B
-#define QIB_7220_ibsd_epb_transaction_reg_Reserved1_RMASK 0x1
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_cs_LSB 0x19
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_cs_RMASK 0x3
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_read_write_LSB 0x18
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_read_write_RMASK 0x1
-#define QIB_7220_ibsd_epb_transaction_reg_Reserved2_LSB 0x17
-#define QIB_7220_ibsd_epb_transaction_reg_Reserved2_RMASK 0x1
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_address_LSB 0x8
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_address_RMASK 0x7FFF
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_data_LSB 0x0
-#define QIB_7220_ibsd_epb_transaction_reg_ib_epb_data_RMASK 0xFF
-
-#define QIB_7220_XGXSCfg_OFFS 0x3D8
-#define QIB_7220_XGXSCfg_sel_link_down_for_fctrl_lane_sync_reset_LSB 0x3F
-#define QIB_7220_XGXSCfg_sel_link_down_for_fctrl_lane_sync_reset_RMASK 0x1
-#define QIB_7220_XGXSCfg_Reserved_LSB 0x13
-#define QIB_7220_XGXSCfg_Reserved_RMASK 0xFFFFFFFFFFF
-#define QIB_7220_XGXSCfg_link_sync_mask_LSB 0x9
-#define QIB_7220_XGXSCfg_link_sync_mask_RMASK 0x3FF
-#define QIB_7220_XGXSCfg_Reserved1_LSB 0x3
-#define QIB_7220_XGXSCfg_Reserved1_RMASK 0x3F
-#define QIB_7220_XGXSCfg_xcv_reset_LSB 0x2
-#define QIB_7220_XGXSCfg_xcv_reset_RMASK 0x1
-#define QIB_7220_XGXSCfg_Reserved2_LSB 0x1
-#define QIB_7220_XGXSCfg_Reserved2_RMASK 0x1
-#define QIB_7220_XGXSCfg_tx_rx_reset_LSB 0x0
-#define QIB_7220_XGXSCfg_tx_rx_reset_RMASK 0x1
-
-#define QIB_7220_IBSerDesCtrl_OFFS 0x3E0
-#define QIB_7220_IBSerDesCtrl_Reserved_LSB 0x2D
-#define QIB_7220_IBSerDesCtrl_Reserved_RMASK 0x7FFFF
-#define QIB_7220_IBSerDesCtrl_INT_uC_LSB 0x2C
-#define QIB_7220_IBSerDesCtrl_INT_uC_RMASK 0x1
-#define QIB_7220_IBSerDesCtrl_CKSEL_uC_LSB 0x2A
-#define QIB_7220_IBSerDesCtrl_CKSEL_uC_RMASK 0x3
-#define QIB_7220_IBSerDesCtrl_PLLN_LSB 0x28
-#define QIB_7220_IBSerDesCtrl_PLLN_RMASK 0x3
-#define QIB_7220_IBSerDesCtrl_PLLM_LSB 0x25
-#define QIB_7220_IBSerDesCtrl_PLLM_RMASK 0x7
-#define QIB_7220_IBSerDesCtrl_TXOBPD_LSB 0x24
-#define QIB_7220_IBSerDesCtrl_TXOBPD_RMASK 0x1
-#define QIB_7220_IBSerDesCtrl_TWC_LSB 0x23
-#define QIB_7220_IBSerDesCtrl_TWC_RMASK 0x1
-#define QIB_7220_IBSerDesCtrl_RXIDLE_LSB 0x22
-#define QIB_7220_IBSerDesCtrl_RXIDLE_RMASK 0x1
-#define QIB_7220_IBSerDesCtrl_RXINV_LSB 0x21
-#define QIB_7220_IBSerDesCtrl_RXINV_RMASK 0x1
-#define QIB_7220_IBSerDesCtrl_TXINV_LSB 0x20
-#define QIB_7220_IBSerDesCtrl_TXINV_RMASK 0x1
-#define QIB_7220_IBSerDesCtrl_Reserved1_LSB 0x12
-#define QIB_7220_IBSerDesCtrl_Reserved1_RMASK 0x3FFF
-#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForRXEQ_LSB 0xD
-#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForRXEQ_RMASK 0x1F
-#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForDDS_LSB 0x8
-#define QIB_7220_IBSerDesCtrl_NumSerDesRegsToWrForDDS_RMASK 0x1F
-#define QIB_7220_IBSerDesCtrl_Reserved2_LSB 0x1
-#define QIB_7220_IBSerDesCtrl_Reserved2_RMASK 0x7F
-#define QIB_7220_IBSerDesCtrl_ResetIB_uC_Core_LSB 0x0
-#define QIB_7220_IBSerDesCtrl_ResetIB_uC_Core_RMASK 0x1
-
-#define QIB_7220_pciesd_epb_access_ctrl_OFFS 0x400
-#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_granted_LSB 0x8
-#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_granted_RMASK 0x1
-#define QIB_7220_pciesd_epb_access_ctrl_Reserved_LSB 0x3
-#define QIB_7220_pciesd_epb_access_ctrl_Reserved_RMASK 0x1F
-#define QIB_7220_pciesd_epb_access_ctrl_sw_pcieepb_star_en_LSB 0x1
-#define QIB_7220_pciesd_epb_access_ctrl_sw_pcieepb_star_en_RMASK 0x3
-#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_LSB 0x0
-#define QIB_7220_pciesd_epb_access_ctrl_sw_pcie_epb_req_RMASK 0x1
-
-#define QIB_7220_pciesd_epb_transaction_reg_OFFS 0x408
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_LSB 0x1F
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_rdy_RMASK 0x1
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_req_error_LSB 0x1E
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_req_error_RMASK 0x1
-#define QIB_7220_pciesd_epb_transaction_reg_Reserved_LSB 0x1D
-#define QIB_7220_pciesd_epb_transaction_reg_Reserved_RMASK 0x1
-#define QIB_7220_pciesd_epb_transaction_reg_mem_data_parity_LSB 0x1C
-#define QIB_7220_pciesd_epb_transaction_reg_mem_data_parity_RMASK 0x1
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_cs_LSB 0x19
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_cs_RMASK 0x7
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_read_write_LSB 0x18
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_read_write_RMASK 0x1
-#define QIB_7220_pciesd_epb_transaction_reg_Reserved1_LSB 0x17
-#define QIB_7220_pciesd_epb_transaction_reg_Reserved1_RMASK 0x1
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_LSB 0x8
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_address_RMASK 0x7FFF
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_LSB 0x0
-#define QIB_7220_pciesd_epb_transaction_reg_pcie_epb_data_RMASK 0xFF
-
-#define QIB_7220_SerDes_DDSRXEQ0_OFFS 0x500
-#define QIB_7220_SerDes_DDSRXEQ0_reg_addr_LSB 0x4
-#define QIB_7220_SerDes_DDSRXEQ0_reg_addr_RMASK 0x3F
-#define QIB_7220_SerDes_DDSRXEQ0_element_num_LSB 0x0
-#define QIB_7220_SerDes_DDSRXEQ0_element_num_RMASK 0xF
-
-#define QIB_7220_LBIntCnt_OFFS 0x13000
-
-#define QIB_7220_LBFlowStallCnt_OFFS 0x13008
-
-#define QIB_7220_TxSDmaDescCnt_OFFS 0x13010
-
-#define QIB_7220_TxUnsupVLErrCnt_OFFS 0x13018
-
-#define QIB_7220_TxDataPktCnt_OFFS 0x13020
-
-#define QIB_7220_TxFlowPktCnt_OFFS 0x13028
-
-#define QIB_7220_TxDwordCnt_OFFS 0x13030
-
-#define QIB_7220_TxLenErrCnt_OFFS 0x13038
-
-#define QIB_7220_TxMaxMinLenErrCnt_OFFS 0x13040
-
-#define QIB_7220_TxUnderrunCnt_OFFS 0x13048
-
-#define QIB_7220_TxFlowStallCnt_OFFS 0x13050
-
-#define QIB_7220_TxDroppedPktCnt_OFFS 0x13058
-
-#define QIB_7220_RxDroppedPktCnt_OFFS 0x13060
-
-#define QIB_7220_RxDataPktCnt_OFFS 0x13068
-
-#define QIB_7220_RxFlowPktCnt_OFFS 0x13070
-
-#define QIB_7220_RxDwordCnt_OFFS 0x13078
-
-#define QIB_7220_RxLenErrCnt_OFFS 0x13080
-
-#define QIB_7220_RxMaxMinLenErrCnt_OFFS 0x13088
-
-#define QIB_7220_RxICRCErrCnt_OFFS 0x13090
-
-#define QIB_7220_RxVCRCErrCnt_OFFS 0x13098
-
-#define QIB_7220_RxFlowCtrlViolCnt_OFFS 0x130A0
-
-#define QIB_7220_RxVersionErrCnt_OFFS 0x130A8
-
-#define QIB_7220_RxLinkMalformCnt_OFFS 0x130B0
-
-#define QIB_7220_RxEBPCnt_OFFS 0x130B8
-
-#define QIB_7220_RxLPCRCErrCnt_OFFS 0x130C0
-
-#define QIB_7220_RxBufOvflCnt_OFFS 0x130C8
-
-#define QIB_7220_RxTIDFullErrCnt_OFFS 0x130D0
-
-#define QIB_7220_RxTIDValidErrCnt_OFFS 0x130D8
-
-#define QIB_7220_RxPKeyMismatchCnt_OFFS 0x130E0
-
-#define QIB_7220_RxP0HdrEgrOvflCnt_OFFS 0x130E8
-
-#define QIB_7220_IBStatusChangeCnt_OFFS 0x13170
-
-#define QIB_7220_IBLinkErrRecoveryCnt_OFFS 0x13178
-
-#define QIB_7220_IBLinkDownedCnt_OFFS 0x13180
-
-#define QIB_7220_IBSymbolErrCnt_OFFS 0x13188
-
-#define QIB_7220_RxVL15DroppedPktCnt_OFFS 0x13190
-
-#define QIB_7220_RxOtherLocalPhyErrCnt_OFFS 0x13198
-
-#define QIB_7220_PcieRetryBufDiagQwordCnt_OFFS 0x131A0
-
-#define QIB_7220_ExcessBufferOvflCnt_OFFS 0x131A8
-
-#define QIB_7220_LocalLinkIntegrityErrCnt_OFFS 0x131B0
-
-#define QIB_7220_RxVlErrCnt_OFFS 0x131B8
-
-#define QIB_7220_RxDlidFltrCnt_OFFS 0x131C0
-
-#define QIB_7220_CNT_0131C8_OFFS 0x131C8
-
-#define QIB_7220_PSStat_OFFS 0x13200
-
-#define QIB_7220_PSStart_OFFS 0x13208
-
-#define QIB_7220_PSInterval_OFFS 0x13210
-
-#define QIB_7220_PSRcvDataCount_OFFS 0x13218
-
-#define QIB_7220_PSRcvPktsCount_OFFS 0x13220
-
-#define QIB_7220_PSXmitDataCount_OFFS 0x13228
-
-#define QIB_7220_PSXmitPktsCount_OFFS 0x13230
-
-#define QIB_7220_PSXmitWaitCount_OFFS 0x13238
-
-#define QIB_7220_CNT_013240_OFFS 0x13240
-
-#define QIB_7220_RcvEgrArray_OFFS 0x14000
-
-#define QIB_7220_MEM_038000_OFFS 0x38000
-
-#define QIB_7220_RcvTIDArray0_OFFS 0x53000
-
-#define QIB_7220_PIOLaunchFIFO_OFFS 0x64000
-
-#define QIB_7220_MEM_064480_OFFS 0x64480
-
-#define QIB_7220_SendPIOpbcCache_OFFS 0x64800
-
-#define QIB_7220_MEM_064C80_OFFS 0x64C80
-
-#define QIB_7220_PreLaunchFIFO_OFFS 0x65000
-
-#define QIB_7220_MEM_065080_OFFS 0x65080
-
-#define QIB_7220_ScoreBoard_OFFS 0x65400
-
-#define QIB_7220_MEM_065440_OFFS 0x65440
-
-#define QIB_7220_DescriptorFIFO_OFFS 0x65800
-
-#define QIB_7220_MEM_065880_OFFS 0x65880
-
-#define QIB_7220_RcvBuf1_OFFS 0x72000
-
-#define QIB_7220_MEM_074800_OFFS 0x74800
-
-#define QIB_7220_RcvBuf2_OFFS 0x75000
-
-#define QIB_7220_MEM_076400_OFFS 0x76400
-
-#define QIB_7220_RcvFlags_OFFS 0x77000
-
-#define QIB_7220_MEM_078400_OFFS 0x78400
-
-#define QIB_7220_RcvLookupBuf1_OFFS 0x79000
-
-#define QIB_7220_MEM_07A400_OFFS 0x7A400
-
-#define QIB_7220_RcvDMADatBuf_OFFS 0x7B000
-
-#define QIB_7220_RcvDMAHdrBuf_OFFS 0x7B800
-
-#define QIB_7220_MiscRXEIntMem_OFFS 0x7C000
-
-#define QIB_7220_MEM_07D400_OFFS 0x7D400
-
-#define QIB_7220_PCIERcvBuf_OFFS 0x80000
-
-#define QIB_7220_PCIERetryBuf_OFFS 0x84000
-
-#define QIB_7220_PCIERcvBufRdToWrAddr_OFFS 0x88000
-
-#define QIB_7220_PCIECplBuf_OFFS 0x90000
-
-#define QIB_7220_IBSerDesMappTable_OFFS 0x94000
-
-#define QIB_7220_MEM_095000_OFFS 0x95000
-
-#define QIB_7220_SendBuf0_MA_OFFS 0x100000
-
-#define QIB_7220_MEM_1A0000_OFFS 0x1A0000
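
Throughout these generated headers, each register field is described by a bit offset (_LSB, plus an explicit _MSB in the 7322 variant below) and a right-justified mask (_RMASK). A minimal C sketch of the usual access pattern follows, assuming the conventional (regval >> LSB) & RMASK extraction; the QIB_FIELD_GET/QIB_FIELD_PUT helpers and the example function are illustrative, not taken from the driver:

#include <stdint.h>

/* Extract a right-justified field from a 64-bit register image. */
#define QIB_FIELD_GET(regval, field) \
	(((regval) >> field##_LSB) & field##_RMASK)

/* Position a field value for insertion into a register image. */
#define QIB_FIELD_PUT(field, val) \
	(((uint64_t)(val) & field##_RMASK) << field##_LSB)

/*
 * Example: the 5-bit LinkTrainingState field of IBCStatus
 * (defined above with LSB 0x0 and RMASK 0x1F).
 */
static inline unsigned int qib_link_training_state(uint64_t ibcstatus)
{
	return QIB_FIELD_GET(ibcstatus, QIB_7220_IBCStatus_LinkTrainingState);
}

One caveat: for full-register 64-bit fields the generator emits an RMASK of 0x0 (see SendDmaBufMask0 and SendBufErr0 above), so those values are read whole; applying the sketch's mask there would zero the result.
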
diff --git a/drivers/infiniband/hw/qib/qib_7322_regs.h b/drivers/infiniband/hw/qib/qib_7322_regs.h
deleted file mode 100644
index 32dc81ff8d4a..000000000000
--- a/drivers/infiniband/hw/qib/qib_7322_regs.h
+++ /dev/null
@@ -1,3163 +0,0 @@
-/*
- * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/* This file is mechanically generated from RTL. Any hand-edits will be lost! */
-
-#define QIB_7322_Revision_OFFS 0x0
-#define QIB_7322_Revision_DEF 0x0000000002010601
-#define QIB_7322_Revision_R_Simulator_LSB 0x3F
-#define QIB_7322_Revision_R_Simulator_MSB 0x3F
-#define QIB_7322_Revision_R_Simulator_RMASK 0x1
-#define QIB_7322_Revision_R_Emulation_LSB 0x3E
-#define QIB_7322_Revision_R_Emulation_MSB 0x3E
-#define QIB_7322_Revision_R_Emulation_RMASK 0x1
-#define QIB_7322_Revision_R_Emulation_Revcode_LSB 0x28
-#define QIB_7322_Revision_R_Emulation_Revcode_MSB 0x3D
-#define QIB_7322_Revision_R_Emulation_Revcode_RMASK 0x3FFFFF
-#define QIB_7322_Revision_BoardID_LSB 0x20
-#define QIB_7322_Revision_BoardID_MSB 0x27
-#define QIB_7322_Revision_BoardID_RMASK 0xFF
-#define QIB_7322_Revision_R_SW_LSB 0x18
-#define QIB_7322_Revision_R_SW_MSB 0x1F
-#define QIB_7322_Revision_R_SW_RMASK 0xFF
-#define QIB_7322_Revision_R_Arch_LSB 0x10
-#define QIB_7322_Revision_R_Arch_MSB 0x17
-#define QIB_7322_Revision_R_Arch_RMASK 0xFF
-#define QIB_7322_Revision_R_ChipRevMajor_LSB 0x8
-#define QIB_7322_Revision_R_ChipRevMajor_MSB 0xF
-#define QIB_7322_Revision_R_ChipRevMajor_RMASK 0xFF
-#define QIB_7322_Revision_R_ChipRevMinor_LSB 0x0
-#define QIB_7322_Revision_R_ChipRevMinor_MSB 0x7
-#define QIB_7322_Revision_R_ChipRevMinor_RMASK 0xFF
-
-#define QIB_7322_Control_OFFS 0x8
-#define QIB_7322_Control_DEF 0x0000000000000000
-#define QIB_7322_Control_PCIECplQDiagEn_LSB 0x6
-#define QIB_7322_Control_PCIECplQDiagEn_MSB 0x6
-#define QIB_7322_Control_PCIECplQDiagEn_RMASK 0x1
-#define QIB_7322_Control_PCIEPostQDiagEn_LSB 0x5
-#define QIB_7322_Control_PCIEPostQDiagEn_MSB 0x5
-#define QIB_7322_Control_PCIEPostQDiagEn_RMASK 0x1
-#define QIB_7322_Control_SDmaDescFetchPriorityEn_LSB 0x4
-#define QIB_7322_Control_SDmaDescFetchPriorityEn_MSB 0x4
-#define QIB_7322_Control_SDmaDescFetchPriorityEn_RMASK 0x1
-#define QIB_7322_Control_PCIERetryBufDiagEn_LSB 0x3
-#define QIB_7322_Control_PCIERetryBufDiagEn_MSB 0x3
-#define QIB_7322_Control_PCIERetryBufDiagEn_RMASK 0x1
-#define QIB_7322_Control_FreezeMode_LSB 0x1
-#define QIB_7322_Control_FreezeMode_MSB 0x1
-#define QIB_7322_Control_FreezeMode_RMASK 0x1
-#define QIB_7322_Control_SyncReset_LSB 0x0
-#define QIB_7322_Control_SyncReset_MSB 0x0
-#define QIB_7322_Control_SyncReset_RMASK 0x1
-
-#define QIB_7322_PageAlign_OFFS 0x10
-#define QIB_7322_PageAlign_DEF 0x0000000000001000
-
-#define QIB_7322_ContextCnt_OFFS 0x18
-#define QIB_7322_ContextCnt_DEF 0x0000000000000012
-
-#define QIB_7322_Scratch_OFFS 0x20
-#define QIB_7322_Scratch_DEF 0x0000000000000000
-
-#define QIB_7322_CntrRegBase_OFFS 0x28
-#define QIB_7322_CntrRegBase_DEF 0x0000000000011000
-
-#define QIB_7322_SendRegBase_OFFS 0x30
-#define QIB_7322_SendRegBase_DEF 0x0000000000003000
-
-#define QIB_7322_UserRegBase_OFFS 0x38
-#define QIB_7322_UserRegBase_DEF 0x0000000000200000
-
-#define QIB_7322_IntMask_OFFS 0x68
-#define QIB_7322_IntMask_DEF 0x0000000000000000
-#define QIB_7322_IntMask_SDmaIntMask_1_LSB 0x3F
-#define QIB_7322_IntMask_SDmaIntMask_1_MSB 0x3F
-#define QIB_7322_IntMask_SDmaIntMask_1_RMASK 0x1
-#define QIB_7322_IntMask_SDmaIntMask_0_LSB 0x3E
-#define QIB_7322_IntMask_SDmaIntMask_0_MSB 0x3E
-#define QIB_7322_IntMask_SDmaIntMask_0_RMASK 0x1
-#define QIB_7322_IntMask_SDmaProgressIntMask_1_LSB 0x3D
-#define QIB_7322_IntMask_SDmaProgressIntMask_1_MSB 0x3D
-#define QIB_7322_IntMask_SDmaProgressIntMask_1_RMASK 0x1
-#define QIB_7322_IntMask_SDmaProgressIntMask_0_LSB 0x3C
-#define QIB_7322_IntMask_SDmaProgressIntMask_0_MSB 0x3C
-#define QIB_7322_IntMask_SDmaProgressIntMask_0_RMASK 0x1
-#define QIB_7322_IntMask_SDmaIdleIntMask_1_LSB 0x3B
-#define QIB_7322_IntMask_SDmaIdleIntMask_1_MSB 0x3B
-#define QIB_7322_IntMask_SDmaIdleIntMask_1_RMASK 0x1
-#define QIB_7322_IntMask_SDmaIdleIntMask_0_LSB 0x3A
-#define QIB_7322_IntMask_SDmaIdleIntMask_0_MSB 0x3A
-#define QIB_7322_IntMask_SDmaIdleIntMask_0_RMASK 0x1
-#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_LSB 0x39
-#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_MSB 0x39
-#define QIB_7322_IntMask_SDmaCleanupDoneMask_1_RMASK 0x1
-#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_LSB 0x38
-#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_MSB 0x38
-#define QIB_7322_IntMask_SDmaCleanupDoneMask_0_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg17IntMask_LSB 0x31
-#define QIB_7322_IntMask_RcvUrg17IntMask_MSB 0x31
-#define QIB_7322_IntMask_RcvUrg17IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg16IntMask_LSB 0x30
-#define QIB_7322_IntMask_RcvUrg16IntMask_MSB 0x30
-#define QIB_7322_IntMask_RcvUrg16IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg15IntMask_LSB 0x2F
-#define QIB_7322_IntMask_RcvUrg15IntMask_MSB 0x2F
-#define QIB_7322_IntMask_RcvUrg15IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg14IntMask_LSB 0x2E
-#define QIB_7322_IntMask_RcvUrg14IntMask_MSB 0x2E
-#define QIB_7322_IntMask_RcvUrg14IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg13IntMask_LSB 0x2D
-#define QIB_7322_IntMask_RcvUrg13IntMask_MSB 0x2D
-#define QIB_7322_IntMask_RcvUrg13IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg12IntMask_LSB 0x2C
-#define QIB_7322_IntMask_RcvUrg12IntMask_MSB 0x2C
-#define QIB_7322_IntMask_RcvUrg12IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg11IntMask_LSB 0x2B
-#define QIB_7322_IntMask_RcvUrg11IntMask_MSB 0x2B
-#define QIB_7322_IntMask_RcvUrg11IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg10IntMask_LSB 0x2A
-#define QIB_7322_IntMask_RcvUrg10IntMask_MSB 0x2A
-#define QIB_7322_IntMask_RcvUrg10IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg9IntMask_LSB 0x29
-#define QIB_7322_IntMask_RcvUrg9IntMask_MSB 0x29
-#define QIB_7322_IntMask_RcvUrg9IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg8IntMask_LSB 0x28
-#define QIB_7322_IntMask_RcvUrg8IntMask_MSB 0x28
-#define QIB_7322_IntMask_RcvUrg8IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg7IntMask_LSB 0x27
-#define QIB_7322_IntMask_RcvUrg7IntMask_MSB 0x27
-#define QIB_7322_IntMask_RcvUrg7IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg6IntMask_LSB 0x26
-#define QIB_7322_IntMask_RcvUrg6IntMask_MSB 0x26
-#define QIB_7322_IntMask_RcvUrg6IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg5IntMask_LSB 0x25
-#define QIB_7322_IntMask_RcvUrg5IntMask_MSB 0x25
-#define QIB_7322_IntMask_RcvUrg5IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg4IntMask_LSB 0x24
-#define QIB_7322_IntMask_RcvUrg4IntMask_MSB 0x24
-#define QIB_7322_IntMask_RcvUrg4IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg3IntMask_LSB 0x23
-#define QIB_7322_IntMask_RcvUrg3IntMask_MSB 0x23
-#define QIB_7322_IntMask_RcvUrg3IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg2IntMask_LSB 0x22
-#define QIB_7322_IntMask_RcvUrg2IntMask_MSB 0x22
-#define QIB_7322_IntMask_RcvUrg2IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg1IntMask_LSB 0x21
-#define QIB_7322_IntMask_RcvUrg1IntMask_MSB 0x21
-#define QIB_7322_IntMask_RcvUrg1IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvUrg0IntMask_LSB 0x20
-#define QIB_7322_IntMask_RcvUrg0IntMask_MSB 0x20
-#define QIB_7322_IntMask_RcvUrg0IntMask_RMASK 0x1
-#define QIB_7322_IntMask_ErrIntMask_1_LSB 0x1F
-#define QIB_7322_IntMask_ErrIntMask_1_MSB 0x1F
-#define QIB_7322_IntMask_ErrIntMask_1_RMASK 0x1
-#define QIB_7322_IntMask_ErrIntMask_0_LSB 0x1E
-#define QIB_7322_IntMask_ErrIntMask_0_MSB 0x1E
-#define QIB_7322_IntMask_ErrIntMask_0_RMASK 0x1
-#define QIB_7322_IntMask_ErrIntMask_LSB 0x1D
-#define QIB_7322_IntMask_ErrIntMask_MSB 0x1D
-#define QIB_7322_IntMask_ErrIntMask_RMASK 0x1
-#define QIB_7322_IntMask_AssertGPIOIntMask_LSB 0x1C
-#define QIB_7322_IntMask_AssertGPIOIntMask_MSB 0x1C
-#define QIB_7322_IntMask_AssertGPIOIntMask_RMASK 0x1
-#define QIB_7322_IntMask_SendDoneIntMask_1_LSB 0x19
-#define QIB_7322_IntMask_SendDoneIntMask_1_MSB 0x19
-#define QIB_7322_IntMask_SendDoneIntMask_1_RMASK 0x1
-#define QIB_7322_IntMask_SendDoneIntMask_0_LSB 0x18
-#define QIB_7322_IntMask_SendDoneIntMask_0_MSB 0x18
-#define QIB_7322_IntMask_SendDoneIntMask_0_RMASK 0x1
-#define QIB_7322_IntMask_SendBufAvailIntMask_LSB 0x17
-#define QIB_7322_IntMask_SendBufAvailIntMask_MSB 0x17
-#define QIB_7322_IntMask_SendBufAvailIntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail17IntMask_LSB 0x11
-#define QIB_7322_IntMask_RcvAvail17IntMask_MSB 0x11
-#define QIB_7322_IntMask_RcvAvail17IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail16IntMask_LSB 0x10
-#define QIB_7322_IntMask_RcvAvail16IntMask_MSB 0x10
-#define QIB_7322_IntMask_RcvAvail16IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail15IntMask_LSB 0xF
-#define QIB_7322_IntMask_RcvAvail15IntMask_MSB 0xF
-#define QIB_7322_IntMask_RcvAvail15IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail14IntMask_LSB 0xE
-#define QIB_7322_IntMask_RcvAvail14IntMask_MSB 0xE
-#define QIB_7322_IntMask_RcvAvail14IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail13IntMask_LSB 0xD
-#define QIB_7322_IntMask_RcvAvail13IntMask_MSB 0xD
-#define QIB_7322_IntMask_RcvAvail13IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail12IntMask_LSB 0xC
-#define QIB_7322_IntMask_RcvAvail12IntMask_MSB 0xC
-#define QIB_7322_IntMask_RcvAvail12IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail11IntMask_LSB 0xB
-#define QIB_7322_IntMask_RcvAvail11IntMask_MSB 0xB
-#define QIB_7322_IntMask_RcvAvail11IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail10IntMask_LSB 0xA
-#define QIB_7322_IntMask_RcvAvail10IntMask_MSB 0xA
-#define QIB_7322_IntMask_RcvAvail10IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail9IntMask_LSB 0x9
-#define QIB_7322_IntMask_RcvAvail9IntMask_MSB 0x9
-#define QIB_7322_IntMask_RcvAvail9IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail8IntMask_LSB 0x8
-#define QIB_7322_IntMask_RcvAvail8IntMask_MSB 0x8
-#define QIB_7322_IntMask_RcvAvail8IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail7IntMask_LSB 0x7
-#define QIB_7322_IntMask_RcvAvail7IntMask_MSB 0x7
-#define QIB_7322_IntMask_RcvAvail7IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail6IntMask_LSB 0x6
-#define QIB_7322_IntMask_RcvAvail6IntMask_MSB 0x6
-#define QIB_7322_IntMask_RcvAvail6IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail5IntMask_LSB 0x5
-#define QIB_7322_IntMask_RcvAvail5IntMask_MSB 0x5
-#define QIB_7322_IntMask_RcvAvail5IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail4IntMask_LSB 0x4
-#define QIB_7322_IntMask_RcvAvail4IntMask_MSB 0x4
-#define QIB_7322_IntMask_RcvAvail4IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail3IntMask_LSB 0x3
-#define QIB_7322_IntMask_RcvAvail3IntMask_MSB 0x3
-#define QIB_7322_IntMask_RcvAvail3IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail2IntMask_LSB 0x2
-#define QIB_7322_IntMask_RcvAvail2IntMask_MSB 0x2
-#define QIB_7322_IntMask_RcvAvail2IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail1IntMask_LSB 0x1
-#define QIB_7322_IntMask_RcvAvail1IntMask_MSB 0x1
-#define QIB_7322_IntMask_RcvAvail1IntMask_RMASK 0x1
-#define QIB_7322_IntMask_RcvAvail0IntMask_LSB 0x0
-#define QIB_7322_IntMask_RcvAvail0IntMask_MSB 0x0
-#define QIB_7322_IntMask_RcvAvail0IntMask_RMASK 0x1
-
-#define QIB_7322_IntStatus_OFFS 0x70
-#define QIB_7322_IntStatus_DEF 0x0000000000000000
-#define QIB_7322_IntStatus_SDmaInt_1_LSB 0x3F
-#define QIB_7322_IntStatus_SDmaInt_1_MSB 0x3F
-#define QIB_7322_IntStatus_SDmaInt_1_RMASK 0x1
-#define QIB_7322_IntStatus_SDmaInt_0_LSB 0x3E
-#define QIB_7322_IntStatus_SDmaInt_0_MSB 0x3E
-#define QIB_7322_IntStatus_SDmaInt_0_RMASK 0x1
-#define QIB_7322_IntStatus_SDmaProgressInt_1_LSB 0x3D
-#define QIB_7322_IntStatus_SDmaProgressInt_1_MSB 0x3D
-#define QIB_7322_IntStatus_SDmaProgressInt_1_RMASK 0x1
-#define QIB_7322_IntStatus_SDmaProgressInt_0_LSB 0x3C
-#define QIB_7322_IntStatus_SDmaProgressInt_0_MSB 0x3C
-#define QIB_7322_IntStatus_SDmaProgressInt_0_RMASK 0x1
-#define QIB_7322_IntStatus_SDmaIdleInt_1_LSB 0x3B
-#define QIB_7322_IntStatus_SDmaIdleInt_1_MSB 0x3B
-#define QIB_7322_IntStatus_SDmaIdleInt_1_RMASK 0x1
-#define QIB_7322_IntStatus_SDmaIdleInt_0_LSB 0x3A
-#define QIB_7322_IntStatus_SDmaIdleInt_0_MSB 0x3A
-#define QIB_7322_IntStatus_SDmaIdleInt_0_RMASK 0x1
-#define QIB_7322_IntStatus_SDmaCleanupDone_1_LSB 0x39
-#define QIB_7322_IntStatus_SDmaCleanupDone_1_MSB 0x39
-#define QIB_7322_IntStatus_SDmaCleanupDone_1_RMASK 0x1
-#define QIB_7322_IntStatus_SDmaCleanupDone_0_LSB 0x38
-#define QIB_7322_IntStatus_SDmaCleanupDone_0_MSB 0x38
-#define QIB_7322_IntStatus_SDmaCleanupDone_0_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg17_LSB 0x31
-#define QIB_7322_IntStatus_RcvUrg17_MSB 0x31
-#define QIB_7322_IntStatus_RcvUrg17_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg16_LSB 0x30
-#define QIB_7322_IntStatus_RcvUrg16_MSB 0x30
-#define QIB_7322_IntStatus_RcvUrg16_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg15_LSB 0x2F
-#define QIB_7322_IntStatus_RcvUrg15_MSB 0x2F
-#define QIB_7322_IntStatus_RcvUrg15_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg14_LSB 0x2E
-#define QIB_7322_IntStatus_RcvUrg14_MSB 0x2E
-#define QIB_7322_IntStatus_RcvUrg14_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg13_LSB 0x2D
-#define QIB_7322_IntStatus_RcvUrg13_MSB 0x2D
-#define QIB_7322_IntStatus_RcvUrg13_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg12_LSB 0x2C
-#define QIB_7322_IntStatus_RcvUrg12_MSB 0x2C
-#define QIB_7322_IntStatus_RcvUrg12_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg11_LSB 0x2B
-#define QIB_7322_IntStatus_RcvUrg11_MSB 0x2B
-#define QIB_7322_IntStatus_RcvUrg11_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg10_LSB 0x2A
-#define QIB_7322_IntStatus_RcvUrg10_MSB 0x2A
-#define QIB_7322_IntStatus_RcvUrg10_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg9_LSB 0x29
-#define QIB_7322_IntStatus_RcvUrg9_MSB 0x29
-#define QIB_7322_IntStatus_RcvUrg9_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg8_LSB 0x28
-#define QIB_7322_IntStatus_RcvUrg8_MSB 0x28
-#define QIB_7322_IntStatus_RcvUrg8_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg7_LSB 0x27
-#define QIB_7322_IntStatus_RcvUrg7_MSB 0x27
-#define QIB_7322_IntStatus_RcvUrg7_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg6_LSB 0x26
-#define QIB_7322_IntStatus_RcvUrg6_MSB 0x26
-#define QIB_7322_IntStatus_RcvUrg6_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg5_LSB 0x25
-#define QIB_7322_IntStatus_RcvUrg5_MSB 0x25
-#define QIB_7322_IntStatus_RcvUrg5_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg4_LSB 0x24
-#define QIB_7322_IntStatus_RcvUrg4_MSB 0x24
-#define QIB_7322_IntStatus_RcvUrg4_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg3_LSB 0x23
-#define QIB_7322_IntStatus_RcvUrg3_MSB 0x23
-#define QIB_7322_IntStatus_RcvUrg3_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg2_LSB 0x22
-#define QIB_7322_IntStatus_RcvUrg2_MSB 0x22
-#define QIB_7322_IntStatus_RcvUrg2_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg1_LSB 0x21
-#define QIB_7322_IntStatus_RcvUrg1_MSB 0x21
-#define QIB_7322_IntStatus_RcvUrg1_RMASK 0x1
-#define QIB_7322_IntStatus_RcvUrg0_LSB 0x20
-#define QIB_7322_IntStatus_RcvUrg0_MSB 0x20
-#define QIB_7322_IntStatus_RcvUrg0_RMASK 0x1
-#define QIB_7322_IntStatus_Err_1_LSB 0x1F
-#define QIB_7322_IntStatus_Err_1_MSB 0x1F
-#define QIB_7322_IntStatus_Err_1_RMASK 0x1
-#define QIB_7322_IntStatus_Err_0_LSB 0x1E
-#define QIB_7322_IntStatus_Err_0_MSB 0x1E
-#define QIB_7322_IntStatus_Err_0_RMASK 0x1
-#define QIB_7322_IntStatus_Err_LSB 0x1D
-#define QIB_7322_IntStatus_Err_MSB 0x1D
-#define QIB_7322_IntStatus_Err_RMASK 0x1
-#define QIB_7322_IntStatus_AssertGPIO_LSB 0x1C
-#define QIB_7322_IntStatus_AssertGPIO_MSB 0x1C
-#define QIB_7322_IntStatus_AssertGPIO_RMASK 0x1
-#define QIB_7322_IntStatus_SendDone_1_LSB 0x19
-#define QIB_7322_IntStatus_SendDone_1_MSB 0x19
-#define QIB_7322_IntStatus_SendDone_1_RMASK 0x1
-#define QIB_7322_IntStatus_SendDone_0_LSB 0x18
-#define QIB_7322_IntStatus_SendDone_0_MSB 0x18
-#define QIB_7322_IntStatus_SendDone_0_RMASK 0x1
-#define QIB_7322_IntStatus_SendBufAvail_LSB 0x17
-#define QIB_7322_IntStatus_SendBufAvail_MSB 0x17
-#define QIB_7322_IntStatus_SendBufAvail_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail17_LSB 0x11
-#define QIB_7322_IntStatus_RcvAvail17_MSB 0x11
-#define QIB_7322_IntStatus_RcvAvail17_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail16_LSB 0x10
-#define QIB_7322_IntStatus_RcvAvail16_MSB 0x10
-#define QIB_7322_IntStatus_RcvAvail16_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail15_LSB 0xF
-#define QIB_7322_IntStatus_RcvAvail15_MSB 0xF
-#define QIB_7322_IntStatus_RcvAvail15_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail14_LSB 0xE
-#define QIB_7322_IntStatus_RcvAvail14_MSB 0xE
-#define QIB_7322_IntStatus_RcvAvail14_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail13_LSB 0xD
-#define QIB_7322_IntStatus_RcvAvail13_MSB 0xD
-#define QIB_7322_IntStatus_RcvAvail13_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail12_LSB 0xC
-#define QIB_7322_IntStatus_RcvAvail12_MSB 0xC
-#define QIB_7322_IntStatus_RcvAvail12_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail11_LSB 0xB
-#define QIB_7322_IntStatus_RcvAvail11_MSB 0xB
-#define QIB_7322_IntStatus_RcvAvail11_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail10_LSB 0xA
-#define QIB_7322_IntStatus_RcvAvail10_MSB 0xA
-#define QIB_7322_IntStatus_RcvAvail10_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail9_LSB 0x9
-#define QIB_7322_IntStatus_RcvAvail9_MSB 0x9
-#define QIB_7322_IntStatus_RcvAvail9_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail8_LSB 0x8
-#define QIB_7322_IntStatus_RcvAvail8_MSB 0x8
-#define QIB_7322_IntStatus_RcvAvail8_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail7_LSB 0x7
-#define QIB_7322_IntStatus_RcvAvail7_MSB 0x7
-#define QIB_7322_IntStatus_RcvAvail7_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail6_LSB 0x6
-#define QIB_7322_IntStatus_RcvAvail6_MSB 0x6
-#define QIB_7322_IntStatus_RcvAvail6_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail5_LSB 0x5
-#define QIB_7322_IntStatus_RcvAvail5_MSB 0x5
-#define QIB_7322_IntStatus_RcvAvail5_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail4_LSB 0x4
-#define QIB_7322_IntStatus_RcvAvail4_MSB 0x4
-#define QIB_7322_IntStatus_RcvAvail4_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail3_LSB 0x3
-#define QIB_7322_IntStatus_RcvAvail3_MSB 0x3
-#define QIB_7322_IntStatus_RcvAvail3_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail2_LSB 0x2
-#define QIB_7322_IntStatus_RcvAvail2_MSB 0x2
-#define QIB_7322_IntStatus_RcvAvail2_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail1_LSB 0x1
-#define QIB_7322_IntStatus_RcvAvail1_MSB 0x1
-#define QIB_7322_IntStatus_RcvAvail1_RMASK 0x1
-#define QIB_7322_IntStatus_RcvAvail0_LSB 0x0
-#define QIB_7322_IntStatus_RcvAvail0_MSB 0x0
-#define QIB_7322_IntStatus_RcvAvail0_RMASK 0x1
-
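The _LSB/_MSB/_RMASK triples above encode each field as a bit range plus a right-justified mask. Below is a minimal sketch of the usual extraction idiom built on such constants; the QIB_FIELD macro and the accessor are illustrative only, not part of this header.

#include <stdint.h>

/* Illustrative only: shift a 64-bit register value down to the field's
 * LSB and mask it with the generated right-justified RMASK. */
#define QIB_FIELD(regval, reg, field) \
	(((uint64_t)(regval) >> reg##_##field##_LSB) & reg##_##field##_RMASK)

/* Example: test the RcvUrg0 bit of a previously read IntStatus word. */
static inline int intstatus_rcvurg0(uint64_t intstatus)
{
	return (int)QIB_FIELD(intstatus, QIB_7322_IntStatus, RcvUrg0);
}
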
-#define QIB_7322_IntClear_OFFS 0x78
-#define QIB_7322_IntClear_DEF 0x0000000000000000
-#define QIB_7322_IntClear_SDmaIntClear_1_LSB 0x3F
-#define QIB_7322_IntClear_SDmaIntClear_1_MSB 0x3F
-#define QIB_7322_IntClear_SDmaIntClear_1_RMASK 0x1
-#define QIB_7322_IntClear_SDmaIntClear_0_LSB 0x3E
-#define QIB_7322_IntClear_SDmaIntClear_0_MSB 0x3E
-#define QIB_7322_IntClear_SDmaIntClear_0_RMASK 0x1
-#define QIB_7322_IntClear_SDmaProgressIntClear_1_LSB 0x3D
-#define QIB_7322_IntClear_SDmaProgressIntClear_1_MSB 0x3D
-#define QIB_7322_IntClear_SDmaProgressIntClear_1_RMASK 0x1
-#define QIB_7322_IntClear_SDmaProgressIntClear_0_LSB 0x3C
-#define QIB_7322_IntClear_SDmaProgressIntClear_0_MSB 0x3C
-#define QIB_7322_IntClear_SDmaProgressIntClear_0_RMASK 0x1
-#define QIB_7322_IntClear_SDmaIdleIntClear_1_LSB 0x3B
-#define QIB_7322_IntClear_SDmaIdleIntClear_1_MSB 0x3B
-#define QIB_7322_IntClear_SDmaIdleIntClear_1_RMASK 0x1
-#define QIB_7322_IntClear_SDmaIdleIntClear_0_LSB 0x3A
-#define QIB_7322_IntClear_SDmaIdleIntClear_0_MSB 0x3A
-#define QIB_7322_IntClear_SDmaIdleIntClear_0_RMASK 0x1
-#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_LSB 0x39
-#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_MSB 0x39
-#define QIB_7322_IntClear_SDmaCleanupDoneClear_1_RMASK 0x1
-#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_LSB 0x38
-#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_MSB 0x38
-#define QIB_7322_IntClear_SDmaCleanupDoneClear_0_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg17IntClear_LSB 0x31
-#define QIB_7322_IntClear_RcvUrg17IntClear_MSB 0x31
-#define QIB_7322_IntClear_RcvUrg17IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg16IntClear_LSB 0x30
-#define QIB_7322_IntClear_RcvUrg16IntClear_MSB 0x30
-#define QIB_7322_IntClear_RcvUrg16IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg15IntClear_LSB 0x2F
-#define QIB_7322_IntClear_RcvUrg15IntClear_MSB 0x2F
-#define QIB_7322_IntClear_RcvUrg15IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg14IntClear_LSB 0x2E
-#define QIB_7322_IntClear_RcvUrg14IntClear_MSB 0x2E
-#define QIB_7322_IntClear_RcvUrg14IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg13IntClear_LSB 0x2D
-#define QIB_7322_IntClear_RcvUrg13IntClear_MSB 0x2D
-#define QIB_7322_IntClear_RcvUrg13IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg12IntClear_LSB 0x2C
-#define QIB_7322_IntClear_RcvUrg12IntClear_MSB 0x2C
-#define QIB_7322_IntClear_RcvUrg12IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg11IntClear_LSB 0x2B
-#define QIB_7322_IntClear_RcvUrg11IntClear_MSB 0x2B
-#define QIB_7322_IntClear_RcvUrg11IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg10IntClear_LSB 0x2A
-#define QIB_7322_IntClear_RcvUrg10IntClear_MSB 0x2A
-#define QIB_7322_IntClear_RcvUrg10IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg9IntClear_LSB 0x29
-#define QIB_7322_IntClear_RcvUrg9IntClear_MSB 0x29
-#define QIB_7322_IntClear_RcvUrg9IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg8IntClear_LSB 0x28
-#define QIB_7322_IntClear_RcvUrg8IntClear_MSB 0x28
-#define QIB_7322_IntClear_RcvUrg8IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg7IntClear_LSB 0x27
-#define QIB_7322_IntClear_RcvUrg7IntClear_MSB 0x27
-#define QIB_7322_IntClear_RcvUrg7IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg6IntClear_LSB 0x26
-#define QIB_7322_IntClear_RcvUrg6IntClear_MSB 0x26
-#define QIB_7322_IntClear_RcvUrg6IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg5IntClear_LSB 0x25
-#define QIB_7322_IntClear_RcvUrg5IntClear_MSB 0x25
-#define QIB_7322_IntClear_RcvUrg5IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg4IntClear_LSB 0x24
-#define QIB_7322_IntClear_RcvUrg4IntClear_MSB 0x24
-#define QIB_7322_IntClear_RcvUrg4IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg3IntClear_LSB 0x23
-#define QIB_7322_IntClear_RcvUrg3IntClear_MSB 0x23
-#define QIB_7322_IntClear_RcvUrg3IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg2IntClear_LSB 0x22
-#define QIB_7322_IntClear_RcvUrg2IntClear_MSB 0x22
-#define QIB_7322_IntClear_RcvUrg2IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg1IntClear_LSB 0x21
-#define QIB_7322_IntClear_RcvUrg1IntClear_MSB 0x21
-#define QIB_7322_IntClear_RcvUrg1IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvUrg0IntClear_LSB 0x20
-#define QIB_7322_IntClear_RcvUrg0IntClear_MSB 0x20
-#define QIB_7322_IntClear_RcvUrg0IntClear_RMASK 0x1
-#define QIB_7322_IntClear_ErrIntClear_1_LSB 0x1F
-#define QIB_7322_IntClear_ErrIntClear_1_MSB 0x1F
-#define QIB_7322_IntClear_ErrIntClear_1_RMASK 0x1
-#define QIB_7322_IntClear_ErrIntClear_0_LSB 0x1E
-#define QIB_7322_IntClear_ErrIntClear_0_MSB 0x1E
-#define QIB_7322_IntClear_ErrIntClear_0_RMASK 0x1
-#define QIB_7322_IntClear_ErrIntClear_LSB 0x1D
-#define QIB_7322_IntClear_ErrIntClear_MSB 0x1D
-#define QIB_7322_IntClear_ErrIntClear_RMASK 0x1
-#define QIB_7322_IntClear_AssertGPIOIntClear_LSB 0x1C
-#define QIB_7322_IntClear_AssertGPIOIntClear_MSB 0x1C
-#define QIB_7322_IntClear_AssertGPIOIntClear_RMASK 0x1
-#define QIB_7322_IntClear_SendDoneIntClear_1_LSB 0x19
-#define QIB_7322_IntClear_SendDoneIntClear_1_MSB 0x19
-#define QIB_7322_IntClear_SendDoneIntClear_1_RMASK 0x1
-#define QIB_7322_IntClear_SendDoneIntClear_0_LSB 0x18
-#define QIB_7322_IntClear_SendDoneIntClear_0_MSB 0x18
-#define QIB_7322_IntClear_SendDoneIntClear_0_RMASK 0x1
-#define QIB_7322_IntClear_SendBufAvailIntClear_LSB 0x17
-#define QIB_7322_IntClear_SendBufAvailIntClear_MSB 0x17
-#define QIB_7322_IntClear_SendBufAvailIntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail17IntClear_LSB 0x11
-#define QIB_7322_IntClear_RcvAvail17IntClear_MSB 0x11
-#define QIB_7322_IntClear_RcvAvail17IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail16IntClear_LSB 0x10
-#define QIB_7322_IntClear_RcvAvail16IntClear_MSB 0x10
-#define QIB_7322_IntClear_RcvAvail16IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail15IntClear_LSB 0xF
-#define QIB_7322_IntClear_RcvAvail15IntClear_MSB 0xF
-#define QIB_7322_IntClear_RcvAvail15IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail14IntClear_LSB 0xE
-#define QIB_7322_IntClear_RcvAvail14IntClear_MSB 0xE
-#define QIB_7322_IntClear_RcvAvail14IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail13IntClear_LSB 0xD
-#define QIB_7322_IntClear_RcvAvail13IntClear_MSB 0xD
-#define QIB_7322_IntClear_RcvAvail13IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail12IntClear_LSB 0xC
-#define QIB_7322_IntClear_RcvAvail12IntClear_MSB 0xC
-#define QIB_7322_IntClear_RcvAvail12IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail11IntClear_LSB 0xB
-#define QIB_7322_IntClear_RcvAvail11IntClear_MSB 0xB
-#define QIB_7322_IntClear_RcvAvail11IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail10IntClear_LSB 0xA
-#define QIB_7322_IntClear_RcvAvail10IntClear_MSB 0xA
-#define QIB_7322_IntClear_RcvAvail10IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail9IntClear_LSB 0x9
-#define QIB_7322_IntClear_RcvAvail9IntClear_MSB 0x9
-#define QIB_7322_IntClear_RcvAvail9IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail8IntClear_LSB 0x8
-#define QIB_7322_IntClear_RcvAvail8IntClear_MSB 0x8
-#define QIB_7322_IntClear_RcvAvail8IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail7IntClear_LSB 0x7
-#define QIB_7322_IntClear_RcvAvail7IntClear_MSB 0x7
-#define QIB_7322_IntClear_RcvAvail7IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail6IntClear_LSB 0x6
-#define QIB_7322_IntClear_RcvAvail6IntClear_MSB 0x6
-#define QIB_7322_IntClear_RcvAvail6IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail5IntClear_LSB 0x5
-#define QIB_7322_IntClear_RcvAvail5IntClear_MSB 0x5
-#define QIB_7322_IntClear_RcvAvail5IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail4IntClear_LSB 0x4
-#define QIB_7322_IntClear_RcvAvail4IntClear_MSB 0x4
-#define QIB_7322_IntClear_RcvAvail4IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail3IntClear_LSB 0x3
-#define QIB_7322_IntClear_RcvAvail3IntClear_MSB 0x3
-#define QIB_7322_IntClear_RcvAvail3IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail2IntClear_LSB 0x2
-#define QIB_7322_IntClear_RcvAvail2IntClear_MSB 0x2
-#define QIB_7322_IntClear_RcvAvail2IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail1IntClear_LSB 0x1
-#define QIB_7322_IntClear_RcvAvail1IntClear_MSB 0x1
-#define QIB_7322_IntClear_RcvAvail1IntClear_RMASK 0x1
-#define QIB_7322_IntClear_RcvAvail0IntClear_LSB 0x0
-#define QIB_7322_IntClear_RcvAvail0IntClear_MSB 0x0
-#define QIB_7322_IntClear_RcvAvail0IntClear_RMASK 0x1
-
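IntClear mirrors IntStatus bit for bit; writing a 1 to a *IntClear position is presumably how the matching status bit is acknowledged (write-1-to-clear is an assumption here, not stated in this header). A hedged sketch, where writeq() is the standard kernel MMIO helper and 'kbase' is a hypothetical ioremap'd register base:

#include <linux/io.h>

/* Assumptions: write-1-to-clear semantics, and 'kbase' pointing at the
 * mapped 7322 register space; neither comes from this header. */
static inline void ack_rcvurg0(void __iomem *kbase)
{
	writeq(1ULL << QIB_7322_IntClear_RcvUrg0IntClear_LSB,
	       kbase + QIB_7322_IntClear_OFFS);
}
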
-#define QIB_7322_ErrMask_OFFS 0x80
-#define QIB_7322_ErrMask_DEF 0x0000000000000000
-#define QIB_7322_ErrMask_ResetNegatedMask_LSB 0x3F
-#define QIB_7322_ErrMask_ResetNegatedMask_MSB 0x3F
-#define QIB_7322_ErrMask_ResetNegatedMask_RMASK 0x1
-#define QIB_7322_ErrMask_HardwareErrMask_LSB 0x3E
-#define QIB_7322_ErrMask_HardwareErrMask_MSB 0x3E
-#define QIB_7322_ErrMask_HardwareErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_InvalidAddrErrMask_LSB 0x3D
-#define QIB_7322_ErrMask_InvalidAddrErrMask_MSB 0x3D
-#define QIB_7322_ErrMask_InvalidAddrErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_SDmaVL15ErrMask_LSB 0x38
-#define QIB_7322_ErrMask_SDmaVL15ErrMask_MSB 0x38
-#define QIB_7322_ErrMask_SDmaVL15ErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_LSB 0x37
-#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_MSB 0x37
-#define QIB_7322_ErrMask_SBufVL15MisUseErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_InvalidEEPCmdMask_LSB 0x35
-#define QIB_7322_ErrMask_InvalidEEPCmdMask_MSB 0x35
-#define QIB_7322_ErrMask_InvalidEEPCmdMask_RMASK 0x1
-#define QIB_7322_ErrMask_RcvContextShareErrMask_LSB 0x34
-#define QIB_7322_ErrMask_RcvContextShareErrMask_MSB 0x34
-#define QIB_7322_ErrMask_RcvContextShareErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_SendVLMismatchErrMask_LSB 0x24
-#define QIB_7322_ErrMask_SendVLMismatchErrMask_MSB 0x24
-#define QIB_7322_ErrMask_SendVLMismatchErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_SendArmLaunchErrMask_LSB 0x23
-#define QIB_7322_ErrMask_SendArmLaunchErrMask_MSB 0x23
-#define QIB_7322_ErrMask_SendArmLaunchErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_LSB 0x1B
-#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_MSB 0x1B
-#define QIB_7322_ErrMask_SendSpecialTriggerErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_SDmaWrongPortErrMask_LSB 0x1A
-#define QIB_7322_ErrMask_SDmaWrongPortErrMask_MSB 0x1A
-#define QIB_7322_ErrMask_SDmaWrongPortErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_LSB 0x19
-#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_MSB 0x19
-#define QIB_7322_ErrMask_SDmaBufMaskDuplicateErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_RcvHdrFullErrMask_LSB 0xD
-#define QIB_7322_ErrMask_RcvHdrFullErrMask_MSB 0xD
-#define QIB_7322_ErrMask_RcvHdrFullErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_RcvEgrFullErrMask_LSB 0xC
-#define QIB_7322_ErrMask_RcvEgrFullErrMask_MSB 0xC
-#define QIB_7322_ErrMask_RcvEgrFullErrMask_RMASK 0x1
-
-#define QIB_7322_ErrStatus_OFFS 0x88
-#define QIB_7322_ErrStatus_DEF 0x0000000000000000
-#define QIB_7322_ErrStatus_ResetNegated_LSB 0x3F
-#define QIB_7322_ErrStatus_ResetNegated_MSB 0x3F
-#define QIB_7322_ErrStatus_ResetNegated_RMASK 0x1
-#define QIB_7322_ErrStatus_HardwareErr_LSB 0x3E
-#define QIB_7322_ErrStatus_HardwareErr_MSB 0x3E
-#define QIB_7322_ErrStatus_HardwareErr_RMASK 0x1
-#define QIB_7322_ErrStatus_InvalidAddrErr_LSB 0x3D
-#define QIB_7322_ErrStatus_InvalidAddrErr_MSB 0x3D
-#define QIB_7322_ErrStatus_InvalidAddrErr_RMASK 0x1
-#define QIB_7322_ErrStatus_SDmaVL15Err_LSB 0x38
-#define QIB_7322_ErrStatus_SDmaVL15Err_MSB 0x38
-#define QIB_7322_ErrStatus_SDmaVL15Err_RMASK 0x1
-#define QIB_7322_ErrStatus_SBufVL15MisUseErr_LSB 0x37
-#define QIB_7322_ErrStatus_SBufVL15MisUseErr_MSB 0x37
-#define QIB_7322_ErrStatus_SBufVL15MisUseErr_RMASK 0x1
-#define QIB_7322_ErrStatus_InvalidEEPCmdErr_LSB 0x35
-#define QIB_7322_ErrStatus_InvalidEEPCmdErr_MSB 0x35
-#define QIB_7322_ErrStatus_InvalidEEPCmdErr_RMASK 0x1
-#define QIB_7322_ErrStatus_RcvContextShareErr_LSB 0x34
-#define QIB_7322_ErrStatus_RcvContextShareErr_MSB 0x34
-#define QIB_7322_ErrStatus_RcvContextShareErr_RMASK 0x1
-#define QIB_7322_ErrStatus_SendVLMismatchErr_LSB 0x24
-#define QIB_7322_ErrStatus_SendVLMismatchErr_MSB 0x24
-#define QIB_7322_ErrStatus_SendVLMismatchErr_RMASK 0x1
-#define QIB_7322_ErrStatus_SendArmLaunchErr_LSB 0x23
-#define QIB_7322_ErrStatus_SendArmLaunchErr_MSB 0x23
-#define QIB_7322_ErrStatus_SendArmLaunchErr_RMASK 0x1
-#define QIB_7322_ErrStatus_SendSpecialTriggerErr_LSB 0x1B
-#define QIB_7322_ErrStatus_SendSpecialTriggerErr_MSB 0x1B
-#define QIB_7322_ErrStatus_SendSpecialTriggerErr_RMASK 0x1
-#define QIB_7322_ErrStatus_SDmaWrongPortErr_LSB 0x1A
-#define QIB_7322_ErrStatus_SDmaWrongPortErr_MSB 0x1A
-#define QIB_7322_ErrStatus_SDmaWrongPortErr_RMASK 0x1
-#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_LSB 0x19
-#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_MSB 0x19
-#define QIB_7322_ErrStatus_SDmaBufMaskDuplicateErr_RMASK 0x1
-#define QIB_7322_ErrStatus_RcvHdrFullErr_LSB 0xD
-#define QIB_7322_ErrStatus_RcvHdrFullErr_MSB 0xD
-#define QIB_7322_ErrStatus_RcvHdrFullErr_RMASK 0x1
-#define QIB_7322_ErrStatus_RcvEgrFullErr_LSB 0xC
-#define QIB_7322_ErrStatus_RcvEgrFullErr_MSB 0xC
-#define QIB_7322_ErrStatus_RcvEgrFullErr_RMASK 0x1
-
-#define QIB_7322_ErrClear_OFFS 0x90
-#define QIB_7322_ErrClear_DEF 0x0000000000000000
-#define QIB_7322_ErrClear_ResetNegatedClear_LSB 0x3F
-#define QIB_7322_ErrClear_ResetNegatedClear_MSB 0x3F
-#define QIB_7322_ErrClear_ResetNegatedClear_RMASK 0x1
-#define QIB_7322_ErrClear_HardwareErrClear_LSB 0x3E
-#define QIB_7322_ErrClear_HardwareErrClear_MSB 0x3E
-#define QIB_7322_ErrClear_HardwareErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_InvalidAddrErrClear_LSB 0x3D
-#define QIB_7322_ErrClear_InvalidAddrErrClear_MSB 0x3D
-#define QIB_7322_ErrClear_InvalidAddrErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_SDmaVL15ErrClear_LSB 0x38
-#define QIB_7322_ErrClear_SDmaVL15ErrClear_MSB 0x38
-#define QIB_7322_ErrClear_SDmaVL15ErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_LSB 0x37
-#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_MSB 0x37
-#define QIB_7322_ErrClear_SBufVL15MisUseErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_LSB 0x35
-#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_MSB 0x35
-#define QIB_7322_ErrClear_InvalidEEPCmdErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_RcvContextShareErrClear_LSB 0x34
-#define QIB_7322_ErrClear_RcvContextShareErrClear_MSB 0x34
-#define QIB_7322_ErrClear_RcvContextShareErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_SendVLMismatchErrMask_LSB 0x24
-#define QIB_7322_ErrClear_SendVLMismatchErrMask_MSB 0x24
-#define QIB_7322_ErrClear_SendVLMismatchErrMask_RMASK 0x1
-#define QIB_7322_ErrClear_SendArmLaunchErrClear_LSB 0x23
-#define QIB_7322_ErrClear_SendArmLaunchErrClear_MSB 0x23
-#define QIB_7322_ErrClear_SendArmLaunchErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_LSB 0x1B
-#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_MSB 0x1B
-#define QIB_7322_ErrClear_SendSpecialTriggerErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_SDmaWrongPortErrClear_LSB 0x1A
-#define QIB_7322_ErrClear_SDmaWrongPortErrClear_MSB 0x1A
-#define QIB_7322_ErrClear_SDmaWrongPortErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_LSB 0x19
-#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_MSB 0x19
-#define QIB_7322_ErrClear_SDmaBufMaskDuplicateErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_RcvHdrFullErrClear_LSB 0xD
-#define QIB_7322_ErrClear_RcvHdrFullErrClear_MSB 0xD
-#define QIB_7322_ErrClear_RcvHdrFullErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_RcvEgrFullErrClear_LSB 0xC
-#define QIB_7322_ErrClear_RcvEgrFullErrClear_MSB 0xC
-#define QIB_7322_ErrClear_RcvEgrFullErrClear_RMASK 0x1
-
-#define QIB_7322_HwErrMask_OFFS 0x98
-#define QIB_7322_HwErrMask_DEF 0x0000000000000000
-#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_LSB 0x3F
-#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_MSB 0x3F
-#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_1_RMASK 0x1
-#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_LSB 0x3E
-#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_MSB 0x3E
-#define QIB_7322_HwErrMask_IBSerdesPClkNotDetectMask_0_RMASK 0x1
-#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_LSB 0x37
-#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_MSB 0x37
-#define QIB_7322_HwErrMask_PCIESerdesPClkNotDetectMask_RMASK 0x1
-#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_LSB 0x36
-#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_MSB 0x36
-#define QIB_7322_HwErrMask_PowerOnBISTFailedMask_RMASK 0x1
-#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_LSB 0x35
-#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_MSB 0x35
-#define QIB_7322_HwErrMask_TempsenseTholdReachedMask_RMASK 0x1
-#define QIB_7322_HwErrMask_MemoryErrMask_LSB 0x30
-#define QIB_7322_HwErrMask_MemoryErrMask_MSB 0x30
-#define QIB_7322_HwErrMask_MemoryErrMask_RMASK 0x1
-#define QIB_7322_HwErrMask_pcie_phy_txParityErr_LSB 0x22
-#define QIB_7322_HwErrMask_pcie_phy_txParityErr_MSB 0x22
-#define QIB_7322_HwErrMask_pcie_phy_txParityErr_RMASK 0x1
-#define QIB_7322_HwErrMask_PCIeBusParityErrMask_LSB 0x1F
-#define QIB_7322_HwErrMask_PCIeBusParityErrMask_MSB 0x21
-#define QIB_7322_HwErrMask_PCIeBusParityErrMask_RMASK 0x7
-#define QIB_7322_HwErrMask_PcieCplTimeoutMask_LSB 0x1E
-#define QIB_7322_HwErrMask_PcieCplTimeoutMask_MSB 0x1E
-#define QIB_7322_HwErrMask_PcieCplTimeoutMask_RMASK 0x1
-#define QIB_7322_HwErrMask_PciePoisonedTLPMask_LSB 0x1D
-#define QIB_7322_HwErrMask_PciePoisonedTLPMask_MSB 0x1D
-#define QIB_7322_HwErrMask_PciePoisonedTLPMask_RMASK 0x1
-#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_LSB 0x1C
-#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_MSB 0x1C
-#define QIB_7322_HwErrMask_SDmaMemReadErrMask_1_RMASK 0x1
-#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_LSB 0x1B
-#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_MSB 0x1B
-#define QIB_7322_HwErrMask_SDmaMemReadErrMask_0_RMASK 0x1
-#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_LSB 0xF
-#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_MSB 0xF
-#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_1_RMASK 0x1
-#define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_LSB 0xE
-#define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_MSB 0xE
-#define QIB_7322_HwErrMask_IBCBusToSPCParityErrMask_1_RMASK 0x1
-#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_LSB 0xD
-#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_MSB 0xD
-#define QIB_7322_HwErrMask_IBCBusFromSPCParityErrMask_0_RMASK 0x1
-#define QIB_7322_HwErrMask_statusValidNoEopMask_LSB 0xC
-#define QIB_7322_HwErrMask_statusValidNoEopMask_MSB 0xC
-#define QIB_7322_HwErrMask_statusValidNoEopMask_RMASK 0x1
-#define QIB_7322_HwErrMask_LATriggeredMask_LSB 0xB
-#define QIB_7322_HwErrMask_LATriggeredMask_MSB 0xB
-#define QIB_7322_HwErrMask_LATriggeredMask_RMASK 0x1
-
-#define QIB_7322_HwErrStatus_OFFS 0xA0
-#define QIB_7322_HwErrStatus_DEF 0x0000000000000000
-#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_LSB 0x3F
-#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_MSB 0x3F
-#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_1_RMASK 0x1
-#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_LSB 0x3E
-#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_MSB 0x3E
-#define QIB_7322_HwErrStatus_IBSerdesPClkNotDetect_0_RMASK 0x1
-#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_LSB 0x37
-#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_MSB 0x37
-#define QIB_7322_HwErrStatus_PCIESerdesPClkNotDetect_RMASK 0x1
-#define QIB_7322_HwErrStatus_PowerOnBISTFailed_LSB 0x36
-#define QIB_7322_HwErrStatus_PowerOnBISTFailed_MSB 0x36
-#define QIB_7322_HwErrStatus_PowerOnBISTFailed_RMASK 0x1
-#define QIB_7322_HwErrStatus_TempsenseTholdReached_LSB 0x35
-#define QIB_7322_HwErrStatus_TempsenseTholdReached_MSB 0x35
-#define QIB_7322_HwErrStatus_TempsenseTholdReached_RMASK 0x1
-#define QIB_7322_HwErrStatus_MemoryErr_LSB 0x30
-#define QIB_7322_HwErrStatus_MemoryErr_MSB 0x30
-#define QIB_7322_HwErrStatus_MemoryErr_RMASK 0x1
-#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_LSB 0x22
-#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_MSB 0x22
-#define QIB_7322_HwErrStatus_pcie_phy_txParityErr_RMASK 0x1
-#define QIB_7322_HwErrStatus_PCIeBusParity_LSB 0x1F
-#define QIB_7322_HwErrStatus_PCIeBusParity_MSB 0x21
-#define QIB_7322_HwErrStatus_PCIeBusParity_RMASK 0x7
-#define QIB_7322_HwErrStatus_PcieCplTimeout_LSB 0x1E
-#define QIB_7322_HwErrStatus_PcieCplTimeout_MSB 0x1E
-#define QIB_7322_HwErrStatus_PcieCplTimeout_RMASK 0x1
-#define QIB_7322_HwErrStatus_PciePoisonedTLP_LSB 0x1D
-#define QIB_7322_HwErrStatus_PciePoisonedTLP_MSB 0x1D
-#define QIB_7322_HwErrStatus_PciePoisonedTLP_RMASK 0x1
-#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_LSB 0x1C
-#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_MSB 0x1C
-#define QIB_7322_HwErrStatus_SDmaMemReadErr_1_RMASK 0x1
-#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_LSB 0x1B
-#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_MSB 0x1B
-#define QIB_7322_HwErrStatus_SDmaMemReadErr_0_RMASK 0x1
-#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_LSB 0xF
-#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_MSB 0xF
-#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_1_RMASK 0x1
-#define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_LSB 0xE
-#define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_MSB 0xE
-#define QIB_7322_HwErrStatus_IBCBusToSPCParityErr_1_RMASK 0x1
-#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_LSB 0xD
-#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_MSB 0xD
-#define QIB_7322_HwErrStatus_IBCBusFromSPCParityErr_0_RMASK 0x1
-#define QIB_7322_HwErrStatus_statusValidNoEop_LSB 0xC
-#define QIB_7322_HwErrStatus_statusValidNoEop_MSB 0xC
-#define QIB_7322_HwErrStatus_statusValidNoEop_RMASK 0x1
-#define QIB_7322_HwErrStatus_LATriggered_LSB 0xB
-#define QIB_7322_HwErrStatus_LATriggered_MSB 0xB
-#define QIB_7322_HwErrStatus_LATriggered_RMASK 0x1
-
-#define QIB_7322_HwErrClear_OFFS 0xA8
-#define QIB_7322_HwErrClear_DEF 0x0000000000000000
-#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_LSB 0x3F
-#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_MSB 0x3F
-#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_1_RMASK 0x1
-#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_LSB 0x3E
-#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_MSB 0x3E
-#define QIB_7322_HwErrClear_IBSerdesPClkNotDetectClear_0_RMASK 0x1
-#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_LSB 0x37
-#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_MSB 0x37
-#define QIB_7322_HwErrClear_PCIESerdesPClkNotDetectClear_RMASK 0x1
-#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_LSB 0x36
-#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_MSB 0x36
-#define QIB_7322_HwErrClear_PowerOnBISTFailedClear_RMASK 0x1
-#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_LSB 0x35
-#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_MSB 0x35
-#define QIB_7322_HwErrClear_TempsenseTholdReachedClear_RMASK 0x1
-#define QIB_7322_HwErrClear_MemoryErrClear_LSB 0x30
-#define QIB_7322_HwErrClear_MemoryErrClear_MSB 0x30
-#define QIB_7322_HwErrClear_MemoryErrClear_RMASK 0x1
-#define QIB_7322_HwErrClear_pcie_phy_txParityErr_LSB 0x22
-#define QIB_7322_HwErrClear_pcie_phy_txParityErr_MSB 0x22
-#define QIB_7322_HwErrClear_pcie_phy_txParityErr_RMASK 0x1
-#define QIB_7322_HwErrClear_PCIeBusParityClear_LSB 0x1F
-#define QIB_7322_HwErrClear_PCIeBusParityClear_MSB 0x21
-#define QIB_7322_HwErrClear_PCIeBusParityClear_RMASK 0x7
-#define QIB_7322_HwErrClear_PcieCplTimeoutClear_LSB 0x1E
-#define QIB_7322_HwErrClear_PcieCplTimeoutClear_MSB 0x1E
-#define QIB_7322_HwErrClear_PcieCplTimeoutClear_RMASK 0x1
-#define QIB_7322_HwErrClear_PciePoisonedTLPClear_LSB 0x1D
-#define QIB_7322_HwErrClear_PciePoisonedTLPClear_MSB 0x1D
-#define QIB_7322_HwErrClear_PciePoisonedTLPClear_RMASK 0x1
-#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_LSB 0x1C
-#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_MSB 0x1C
-#define QIB_7322_HwErrClear_SDmaMemReadErrClear_1_RMASK 0x1
-#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_LSB 0x1B
-#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_MSB 0x1B
-#define QIB_7322_HwErrClear_SDmaMemReadErrClear_0_RMASK 0x1
-#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_LSB 0xF
-#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_MSB 0xF
-#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_1_RMASK 0x1
-#define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_LSB 0xE
-#define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_MSB 0xE
-#define QIB_7322_HwErrClear_IBCBusToSPCParityErrClear_1_RMASK 0x1
-#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_LSB 0xD
-#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_MSB 0xD
-#define QIB_7322_HwErrClear_IBCBusFromSPCParityErrClear_0_RMASK 0x1
-#define QIB_7322_HwErrClear_statusValidNoEopClear_LSB 0xC
-#define QIB_7322_HwErrClear_statusValidNoEopClear_MSB 0xC
-#define QIB_7322_HwErrClear_statusValidNoEopClear_RMASK 0x1
-#define QIB_7322_HwErrClear_LATriggeredClear_LSB 0xB
-#define QIB_7322_HwErrClear_LATriggeredClear_MSB 0xB
-#define QIB_7322_HwErrClear_LATriggeredClear_RMASK 0x1
-
-#define QIB_7322_HwDiagCtrl_OFFS 0xB0
-#define QIB_7322_HwDiagCtrl_DEF 0x0000000000000000
-#define QIB_7322_HwDiagCtrl_Diagnostic_LSB 0x3F
-#define QIB_7322_HwDiagCtrl_Diagnostic_MSB 0x3F
-#define QIB_7322_HwDiagCtrl_Diagnostic_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_CounterWrEnable_LSB 0x3D
-#define QIB_7322_HwDiagCtrl_CounterWrEnable_MSB 0x3D
-#define QIB_7322_HwDiagCtrl_CounterWrEnable_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_CounterDisable_LSB 0x3C
-#define QIB_7322_HwDiagCtrl_CounterDisable_MSB 0x3C
-#define QIB_7322_HwDiagCtrl_CounterDisable_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_LSB 0x1F
-#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_MSB 0x22
-#define QIB_7322_HwDiagCtrl_forcePCIeBusParity_RMASK 0xF
-#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_LSB 0xF
-#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_MSB 0xF
-#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_1_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_LSB 0xE
-#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_MSB 0xE
-#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_1_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_LSB 0xD
-#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_MSB 0xD
-#define QIB_7322_HwDiagCtrl_ForceIBCBusFromSPCParityErr_0_RMASK 0x1
-#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_LSB 0xC
-#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_MSB 0xC
-#define QIB_7322_HwDiagCtrl_ForceIBCBusToSPCParityErr_0_RMASK 0x1
-
-#define QIB_7322_EXTStatus_OFFS 0xC0
-#define QIB_7322_EXTStatus_DEF 0x000000000000X000
-#define QIB_7322_EXTStatus_GPIOIn_LSB 0x30
-#define QIB_7322_EXTStatus_GPIOIn_MSB 0x3F
-#define QIB_7322_EXTStatus_GPIOIn_RMASK 0xFFFF
-#define QIB_7322_EXTStatus_MemBISTDisabled_LSB 0xF
-#define QIB_7322_EXTStatus_MemBISTDisabled_MSB 0xF
-#define QIB_7322_EXTStatus_MemBISTDisabled_RMASK 0x1
-#define QIB_7322_EXTStatus_MemBISTEndTest_LSB 0xE
-#define QIB_7322_EXTStatus_MemBISTEndTest_MSB 0xE
-#define QIB_7322_EXTStatus_MemBISTEndTest_RMASK 0x1
-
-#define QIB_7322_EXTCtrl_OFFS 0xC8
-#define QIB_7322_EXTCtrl_DEF 0x0000000000000000
-#define QIB_7322_EXTCtrl_GPIOOe_LSB 0x30
-#define QIB_7322_EXTCtrl_GPIOOe_MSB 0x3F
-#define QIB_7322_EXTCtrl_GPIOOe_RMASK 0xFFFF
-#define QIB_7322_EXTCtrl_GPIOInvert_LSB 0x20
-#define QIB_7322_EXTCtrl_GPIOInvert_MSB 0x2F
-#define QIB_7322_EXTCtrl_GPIOInvert_RMASK 0xFFFF
-#define QIB_7322_EXTCtrl_LEDPort1GreenOn_LSB 0x3
-#define QIB_7322_EXTCtrl_LEDPort1GreenOn_MSB 0x3
-#define QIB_7322_EXTCtrl_LEDPort1GreenOn_RMASK 0x1
-#define QIB_7322_EXTCtrl_LEDPort1YellowOn_LSB 0x2
-#define QIB_7322_EXTCtrl_LEDPort1YellowOn_MSB 0x2
-#define QIB_7322_EXTCtrl_LEDPort1YellowOn_RMASK 0x1
-#define QIB_7322_EXTCtrl_LEDPort0GreenOn_LSB 0x1
-#define QIB_7322_EXTCtrl_LEDPort0GreenOn_MSB 0x1
-#define QIB_7322_EXTCtrl_LEDPort0GreenOn_RMASK 0x1
-#define QIB_7322_EXTCtrl_LEDPort0YellowOn_LSB 0x0
-#define QIB_7322_EXTCtrl_LEDPort0YellowOn_MSB 0x0
-#define QIB_7322_EXTCtrl_LEDPort0YellowOn_RMASK 0x1
-
-#define QIB_7322_GPIOOut_OFFS 0xE0
-#define QIB_7322_GPIOOut_DEF 0x0000000000000000
-
-#define QIB_7322_GPIOMask_OFFS 0xE8
-#define QIB_7322_GPIOMask_DEF 0x0000000000000000
-
-#define QIB_7322_GPIOStatus_OFFS 0xF0
-#define QIB_7322_GPIOStatus_DEF 0x0000000000000000
-
-#define QIB_7322_GPIOClear_OFFS 0xF8
-#define QIB_7322_GPIOClear_DEF 0x0000000000000000
-
-#define QIB_7322_RcvCtrl_OFFS 0x100
-#define QIB_7322_RcvCtrl_DEF 0x0000000000000000
-#define QIB_7322_RcvCtrl_TidReDirect_LSB 0x30
-#define QIB_7322_RcvCtrl_TidReDirect_MSB 0x3F
-#define QIB_7322_RcvCtrl_TidReDirect_RMASK 0xFFFF
-#define QIB_7322_RcvCtrl_TailUpd_LSB 0x2F
-#define QIB_7322_RcvCtrl_TailUpd_MSB 0x2F
-#define QIB_7322_RcvCtrl_TailUpd_RMASK 0x1
-#define QIB_7322_RcvCtrl_XrcTypeCode_LSB 0x2C
-#define QIB_7322_RcvCtrl_XrcTypeCode_MSB 0x2E
-#define QIB_7322_RcvCtrl_XrcTypeCode_RMASK 0x7
-#define QIB_7322_RcvCtrl_TidFlowEnable_LSB 0x2B
-#define QIB_7322_RcvCtrl_TidFlowEnable_MSB 0x2B
-#define QIB_7322_RcvCtrl_TidFlowEnable_RMASK 0x1
-#define QIB_7322_RcvCtrl_ContextCfg_LSB 0x29
-#define QIB_7322_RcvCtrl_ContextCfg_MSB 0x2A
-#define QIB_7322_RcvCtrl_ContextCfg_RMASK 0x3
-#define QIB_7322_RcvCtrl_IntrAvail_LSB 0x14
-#define QIB_7322_RcvCtrl_IntrAvail_MSB 0x25
-#define QIB_7322_RcvCtrl_IntrAvail_RMASK 0x3FFFF
-#define QIB_7322_RcvCtrl_dontDropRHQFull_LSB 0x0
-#define QIB_7322_RcvCtrl_dontDropRHQFull_MSB 0x11
-#define QIB_7322_RcvCtrl_dontDropRHQFull_RMASK 0x3FFFF
-
-#define QIB_7322_RcvHdrSize_OFFS 0x110
-#define QIB_7322_RcvHdrSize_DEF 0x0000000000000000
-
-#define QIB_7322_RcvHdrCnt_OFFS 0x118
-#define QIB_7322_RcvHdrCnt_DEF 0x0000000000000000
-
-#define QIB_7322_RcvHdrEntSize_OFFS 0x120
-#define QIB_7322_RcvHdrEntSize_DEF 0x0000000000000000
-
-#define QIB_7322_RcvTIDBase_OFFS 0x128
-#define QIB_7322_RcvTIDBase_DEF 0x0000000000050000
-
-#define QIB_7322_RcvTIDCnt_OFFS 0x130
-#define QIB_7322_RcvTIDCnt_DEF 0x0000000000000200
-
-#define QIB_7322_RcvEgrBase_OFFS 0x138
-#define QIB_7322_RcvEgrBase_DEF 0x0000000000014000
-
-#define QIB_7322_RcvEgrCnt_OFFS 0x140
-#define QIB_7322_RcvEgrCnt_DEF 0x0000000000001000
-
-#define QIB_7322_RcvBufBase_OFFS 0x148
-#define QIB_7322_RcvBufBase_DEF 0x0000000000080000
-
-#define QIB_7322_RcvBufSize_OFFS 0x150
-#define QIB_7322_RcvBufSize_DEF 0x0000000000005000
-
-#define QIB_7322_RxIntMemBase_OFFS 0x158
-#define QIB_7322_RxIntMemBase_DEF 0x0000000000077000
-
-#define QIB_7322_RxIntMemSize_OFFS 0x160
-#define QIB_7322_RxIntMemSize_DEF 0x0000000000007000
-
-#define QIB_7322_feature_mask_OFFS 0x190
-#define QIB_7322_feature_mask_DEF 0x00000000000000XX
-
-#define QIB_7322_active_feature_mask_OFFS 0x198
-#define QIB_7322_active_feature_mask_DEF 0x00000000000000XX
-#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_LSB 0x5
-#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_MSB 0x5
-#define QIB_7322_active_feature_mask_Port1_QDR_Enabled_RMASK 0x1
-#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_LSB 0x4
-#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_MSB 0x4
-#define QIB_7322_active_feature_mask_Port1_DDR_Enabled_RMASK 0x1
-#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_LSB 0x3
-#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_MSB 0x3
-#define QIB_7322_active_feature_mask_Port1_SDR_Enabled_RMASK 0x1
-#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_LSB 0x2
-#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_MSB 0x2
-#define QIB_7322_active_feature_mask_Port0_QDR_Enabled_RMASK 0x1
-#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_LSB 0x1
-#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_MSB 0x1
-#define QIB_7322_active_feature_mask_Port0_DDR_Enabled_RMASK 0x1
-#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_LSB 0x0
-#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_MSB 0x0
-#define QIB_7322_active_feature_mask_Port0_SDR_Enabled_RMASK 0x1
-
-#define QIB_7322_SendCtrl_OFFS 0x1C0
-#define QIB_7322_SendCtrl_DEF 0x0000000000000000
-#define QIB_7322_SendCtrl_Disarm_LSB 0x1F
-#define QIB_7322_SendCtrl_Disarm_MSB 0x1F
-#define QIB_7322_SendCtrl_Disarm_RMASK 0x1
-#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_LSB 0x1D
-#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_MSB 0x1D
-#define QIB_7322_SendCtrl_SendBufAvailPad64Byte_RMASK 0x1
-#define QIB_7322_SendCtrl_AvailUpdThld_LSB 0x18
-#define QIB_7322_SendCtrl_AvailUpdThld_MSB 0x1C
-#define QIB_7322_SendCtrl_AvailUpdThld_RMASK 0x1F
-#define QIB_7322_SendCtrl_DisarmSendBuf_LSB 0x10
-#define QIB_7322_SendCtrl_DisarmSendBuf_MSB 0x17
-#define QIB_7322_SendCtrl_DisarmSendBuf_RMASK 0xFF
-#define QIB_7322_SendCtrl_SpecialTriggerEn_LSB 0x4
-#define QIB_7322_SendCtrl_SpecialTriggerEn_MSB 0x4
-#define QIB_7322_SendCtrl_SpecialTriggerEn_RMASK 0x1
-#define QIB_7322_SendCtrl_SendBufAvailUpd_LSB 0x2
-#define QIB_7322_SendCtrl_SendBufAvailUpd_MSB 0x2
-#define QIB_7322_SendCtrl_SendBufAvailUpd_RMASK 0x1
-#define QIB_7322_SendCtrl_SendIntBufAvail_LSB 0x1
-#define QIB_7322_SendCtrl_SendIntBufAvail_MSB 0x1
-#define QIB_7322_SendCtrl_SendIntBufAvail_RMASK 0x1
-
-#define QIB_7322_SendBufBase_OFFS 0x1C8
-#define QIB_7322_SendBufBase_DEF 0x0018000000100000
-#define QIB_7322_SendBufBase_BaseAddr_LargePIO_LSB 0x20
-#define QIB_7322_SendBufBase_BaseAddr_LargePIO_MSB 0x34
-#define QIB_7322_SendBufBase_BaseAddr_LargePIO_RMASK 0x1FFFFF
-#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_LSB 0x0
-#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_MSB 0x14
-#define QIB_7322_SendBufBase_BaseAddr_SmallPIO_RMASK 0x1FFFFF
-
-#define QIB_7322_SendBufSize_OFFS 0x1D0
-#define QIB_7322_SendBufSize_DEF 0x0000108000000880
-#define QIB_7322_SendBufSize_Size_LargePIO_LSB 0x20
-#define QIB_7322_SendBufSize_Size_LargePIO_MSB 0x2C
-#define QIB_7322_SendBufSize_Size_LargePIO_RMASK 0x1FFF
-#define QIB_7322_SendBufSize_Size_SmallPIO_LSB 0x0
-#define QIB_7322_SendBufSize_Size_SmallPIO_MSB 0xB
-#define QIB_7322_SendBufSize_Size_SmallPIO_RMASK 0xFFF
-
-#define QIB_7322_SendBufCnt_OFFS 0x1D8
-#define QIB_7322_SendBufCnt_DEF 0x0000002000000080
-#define QIB_7322_SendBufCnt_Num_LargeBuffers_LSB 0x20
-#define QIB_7322_SendBufCnt_Num_LargeBuffers_MSB 0x25
-#define QIB_7322_SendBufCnt_Num_LargeBuffers_RMASK 0x3F
-#define QIB_7322_SendBufCnt_Num_SmallBuffers_LSB 0x0
-#define QIB_7322_SendBufCnt_Num_SmallBuffers_MSB 0x8
-#define QIB_7322_SendBufCnt_Num_SmallBuffers_RMASK 0x1FF
-
-#define QIB_7322_SendBufAvailAddr_OFFS 0x1E0
-#define QIB_7322_SendBufAvailAddr_DEF 0x0000000000000000
-#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_LSB 0x6
-#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_MSB 0x27
-#define QIB_7322_SendBufAvailAddr_SendBufAvailAddr_RMASK 0x3FFFFFFFF
-
-#define QIB_7322_SendBufErr0_OFFS 0x240
-#define QIB_7322_SendBufErr0_DEF 0x0000000000000000
-#define QIB_7322_SendBufErr0_SendBufErr_63_0_LSB 0x0
-#define QIB_7322_SendBufErr0_SendBufErr_63_0_MSB 0x3F
-#define QIB_7322_SendBufErr0_SendBufErr_63_0_RMASK 0x0
-
-#define QIB_7322_AvailUpdCount_OFFS 0x268
-#define QIB_7322_AvailUpdCount_DEF 0x0000000000000000
-#define QIB_7322_AvailUpdCount_AvailUpdCount_LSB 0x0
-#define QIB_7322_AvailUpdCount_AvailUpdCount_MSB 0x4
-#define QIB_7322_AvailUpdCount_AvailUpdCount_RMASK 0x1F
-
-#define QIB_7322_RcvHdrAddr0_OFFS 0x280
-#define QIB_7322_RcvHdrAddr0_DEF 0x0000000000000000
-#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_LSB 0x2
-#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_MSB 0x27
-#define QIB_7322_RcvHdrAddr0_RcvHdrAddr_RMASK 0x3FFFFFFFFF
-
-#define QIB_7322_RcvHdrTailAddr0_OFFS 0x340
-#define QIB_7322_RcvHdrTailAddr0_DEF 0x0000000000000000
-#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_LSB 0x2
-#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_MSB 0x27
-#define QIB_7322_RcvHdrTailAddr0_RcvHdrTailAddr_RMASK 0x3FFFFFFFFF
-
-#define QIB_7322_ahb_access_ctrl_OFFS 0x460
-#define QIB_7322_ahb_access_ctrl_DEF 0x0000000000000000
-#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_LSB 0x1
-#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_MSB 0x2
-#define QIB_7322_ahb_access_ctrl_sw_sel_ahb_trgt_RMASK 0x3
-#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_LSB 0x0
-#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_MSB 0x0
-#define QIB_7322_ahb_access_ctrl_sw_ahb_sel_RMASK 0x1
-
-#define QIB_7322_ahb_transaction_reg_OFFS 0x468
-#define QIB_7322_ahb_transaction_reg_DEF 0x0000000080000000
-#define QIB_7322_ahb_transaction_reg_ahb_data_LSB 0x20
-#define QIB_7322_ahb_transaction_reg_ahb_data_MSB 0x3F
-#define QIB_7322_ahb_transaction_reg_ahb_data_RMASK 0xFFFFFFFF
-#define QIB_7322_ahb_transaction_reg_ahb_rdy_LSB 0x1F
-#define QIB_7322_ahb_transaction_reg_ahb_rdy_MSB 0x1F
-#define QIB_7322_ahb_transaction_reg_ahb_rdy_RMASK 0x1
-#define QIB_7322_ahb_transaction_reg_ahb_req_err_LSB 0x1E
-#define QIB_7322_ahb_transaction_reg_ahb_req_err_MSB 0x1E
-#define QIB_7322_ahb_transaction_reg_ahb_req_err_RMASK 0x1
-#define QIB_7322_ahb_transaction_reg_write_not_read_LSB 0x1B
-#define QIB_7322_ahb_transaction_reg_write_not_read_MSB 0x1B
-#define QIB_7322_ahb_transaction_reg_write_not_read_RMASK 0x1
-#define QIB_7322_ahb_transaction_reg_ahb_address_LSB 0x10
-#define QIB_7322_ahb_transaction_reg_ahb_address_MSB 0x1A
-#define QIB_7322_ahb_transaction_reg_ahb_address_RMASK 0x7FF
-
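The ahb_transaction_reg fields describe a small request/ready handshake: an 11-bit ahb_address and a write_not_read flag go in, and ahb_rdy plus 32 bits of ahb_data come back. A hedged sketch of a read through it follows; the target-select step via ahb_access_ctrl and any required delays are omitted, and 'xact' merely stands in for the mapped transaction register.

#include <stdint.h>

/* Sketch under assumptions: leaving write_not_read at 0 issues a read,
 * and ahb_rdy asserts when ahb_data is valid.  Bounded polling only. */
static int ahb_read_sketch(volatile uint64_t *xact, uint16_t addr,
			   uint32_t *data)
{
	uint64_t v = ((uint64_t)addr &
		      QIB_7322_ahb_transaction_reg_ahb_address_RMASK)
			<< QIB_7322_ahb_transaction_reg_ahb_address_LSB;
	int tries;

	*xact = v;				/* issue the read request */
	for (tries = 0; tries < 1000; tries++) {
		v = *xact;
		if ((v >> QIB_7322_ahb_transaction_reg_ahb_rdy_LSB) & 1) {
			*data = (uint32_t)(v >>
				QIB_7322_ahb_transaction_reg_ahb_data_LSB);
			return 0;
		}
	}
	return -1;				/* ahb_rdy never asserted */
}
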
-#define QIB_7322_SPC_JTAG_ACCESS_REG_OFFS 0x470
-#define QIB_7322_SPC_JTAG_ACCESS_REG_DEF 0x0000000000000001
-#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_LSB 0xA
-#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_MSB 0xA
-#define QIB_7322_SPC_JTAG_ACCESS_REG_SPC_JTAG_ACCESS_EN_RMASK 0x1
-#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_LSB 0x5
-#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_MSB 0x9
-#define QIB_7322_SPC_JTAG_ACCESS_REG_bist_en_RMASK 0x1F
-#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_LSB 0x3
-#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_MSB 0x4
-#define QIB_7322_SPC_JTAG_ACCESS_REG_opcode_RMASK 0x3
-#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_LSB 0x2
-#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_MSB 0x2
-#define QIB_7322_SPC_JTAG_ACCESS_REG_tdi_RMASK 0x1
-#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_LSB 0x1
-#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_MSB 0x1
-#define QIB_7322_SPC_JTAG_ACCESS_REG_tdo_RMASK 0x1
-#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_LSB 0x0
-#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_MSB 0x0
-#define QIB_7322_SPC_JTAG_ACCESS_REG_rdy_RMASK 0x1
-
-#define QIB_7322_SendCheckMask0_OFFS 0x4C0
-#define QIB_7322_SendCheckMask0_DEF 0x0000000000000000
-#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_LSB 0x0
-#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_MSB 0x3F
-#define QIB_7322_SendCheckMask0_SendCheckMask_63_32_RMASK 0x0
-
-#define QIB_7322_SendGRHCheckMask0_OFFS 0x4E0
-#define QIB_7322_SendGRHCheckMask0_DEF 0x0000000000000000
-#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_LSB 0x0
-#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_MSB 0x3F
-#define QIB_7322_SendGRHCheckMask0_SendGRHCheckMask_63_32_RMASK 0x0
-
-#define QIB_7322_SendIBPacketMask0_OFFS 0x500
-#define QIB_7322_SendIBPacketMask0_DEF 0x0000000000000000
-#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_LSB 0x0
-#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_MSB 0x3F
-#define QIB_7322_SendIBPacketMask0_SendIBPacketMask_63_32_RMASK 0x0
-
-#define QIB_7322_IntRedirect0_OFFS 0x540
-#define QIB_7322_IntRedirect0_DEF 0x0000000000000000
-#define QIB_7322_IntRedirect0_vec11_LSB 0x37
-#define QIB_7322_IntRedirect0_vec11_MSB 0x3B
-#define QIB_7322_IntRedirect0_vec11_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec10_LSB 0x32
-#define QIB_7322_IntRedirect0_vec10_MSB 0x36
-#define QIB_7322_IntRedirect0_vec10_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec9_LSB 0x2D
-#define QIB_7322_IntRedirect0_vec9_MSB 0x31
-#define QIB_7322_IntRedirect0_vec9_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec8_LSB 0x28
-#define QIB_7322_IntRedirect0_vec8_MSB 0x2C
-#define QIB_7322_IntRedirect0_vec8_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec7_LSB 0x23
-#define QIB_7322_IntRedirect0_vec7_MSB 0x27
-#define QIB_7322_IntRedirect0_vec7_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec6_LSB 0x1E
-#define QIB_7322_IntRedirect0_vec6_MSB 0x22
-#define QIB_7322_IntRedirect0_vec6_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec5_LSB 0x19
-#define QIB_7322_IntRedirect0_vec5_MSB 0x1D
-#define QIB_7322_IntRedirect0_vec5_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec4_LSB 0x14
-#define QIB_7322_IntRedirect0_vec4_MSB 0x18
-#define QIB_7322_IntRedirect0_vec4_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec3_LSB 0xF
-#define QIB_7322_IntRedirect0_vec3_MSB 0x13
-#define QIB_7322_IntRedirect0_vec3_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec2_LSB 0xA
-#define QIB_7322_IntRedirect0_vec2_MSB 0xE
-#define QIB_7322_IntRedirect0_vec2_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec1_LSB 0x5
-#define QIB_7322_IntRedirect0_vec1_MSB 0x9
-#define QIB_7322_IntRedirect0_vec1_RMASK 0x1F
-#define QIB_7322_IntRedirect0_vec0_LSB 0x0
-#define QIB_7322_IntRedirect0_vec0_MSB 0x4
-#define QIB_7322_IntRedirect0_vec0_RMASK 0x1F
-
-#define QIB_7322_Int_Granted_OFFS 0x570
-#define QIB_7322_Int_Granted_DEF 0x0000000000000000
-
-#define QIB_7322_vec_clr_without_int_OFFS 0x578
-#define QIB_7322_vec_clr_without_int_DEF 0x0000000000000000
-
-#define QIB_7322_DCACtrlA_OFFS 0x580
-#define QIB_7322_DCACtrlA_DEF 0x0000000000000000
-#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_LSB 0x4
-#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_MSB 0x4
-#define QIB_7322_DCACtrlA_SendDMAHead1DCAEnable_RMASK 0x1
-#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_LSB 0x3
-#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_MSB 0x3
-#define QIB_7322_DCACtrlA_SendDMAHead0DCAEnable_RMASK 0x1
-#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_LSB 0x2
-#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_MSB 0x2
-#define QIB_7322_DCACtrlA_RcvTailUpdDCAEnable_RMASK 0x1
-#define QIB_7322_DCACtrlA_EagerDCAEnable_LSB 0x1
-#define QIB_7322_DCACtrlA_EagerDCAEnable_MSB 0x1
-#define QIB_7322_DCACtrlA_EagerDCAEnable_RMASK 0x1
-#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_LSB 0x0
-#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_MSB 0x0
-#define QIB_7322_DCACtrlA_RcvHdrqDCAEnable_RMASK 0x1
-
-#define QIB_7322_DCACtrlB_OFFS 0x588
-#define QIB_7322_DCACtrlB_DEF 0x0000000000000000
-#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_LSB 0x36
-#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_MSB 0x3B
-#define QIB_7322_DCACtrlB_RcvHdrq3DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_LSB 0x2E
-#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_MSB 0x35
-#define QIB_7322_DCACtrlB_RcvHdrq3DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_LSB 0x28
-#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_MSB 0x2D
-#define QIB_7322_DCACtrlB_RcvHdrq2DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_LSB 0x20
-#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_MSB 0x27
-#define QIB_7322_DCACtrlB_RcvHdrq2DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_LSB 0x16
-#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_MSB 0x1B
-#define QIB_7322_DCACtrlB_RcvHdrq1DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_LSB 0xE
-#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_MSB 0x15
-#define QIB_7322_DCACtrlB_RcvHdrq1DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_LSB 0x8
-#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_MSB 0xD
-#define QIB_7322_DCACtrlB_RcvHdrq0DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_LSB 0x0
-#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_MSB 0x7
-#define QIB_7322_DCACtrlB_RcvHdrq0DCAOPH_RMASK 0xFF
-
-#define QIB_7322_DCACtrlC_OFFS 0x590
-#define QIB_7322_DCACtrlC_DEF 0x0000000000000000
-#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_LSB 0x36
-#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_MSB 0x3B
-#define QIB_7322_DCACtrlC_RcvHdrq7DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_LSB 0x2E
-#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_MSB 0x35
-#define QIB_7322_DCACtrlC_RcvHdrq7DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_LSB 0x28
-#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_MSB 0x2D
-#define QIB_7322_DCACtrlC_RcvHdrq6DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_LSB 0x20
-#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_MSB 0x27
-#define QIB_7322_DCACtrlC_RcvHdrq6DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_LSB 0x16
-#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_MSB 0x1B
-#define QIB_7322_DCACtrlC_RcvHdrq5DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_LSB 0xE
-#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_MSB 0x15
-#define QIB_7322_DCACtrlC_RcvHdrq5DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_LSB 0x8
-#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_MSB 0xD
-#define QIB_7322_DCACtrlC_RcvHdrq4DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_LSB 0x0
-#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_MSB 0x7
-#define QIB_7322_DCACtrlC_RcvHdrq4DCAOPH_RMASK 0xFF
-
-#define QIB_7322_DCACtrlD_OFFS 0x598
-#define QIB_7322_DCACtrlD_DEF 0x0000000000000000
-#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_LSB 0x36
-#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_MSB 0x3B
-#define QIB_7322_DCACtrlD_RcvHdrq11DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_LSB 0x2E
-#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_MSB 0x35
-#define QIB_7322_DCACtrlD_RcvHdrq11DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_LSB 0x28
-#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_MSB 0x2D
-#define QIB_7322_DCACtrlD_RcvHdrq10DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_LSB 0x20
-#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_MSB 0x27
-#define QIB_7322_DCACtrlD_RcvHdrq10DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_LSB 0x16
-#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_MSB 0x1B
-#define QIB_7322_DCACtrlD_RcvHdrq9DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_LSB 0xE
-#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_MSB 0x15
-#define QIB_7322_DCACtrlD_RcvHdrq9DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_LSB 0x8
-#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_MSB 0xD
-#define QIB_7322_DCACtrlD_RcvHdrq8DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_LSB 0x0
-#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_MSB 0x7
-#define QIB_7322_DCACtrlD_RcvHdrq8DCAOPH_RMASK 0xFF
-
-#define QIB_7322_DCACtrlE_OFFS 0x5A0
-#define QIB_7322_DCACtrlE_DEF 0x0000000000000000
-#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_LSB 0x36
-#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_MSB 0x3B
-#define QIB_7322_DCACtrlE_RcvHdrq15DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_LSB 0x2E
-#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_MSB 0x35
-#define QIB_7322_DCACtrlE_RcvHdrq15DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_LSB 0x28
-#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_MSB 0x2D
-#define QIB_7322_DCACtrlE_RcvHdrq14DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_LSB 0x20
-#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_MSB 0x27
-#define QIB_7322_DCACtrlE_RcvHdrq14DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_LSB 0x16
-#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_MSB 0x1B
-#define QIB_7322_DCACtrlE_RcvHdrq13DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_LSB 0xE
-#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_MSB 0x15
-#define QIB_7322_DCACtrlE_RcvHdrq13DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_LSB 0x8
-#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_MSB 0xD
-#define QIB_7322_DCACtrlE_RcvHdrq12DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_LSB 0x0
-#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_MSB 0x7
-#define QIB_7322_DCACtrlE_RcvHdrq12DCAOPH_RMASK 0xFF
-
-#define QIB_7322_DCACtrlF_OFFS 0x5A8
-#define QIB_7322_DCACtrlF_DEF 0x0000000000000000
-#define QIB_7322_DCACtrlF_SendDma1DCAOPH_LSB 0x28
-#define QIB_7322_DCACtrlF_SendDma1DCAOPH_MSB 0x2F
-#define QIB_7322_DCACtrlF_SendDma1DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlF_SendDma0DCAOPH_LSB 0x20
-#define QIB_7322_DCACtrlF_SendDma0DCAOPH_MSB 0x27
-#define QIB_7322_DCACtrlF_SendDma0DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_LSB 0x16
-#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_MSB 0x1B
-#define QIB_7322_DCACtrlF_RcvHdrq17DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_LSB 0xE
-#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_MSB 0x15
-#define QIB_7322_DCACtrlF_RcvHdrq17DCAOPH_RMASK 0xFF
-#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_LSB 0x8
-#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_MSB 0xD
-#define QIB_7322_DCACtrlF_RcvHdrq16DCAXfrCnt_RMASK 0x3F
-#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_LSB 0x0
-#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_MSB 0x7
-#define QIB_7322_DCACtrlF_RcvHdrq16DCAOPH_RMASK 0xFF
-
-#define QIB_7322_RcvAvailTimeOut0_OFFS 0xC00
-#define QIB_7322_RcvAvailTimeOut0_DEF 0x0000000000000000
-#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_LSB 0x10
-#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_MSB 0x1F
-#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOCount_RMASK 0xFFFF
-#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_LSB 0x0
-#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_MSB 0xF
-#define QIB_7322_RcvAvailTimeOut0_RcvAvailTOReload_RMASK 0xFFFF
-
-#define QIB_7322_CntrRegBase_0_OFFS 0x1028
-#define QIB_7322_CntrRegBase_0_DEF 0x0000000000012000
-
-#define QIB_7322_ErrMask_0_OFFS 0x1080
-#define QIB_7322_ErrMask_0_DEF 0x0000000000000000
-#define QIB_7322_ErrMask_0_IBStatusChangedMask_LSB 0x3A
-#define QIB_7322_ErrMask_0_IBStatusChangedMask_MSB 0x3A
-#define QIB_7322_ErrMask_0_IBStatusChangedMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SHeadersErrMask_LSB 0x39
-#define QIB_7322_ErrMask_0_SHeadersErrMask_MSB 0x39
-#define QIB_7322_ErrMask_0_SHeadersErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_LSB 0x36
-#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_MSB 0x36
-#define QIB_7322_ErrMask_0_VL15BufMisuseErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaHaltErrMask_LSB 0x31
-#define QIB_7322_ErrMask_0_SDmaHaltErrMask_MSB 0x31
-#define QIB_7322_ErrMask_0_SDmaHaltErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_LSB 0x30
-#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_MSB 0x30
-#define QIB_7322_ErrMask_0_SDmaDescAddrMisalignErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_LSB 0x2F
-#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_MSB 0x2F
-#define QIB_7322_ErrMask_0_SDmaUnexpDataErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_LSB 0x2E
-#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_MSB 0x2E
-#define QIB_7322_ErrMask_0_SDmaMissingDwErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_LSB 0x2D
-#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_MSB 0x2D
-#define QIB_7322_ErrMask_0_SDmaDwEnErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_LSB 0x2C
-#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_MSB 0x2C
-#define QIB_7322_ErrMask_0_SDmaRpyTagErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDma1stDescErrMask_LSB 0x2B
-#define QIB_7322_ErrMask_0_SDma1stDescErrMask_MSB 0x2B
-#define QIB_7322_ErrMask_0_SDma1stDescErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaBaseErrMask_LSB 0x2A
-#define QIB_7322_ErrMask_0_SDmaBaseErrMask_MSB 0x2A
-#define QIB_7322_ErrMask_0_SDmaBaseErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_LSB 0x29
-#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_MSB 0x29
-#define QIB_7322_ErrMask_0_SDmaTailOutOfBoundErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_LSB 0x28
-#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_MSB 0x28
-#define QIB_7322_ErrMask_0_SDmaOutOfBoundErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_LSB 0x27
-#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_MSB 0x27
-#define QIB_7322_ErrMask_0_SDmaGenMismatchErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_LSB 0x26
-#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_MSB 0x26
-#define QIB_7322_ErrMask_0_SendBufMisuseErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_LSB 0x25
-#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_MSB 0x25
-#define QIB_7322_ErrMask_0_SendUnsupportedVLErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_LSB 0x24
-#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_MSB 0x24
-#define QIB_7322_ErrMask_0_SendUnexpectedPktNumErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_LSB 0x22
-#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_MSB 0x22
-#define QIB_7322_ErrMask_0_SendDroppedDataPktErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_LSB 0x21
-#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_MSB 0x21
-#define QIB_7322_ErrMask_0_SendDroppedSmpPktErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendPktLenErrMask_LSB 0x20
-#define QIB_7322_ErrMask_0_SendPktLenErrMask_MSB 0x20
-#define QIB_7322_ErrMask_0_SendPktLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendUnderRunErrMask_LSB 0x1F
-#define QIB_7322_ErrMask_0_SendUnderRunErrMask_MSB 0x1F
-#define QIB_7322_ErrMask_0_SendUnderRunErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_LSB 0x1E
-#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_MSB 0x1E
-#define QIB_7322_ErrMask_0_SendMaxPktLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_LSB 0x1D
-#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_MSB 0x1D
-#define QIB_7322_ErrMask_0_SendMinPktLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_LSB 0x11
-#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_MSB 0x11
-#define QIB_7322_ErrMask_0_RcvIBLostLinkErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvHdrErrMask_LSB 0x10
-#define QIB_7322_ErrMask_0_RcvHdrErrMask_MSB 0x10
-#define QIB_7322_ErrMask_0_RcvHdrErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_LSB 0xF
-#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_MSB 0xF
-#define QIB_7322_ErrMask_0_RcvHdrLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvBadTidErrMask_LSB 0xE
-#define QIB_7322_ErrMask_0_RcvBadTidErrMask_MSB 0xE
-#define QIB_7322_ErrMask_0_RcvBadTidErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_LSB 0xB
-#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_MSB 0xB
-#define QIB_7322_ErrMask_0_RcvBadVersionErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_LSB 0xA
-#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_MSB 0xA
-#define QIB_7322_ErrMask_0_RcvIBFlowErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvEBPErrMask_LSB 0x9
-#define QIB_7322_ErrMask_0_RcvEBPErrMask_MSB 0x9
-#define QIB_7322_ErrMask_0_RcvEBPErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_LSB 0x8
-#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_MSB 0x8
-#define QIB_7322_ErrMask_0_RcvUnsupportedVLErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_LSB 0x7
-#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_MSB 0x7
-#define QIB_7322_ErrMask_0_RcvUnexpectedCharErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_LSB 0x6
-#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_MSB 0x6
-#define QIB_7322_ErrMask_0_RcvShortPktLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_LSB 0x5
-#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_MSB 0x5
-#define QIB_7322_ErrMask_0_RcvLongPktLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_LSB 0x4
-#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_MSB 0x4
-#define QIB_7322_ErrMask_0_RcvMaxPktLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_LSB 0x3
-#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_MSB 0x3
-#define QIB_7322_ErrMask_0_RcvMinPktLenErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvICRCErrMask_LSB 0x2
-#define QIB_7322_ErrMask_0_RcvICRCErrMask_MSB 0x2
-#define QIB_7322_ErrMask_0_RcvICRCErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvVCRCErrMask_LSB 0x1
-#define QIB_7322_ErrMask_0_RcvVCRCErrMask_MSB 0x1
-#define QIB_7322_ErrMask_0_RcvVCRCErrMask_RMASK 0x1
-#define QIB_7322_ErrMask_0_RcvFormatErrMask_LSB 0x0
-#define QIB_7322_ErrMask_0_RcvFormatErrMask_MSB 0x0
-#define QIB_7322_ErrMask_0_RcvFormatErrMask_RMASK 0x1
-
-#define QIB_7322_ErrStatus_0_OFFS 0x1088
-#define QIB_7322_ErrStatus_0_DEF 0x0000000000000000
-#define QIB_7322_ErrStatus_0_IBStatusChanged_LSB 0x3A
-#define QIB_7322_ErrStatus_0_IBStatusChanged_MSB 0x3A
-#define QIB_7322_ErrStatus_0_IBStatusChanged_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SHeadersErr_LSB 0x39
-#define QIB_7322_ErrStatus_0_SHeadersErr_MSB 0x39
-#define QIB_7322_ErrStatus_0_SHeadersErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_LSB 0x36
-#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_MSB 0x36
-#define QIB_7322_ErrStatus_0_VL15BufMisuseErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaHaltErr_LSB 0x31
-#define QIB_7322_ErrStatus_0_SDmaHaltErr_MSB 0x31
-#define QIB_7322_ErrStatus_0_SDmaHaltErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_LSB 0x30
-#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_MSB 0x30
-#define QIB_7322_ErrStatus_0_SDmaDescAddrMisalignErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_LSB 0x2F
-#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_MSB 0x2F
-#define QIB_7322_ErrStatus_0_SDmaUnexpDataErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_LSB 0x2E
-#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_MSB 0x2E
-#define QIB_7322_ErrStatus_0_SDmaMissingDwErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaDwEnErr_LSB 0x2D
-#define QIB_7322_ErrStatus_0_SDmaDwEnErr_MSB 0x2D
-#define QIB_7322_ErrStatus_0_SDmaDwEnErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_LSB 0x2C
-#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_MSB 0x2C
-#define QIB_7322_ErrStatus_0_SDmaRpyTagErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDma1stDescErr_LSB 0x2B
-#define QIB_7322_ErrStatus_0_SDma1stDescErr_MSB 0x2B
-#define QIB_7322_ErrStatus_0_SDma1stDescErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaBaseErr_LSB 0x2A
-#define QIB_7322_ErrStatus_0_SDmaBaseErr_MSB 0x2A
-#define QIB_7322_ErrStatus_0_SDmaBaseErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_LSB 0x29
-#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_MSB 0x29
-#define QIB_7322_ErrStatus_0_SDmaTailOutOfBoundErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_LSB 0x28
-#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_MSB 0x28
-#define QIB_7322_ErrStatus_0_SDmaOutOfBoundErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_LSB 0x27
-#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_MSB 0x27
-#define QIB_7322_ErrStatus_0_SDmaGenMismatchErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendBufMisuseErr_LSB 0x26
-#define QIB_7322_ErrStatus_0_SendBufMisuseErr_MSB 0x26
-#define QIB_7322_ErrStatus_0_SendBufMisuseErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_LSB 0x25
-#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_MSB 0x25
-#define QIB_7322_ErrStatus_0_SendUnsupportedVLErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_LSB 0x24
-#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_MSB 0x24
-#define QIB_7322_ErrStatus_0_SendUnexpectedPktNumErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_LSB 0x22
-#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_MSB 0x22
-#define QIB_7322_ErrStatus_0_SendDroppedDataPktErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_LSB 0x21
-#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_MSB 0x21
-#define QIB_7322_ErrStatus_0_SendDroppedSmpPktErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendPktLenErr_LSB 0x20
-#define QIB_7322_ErrStatus_0_SendPktLenErr_MSB 0x20
-#define QIB_7322_ErrStatus_0_SendPktLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendUnderRunErr_LSB 0x1F
-#define QIB_7322_ErrStatus_0_SendUnderRunErr_MSB 0x1F
-#define QIB_7322_ErrStatus_0_SendUnderRunErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_LSB 0x1E
-#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_MSB 0x1E
-#define QIB_7322_ErrStatus_0_SendMaxPktLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_SendMinPktLenErr_LSB 0x1D
-#define QIB_7322_ErrStatus_0_SendMinPktLenErr_MSB 0x1D
-#define QIB_7322_ErrStatus_0_SendMinPktLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_LSB 0x11
-#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_MSB 0x11
-#define QIB_7322_ErrStatus_0_RcvIBLostLinkErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvHdrErr_LSB 0x10
-#define QIB_7322_ErrStatus_0_RcvHdrErr_MSB 0x10
-#define QIB_7322_ErrStatus_0_RcvHdrErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvHdrLenErr_LSB 0xF
-#define QIB_7322_ErrStatus_0_RcvHdrLenErr_MSB 0xF
-#define QIB_7322_ErrStatus_0_RcvHdrLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvBadTidErr_LSB 0xE
-#define QIB_7322_ErrStatus_0_RcvBadTidErr_MSB 0xE
-#define QIB_7322_ErrStatus_0_RcvBadTidErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvBadVersionErr_LSB 0xB
-#define QIB_7322_ErrStatus_0_RcvBadVersionErr_MSB 0xB
-#define QIB_7322_ErrStatus_0_RcvBadVersionErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvIBFlowErr_LSB 0xA
-#define QIB_7322_ErrStatus_0_RcvIBFlowErr_MSB 0xA
-#define QIB_7322_ErrStatus_0_RcvIBFlowErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvEBPErr_LSB 0x9
-#define QIB_7322_ErrStatus_0_RcvEBPErr_MSB 0x9
-#define QIB_7322_ErrStatus_0_RcvEBPErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_LSB 0x8
-#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_MSB 0x8
-#define QIB_7322_ErrStatus_0_RcvUnsupportedVLErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_LSB 0x7
-#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_MSB 0x7
-#define QIB_7322_ErrStatus_0_RcvUnexpectedCharErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_LSB 0x6
-#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_MSB 0x6
-#define QIB_7322_ErrStatus_0_RcvShortPktLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_LSB 0x5
-#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_MSB 0x5
-#define QIB_7322_ErrStatus_0_RcvLongPktLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_LSB 0x4
-#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_MSB 0x4
-#define QIB_7322_ErrStatus_0_RcvMaxPktLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_LSB 0x3
-#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_MSB 0x3
-#define QIB_7322_ErrStatus_0_RcvMinPktLenErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvICRCErr_LSB 0x2
-#define QIB_7322_ErrStatus_0_RcvICRCErr_MSB 0x2
-#define QIB_7322_ErrStatus_0_RcvICRCErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvVCRCErr_LSB 0x1
-#define QIB_7322_ErrStatus_0_RcvVCRCErr_MSB 0x1
-#define QIB_7322_ErrStatus_0_RcvVCRCErr_RMASK 0x1
-#define QIB_7322_ErrStatus_0_RcvFormatErr_LSB 0x0
-#define QIB_7322_ErrStatus_0_RcvFormatErr_MSB 0x0
-#define QIB_7322_ErrStatus_0_RcvFormatErr_RMASK 0x1
-
-#define QIB_7322_ErrClear_0_OFFS 0x1090
-#define QIB_7322_ErrClear_0_DEF 0x0000000000000000
-#define QIB_7322_ErrClear_0_IBStatusChangedClear_LSB 0x3A
-#define QIB_7322_ErrClear_0_IBStatusChangedClear_MSB 0x3A
-#define QIB_7322_ErrClear_0_IBStatusChangedClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SHeadersErrClear_LSB 0x39
-#define QIB_7322_ErrClear_0_SHeadersErrClear_MSB 0x39
-#define QIB_7322_ErrClear_0_SHeadersErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_LSB 0x36
-#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_MSB 0x36
-#define QIB_7322_ErrClear_0_VL15BufMisuseErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaHaltErrClear_LSB 0x31
-#define QIB_7322_ErrClear_0_SDmaHaltErrClear_MSB 0x31
-#define QIB_7322_ErrClear_0_SDmaHaltErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_LSB 0x30
-#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_MSB 0x30
-#define QIB_7322_ErrClear_0_SDmaDescAddrMisalignErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_LSB 0x2F
-#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_MSB 0x2F
-#define QIB_7322_ErrClear_0_SDmaUnexpDataErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_LSB 0x2E
-#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_MSB 0x2E
-#define QIB_7322_ErrClear_0_SDmaMissingDwErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_LSB 0x2D
-#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_MSB 0x2D
-#define QIB_7322_ErrClear_0_SDmaDwEnErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_LSB 0x2C
-#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_MSB 0x2C
-#define QIB_7322_ErrClear_0_SDmaRpyTagErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDma1stDescErrClear_LSB 0x2B
-#define QIB_7322_ErrClear_0_SDma1stDescErrClear_MSB 0x2B
-#define QIB_7322_ErrClear_0_SDma1stDescErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaBaseErrClear_LSB 0x2A
-#define QIB_7322_ErrClear_0_SDmaBaseErrClear_MSB 0x2A
-#define QIB_7322_ErrClear_0_SDmaBaseErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_LSB 0x29
-#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_MSB 0x29
-#define QIB_7322_ErrClear_0_SDmaTailOutOfBoundErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_LSB 0x28
-#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_MSB 0x28
-#define QIB_7322_ErrClear_0_SDmaOutOfBoundErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_LSB 0x27
-#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_MSB 0x27
-#define QIB_7322_ErrClear_0_SDmaGenMismatchErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_LSB 0x26
-#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_MSB 0x26
-#define QIB_7322_ErrClear_0_SendBufMisuseErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_LSB 0x25
-#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_MSB 0x25
-#define QIB_7322_ErrClear_0_SendUnsupportedVLErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_LSB 0x24
-#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_MSB 0x24
-#define QIB_7322_ErrClear_0_SendUnexpectedPktNumErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_LSB 0x22
-#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_MSB 0x22
-#define QIB_7322_ErrClear_0_SendDroppedDataPktErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_LSB 0x21
-#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_MSB 0x21
-#define QIB_7322_ErrClear_0_SendDroppedSmpPktErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendPktLenErrClear_LSB 0x20
-#define QIB_7322_ErrClear_0_SendPktLenErrClear_MSB 0x20
-#define QIB_7322_ErrClear_0_SendPktLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendUnderRunErrClear_LSB 0x1F
-#define QIB_7322_ErrClear_0_SendUnderRunErrClear_MSB 0x1F
-#define QIB_7322_ErrClear_0_SendUnderRunErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_LSB 0x1E
-#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_MSB 0x1E
-#define QIB_7322_ErrClear_0_SendMaxPktLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_LSB 0x1D
-#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_MSB 0x1D
-#define QIB_7322_ErrClear_0_SendMinPktLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_LSB 0x11
-#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_MSB 0x11
-#define QIB_7322_ErrClear_0_RcvIBLostLinkErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvHdrErrClear_LSB 0x10
-#define QIB_7322_ErrClear_0_RcvHdrErrClear_MSB 0x10
-#define QIB_7322_ErrClear_0_RcvHdrErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_LSB 0xF
-#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_MSB 0xF
-#define QIB_7322_ErrClear_0_RcvHdrLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvBadTidErrClear_LSB 0xE
-#define QIB_7322_ErrClear_0_RcvBadTidErrClear_MSB 0xE
-#define QIB_7322_ErrClear_0_RcvBadTidErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_LSB 0xB
-#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_MSB 0xB
-#define QIB_7322_ErrClear_0_RcvBadVersionErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_LSB 0xA
-#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_MSB 0xA
-#define QIB_7322_ErrClear_0_RcvIBFlowErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvEBPErrClear_LSB 0x9
-#define QIB_7322_ErrClear_0_RcvEBPErrClear_MSB 0x9
-#define QIB_7322_ErrClear_0_RcvEBPErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_LSB 0x8
-#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_MSB 0x8
-#define QIB_7322_ErrClear_0_RcvUnsupportedVLErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_LSB 0x7
-#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_MSB 0x7
-#define QIB_7322_ErrClear_0_RcvUnexpectedCharErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_LSB 0x6
-#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_MSB 0x6
-#define QIB_7322_ErrClear_0_RcvShortPktLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_LSB 0x5
-#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_MSB 0x5
-#define QIB_7322_ErrClear_0_RcvLongPktLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_LSB 0x4
-#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_MSB 0x4
-#define QIB_7322_ErrClear_0_RcvMaxPktLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_LSB 0x3
-#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_MSB 0x3
-#define QIB_7322_ErrClear_0_RcvMinPktLenErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvICRCErrClear_LSB 0x2
-#define QIB_7322_ErrClear_0_RcvICRCErrClear_MSB 0x2
-#define QIB_7322_ErrClear_0_RcvICRCErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvVCRCErrClear_LSB 0x1
-#define QIB_7322_ErrClear_0_RcvVCRCErrClear_MSB 0x1
-#define QIB_7322_ErrClear_0_RcvVCRCErrClear_RMASK 0x1
-#define QIB_7322_ErrClear_0_RcvFormatErrClear_LSB 0x0
-#define QIB_7322_ErrClear_0_RcvFormatErrClear_MSB 0x0
-#define QIB_7322_ErrClear_0_RcvFormatErrClear_RMASK 0x1
-
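[Editor's note, not part of the removed header: the ErrMask/ErrStatus/ErrClear triad above uses matching bit positions across all three registers, which suggests the usual mask/status/write-1-to-clear idiom. The sketch below assumes that convention and a flat MMIO mapping; both the helper and the accessor name are hypothetical, not taken from the qib driver.]

#include <stdint.h>

/* Hypothetical MMIO write helper; the real driver has its own accessors. */
static inline void qib7322_write_kreg(volatile uint64_t *base,
				      uint64_t offs, uint64_t val)
{
	base[offs / sizeof(uint64_t)] = val;
}

/*
 * Acknowledge a latched RcvICRC error on port 0 by writing a 1 to the
 * ErrClear bit at the same position (0x2) as the ErrStatus bit above.
 */
static inline void qib7322_ack_rcv_icrc(volatile uint64_t *base)
{
	qib7322_write_kreg(base, QIB_7322_ErrClear_0_OFFS,
			   (uint64_t)QIB_7322_ErrClear_0_RcvICRCErrClear_RMASK
				<< QIB_7322_ErrClear_0_RcvICRCErrClear_LSB);
}
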
-#define QIB_7322_TXEStatus_0_OFFS 0x10B8
-#define QIB_7322_TXEStatus_0_DEF 0x0000000XC00080FF
-#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_LSB 0x1F
-#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_MSB 0x1F
-#define QIB_7322_TXEStatus_0_TXE_IBC_Idle_RMASK 0x1
-#define QIB_7322_TXEStatus_0_RmFifoEmpty_LSB 0x1E
-#define QIB_7322_TXEStatus_0_RmFifoEmpty_MSB 0x1E
-#define QIB_7322_TXEStatus_0_RmFifoEmpty_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_LSB 0xF
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_MSB 0xF
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL15_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_LSB 0x7
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_MSB 0x7
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL7_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_LSB 0x6
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_MSB 0x6
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL6_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_LSB 0x5
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_MSB 0x5
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL5_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_LSB 0x4
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_MSB 0x4
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL4_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_LSB 0x3
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_MSB 0x3
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL3_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_LSB 0x2
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_MSB 0x2
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL2_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_LSB 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_MSB 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL1_RMASK 0x1
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_LSB 0x0
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_MSB 0x0
-#define QIB_7322_TXEStatus_0_LaFifoEmpty_VL0_RMASK 0x1
-
-#define QIB_7322_RcvCtrl_0_OFFS 0x1100
-#define QIB_7322_RcvCtrl_0_DEF 0x0000000000000000
-#define QIB_7322_RcvCtrl_0_RcvResetCredit_LSB 0x2A
-#define QIB_7322_RcvCtrl_0_RcvResetCredit_MSB 0x2A
-#define QIB_7322_RcvCtrl_0_RcvResetCredit_RMASK 0x1
-#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_LSB 0x29
-#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_MSB 0x29
-#define QIB_7322_RcvCtrl_0_RcvPartitionKeyDisable_RMASK 0x1
-#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_LSB 0x28
-#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_MSB 0x28
-#define QIB_7322_RcvCtrl_0_RcvQPMapEnable_RMASK 0x1
-#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_LSB 0x27
-#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_MSB 0x27
-#define QIB_7322_RcvCtrl_0_RcvIBPortEnable_RMASK 0x1
-#define QIB_7322_RcvCtrl_0_ContextEnableUser_LSB 0x2
-#define QIB_7322_RcvCtrl_0_ContextEnableUser_MSB 0x11
-#define QIB_7322_RcvCtrl_0_ContextEnableUser_RMASK 0xFFFF
-#define QIB_7322_RcvCtrl_0_ContextEnableKernel_LSB 0x0
-#define QIB_7322_RcvCtrl_0_ContextEnableKernel_MSB 0x0
-#define QIB_7322_RcvCtrl_0_ContextEnableKernel_RMASK 0x1
-
-#define QIB_7322_RcvBTHQP_0_OFFS 0x1108
-#define QIB_7322_RcvBTHQP_0_DEF 0x0000000000000000
-#define QIB_7322_RcvBTHQP_0_RcvBTHQP_LSB 0x0
-#define QIB_7322_RcvBTHQP_0_RcvBTHQP_MSB 0x17
-#define QIB_7322_RcvBTHQP_0_RcvBTHQP_RMASK 0xFFFFFF
-
-#define QIB_7322_RcvQPMapTableA_0_OFFS 0x1110
-#define QIB_7322_RcvQPMapTableA_0_DEF 0x0000000000000000
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_LSB 0x19
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_MSB 0x1D
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext5_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_LSB 0x14
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_MSB 0x18
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext4_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_LSB 0xF
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_MSB 0x13
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext3_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_LSB 0xA
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_MSB 0xE
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext2_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_LSB 0x5
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_MSB 0x9
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext1_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_LSB 0x0
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_MSB 0x4
-#define QIB_7322_RcvQPMapTableA_0_RcvQPMapContext0_RMASK 0x1F
-
-#define QIB_7322_RcvQPMapTableB_0_OFFS 0x1118
-#define QIB_7322_RcvQPMapTableB_0_DEF 0x0000000000000000
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_LSB 0x19
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_MSB 0x1D
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext11_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_LSB 0x14
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_MSB 0x18
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext10_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_LSB 0xF
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_MSB 0x13
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext9_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_LSB 0xA
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_MSB 0xE
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext8_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_LSB 0x5
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_MSB 0x9
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext7_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_LSB 0x0
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_MSB 0x4
-#define QIB_7322_RcvQPMapTableB_0_RcvQPMapContext6_RMASK 0x1F
-
-#define QIB_7322_RcvQPMapTableC_0_OFFS 0x1120
-#define QIB_7322_RcvQPMapTableC_0_DEF 0x0000000000000000
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_LSB 0x19
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_MSB 0x1D
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext17_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_LSB 0x14
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_MSB 0x18
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext16_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_LSB 0xF
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_MSB 0x13
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext15_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_LSB 0xA
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_MSB 0xE
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext14_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_LSB 0x5
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_MSB 0x9
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext13_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_LSB 0x0
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_MSB 0x4
-#define QIB_7322_RcvQPMapTableC_0_RcvQPMapContext12_RMASK 0x1F
-
-#define QIB_7322_RcvQPMapTableD_0_OFFS 0x1128
-#define QIB_7322_RcvQPMapTableD_0_DEF 0x0000000000000000
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_LSB 0x19
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_MSB 0x1D
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext23_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_LSB 0x14
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_MSB 0x18
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext22_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_LSB 0xF
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_MSB 0x13
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext21_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_LSB 0xA
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_MSB 0xE
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext20_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_LSB 0x5
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_MSB 0x9
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext19_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_LSB 0x0
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_MSB 0x4
-#define QIB_7322_RcvQPMapTableD_0_RcvQPMapContext18_RMASK 0x1F
-
-#define QIB_7322_RcvQPMapTableE_0_OFFS 0x1130
-#define QIB_7322_RcvQPMapTableE_0_DEF 0x0000000000000000
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_LSB 0x19
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_MSB 0x1D
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext29_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_LSB 0x14
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_MSB 0x18
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext28_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_LSB 0xF
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_MSB 0x13
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext27_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_LSB 0xA
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_MSB 0xE
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext26_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_LSB 0x5
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_MSB 0x9
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext25_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_LSB 0x0
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_MSB 0x4
-#define QIB_7322_RcvQPMapTableE_0_RcvQPMapContext24_RMASK 0x1F
-
-#define QIB_7322_RcvQPMapTableF_0_OFFS 0x1138
-#define QIB_7322_RcvQPMapTableF_0_DEF 0x0000000000000000
-#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_LSB 0x5
-#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_MSB 0x9
-#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext31_RMASK 0x1F
-#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_LSB 0x0
-#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_MSB 0x4
-#define QIB_7322_RcvQPMapTableF_0_RcvQPMapContext30_RMASK 0x1F
-
-#define QIB_7322_PSStat_0_OFFS 0x1140
-#define QIB_7322_PSStat_0_DEF 0x0000000000000000
-
-#define QIB_7322_PSStart_0_OFFS 0x1148
-#define QIB_7322_PSStart_0_DEF 0x0000000000000000
-
-#define QIB_7322_PSInterval_0_OFFS 0x1150
-#define QIB_7322_PSInterval_0_DEF 0x0000000000000000
-
-#define QIB_7322_RcvStatus_0_OFFS 0x1160
-#define QIB_7322_RcvStatus_0_DEF 0x0000000000000000
-#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_LSB 0x1
-#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_MSB 0x5
-#define QIB_7322_RcvStatus_0_DmaeqBlockingContext_RMASK 0x1F
-#define QIB_7322_RcvStatus_0_RxPktInProgress_LSB 0x0
-#define QIB_7322_RcvStatus_0_RxPktInProgress_MSB 0x0
-#define QIB_7322_RcvStatus_0_RxPktInProgress_RMASK 0x1
-
-#define QIB_7322_RcvPartitionKey_0_OFFS 0x1168
-#define QIB_7322_RcvPartitionKey_0_DEF 0x0000000000000000
-
-#define QIB_7322_RcvQPMulticastContext_0_OFFS 0x1170
-#define QIB_7322_RcvQPMulticastContext_0_DEF 0x0000000000000000
-#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_LSB 0x0
-#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_MSB 0x4
-#define QIB_7322_RcvQPMulticastContext_0_RcvQpMcContext_RMASK 0x1F
-
-#define QIB_7322_RcvPktLEDCnt_0_OFFS 0x1178
-#define QIB_7322_RcvPktLEDCnt_0_DEF 0x0000000000000000
-#define QIB_7322_RcvPktLEDCnt_0_ONperiod_LSB 0x20
-#define QIB_7322_RcvPktLEDCnt_0_ONperiod_MSB 0x3F
-#define QIB_7322_RcvPktLEDCnt_0_ONperiod_RMASK 0xFFFFFFFF
-#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_LSB 0x0
-#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_MSB 0x1F
-#define QIB_7322_RcvPktLEDCnt_0_OFFperiod_RMASK 0xFFFFFFFF
-
-#define QIB_7322_SendDmaIdleCnt_0_OFFS 0x1180
-#define QIB_7322_SendDmaIdleCnt_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_LSB 0x0
-#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_MSB 0xF
-#define QIB_7322_SendDmaIdleCnt_0_SendDmaIdleCnt_RMASK 0xFFFF
-
-#define QIB_7322_SendDmaReloadCnt_0_OFFS 0x1188
-#define QIB_7322_SendDmaReloadCnt_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_LSB 0x0
-#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_MSB 0xF
-#define QIB_7322_SendDmaReloadCnt_0_SendDmaReloadCnt_RMASK 0xFFFF
-
-#define QIB_7322_SendDmaDescCnt_0_OFFS 0x1190
-#define QIB_7322_SendDmaDescCnt_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_LSB 0x0
-#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_MSB 0xF
-#define QIB_7322_SendDmaDescCnt_0_SendDmaDescCnt_RMASK 0xFFFF
-
-#define QIB_7322_SendCtrl_0_OFFS 0x11C0
-#define QIB_7322_SendCtrl_0_DEF 0x0000000000000000
-#define QIB_7322_SendCtrl_0_IBVLArbiterEn_LSB 0xF
-#define QIB_7322_SendCtrl_0_IBVLArbiterEn_MSB 0xF
-#define QIB_7322_SendCtrl_0_IBVLArbiterEn_RMASK 0x1
-#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_LSB 0xE
-#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_MSB 0xE
-#define QIB_7322_SendCtrl_0_TxeDrainRmFifo_RMASK 0x1
-#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_LSB 0xD
-#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_MSB 0xD
-#define QIB_7322_SendCtrl_0_TxeDrainLaFifo_RMASK 0x1
-#define QIB_7322_SendCtrl_0_SDmaHalt_LSB 0xC
-#define QIB_7322_SendCtrl_0_SDmaHalt_MSB 0xC
-#define QIB_7322_SendCtrl_0_SDmaHalt_RMASK 0x1
-#define QIB_7322_SendCtrl_0_SDmaEnable_LSB 0xB
-#define QIB_7322_SendCtrl_0_SDmaEnable_MSB 0xB
-#define QIB_7322_SendCtrl_0_SDmaEnable_RMASK 0x1
-#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_LSB 0xA
-#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_MSB 0xA
-#define QIB_7322_SendCtrl_0_SDmaSingleDescriptor_RMASK 0x1
-#define QIB_7322_SendCtrl_0_SDmaIntEnable_LSB 0x9
-#define QIB_7322_SendCtrl_0_SDmaIntEnable_MSB 0x9
-#define QIB_7322_SendCtrl_0_SDmaIntEnable_RMASK 0x1
-#define QIB_7322_SendCtrl_0_SDmaCleanup_LSB 0x8
-#define QIB_7322_SendCtrl_0_SDmaCleanup_MSB 0x8
-#define QIB_7322_SendCtrl_0_SDmaCleanup_RMASK 0x1
-#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_LSB 0x7
-#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_MSB 0x7
-#define QIB_7322_SendCtrl_0_ForceCreditUpToDate_RMASK 0x1
-#define QIB_7322_SendCtrl_0_SendEnable_LSB 0x3
-#define QIB_7322_SendCtrl_0_SendEnable_MSB 0x3
-#define QIB_7322_SendCtrl_0_SendEnable_RMASK 0x1
-#define QIB_7322_SendCtrl_0_TxeBypassIbc_LSB 0x1
-#define QIB_7322_SendCtrl_0_TxeBypassIbc_MSB 0x1
-#define QIB_7322_SendCtrl_0_TxeBypassIbc_RMASK 0x1
-#define QIB_7322_SendCtrl_0_TxeAbortIbc_LSB 0x0
-#define QIB_7322_SendCtrl_0_TxeAbortIbc_MSB 0x0
-#define QIB_7322_SendCtrl_0_TxeAbortIbc_RMASK 0x1
-
-#define QIB_7322_SendDmaBase_0_OFFS 0x11F8
-#define QIB_7322_SendDmaBase_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaBase_0_SendDmaBase_LSB 0x0
-#define QIB_7322_SendDmaBase_0_SendDmaBase_MSB 0x2F
-#define QIB_7322_SendDmaBase_0_SendDmaBase_RMASK 0xFFFFFFFFFFFF
-
-#define QIB_7322_SendDmaLenGen_0_OFFS 0x1200
-#define QIB_7322_SendDmaLenGen_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaLenGen_0_Generation_LSB 0x10
-#define QIB_7322_SendDmaLenGen_0_Generation_MSB 0x12
-#define QIB_7322_SendDmaLenGen_0_Generation_RMASK 0x7
-#define QIB_7322_SendDmaLenGen_0_Length_LSB 0x0
-#define QIB_7322_SendDmaLenGen_0_Length_MSB 0xF
-#define QIB_7322_SendDmaLenGen_0_Length_RMASK 0xFFFF
-
-#define QIB_7322_SendDmaTail_0_OFFS 0x1208
-#define QIB_7322_SendDmaTail_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaTail_0_SendDmaTail_LSB 0x0
-#define QIB_7322_SendDmaTail_0_SendDmaTail_MSB 0xF
-#define QIB_7322_SendDmaTail_0_SendDmaTail_RMASK 0xFFFF
-
-#define QIB_7322_SendDmaHead_0_OFFS 0x1210
-#define QIB_7322_SendDmaHead_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_LSB 0x20
-#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_MSB 0x2F
-#define QIB_7322_SendDmaHead_0_InternalSendDmaHead_RMASK 0xFFFF
-#define QIB_7322_SendDmaHead_0_SendDmaHead_LSB 0x0
-#define QIB_7322_SendDmaHead_0_SendDmaHead_MSB 0xF
-#define QIB_7322_SendDmaHead_0_SendDmaHead_RMASK 0xFFFF
-
-#define QIB_7322_SendDmaHeadAddr_0_OFFS 0x1218
-#define QIB_7322_SendDmaHeadAddr_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_LSB 0x0
-#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_MSB 0x2F
-#define QIB_7322_SendDmaHeadAddr_0_SendDmaHeadAddr_RMASK 0xFFFFFFFFFFFF
-
-#define QIB_7322_SendDmaBufMask0_0_OFFS 0x1220
-#define QIB_7322_SendDmaBufMask0_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_LSB 0x0
-#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_MSB 0x3F
-#define QIB_7322_SendDmaBufMask0_0_BufMask_63_0_RMASK 0x0
-
-#define QIB_7322_SendDmaStatus_0_OFFS 0x1238
-#define QIB_7322_SendDmaStatus_0_DEF 0x0000000042000000
-#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_LSB 0x3F
-#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_MSB 0x3F
-#define QIB_7322_SendDmaStatus_0_ScoreBoardDrainInProg_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_HaltInProg_LSB 0x3E
-#define QIB_7322_SendDmaStatus_0_HaltInProg_MSB 0x3E
-#define QIB_7322_SendDmaStatus_0_HaltInProg_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_LSB 0x3D
-#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_MSB 0x3D
-#define QIB_7322_SendDmaStatus_0_InternalSDmaHalt_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_LSB 0x2F
-#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_MSB 0x3C
-#define QIB_7322_SendDmaStatus_0_ScbDescIndex_13_0_RMASK 0x3FFF
-#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_LSB 0x28
-#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_MSB 0x2E
-#define QIB_7322_SendDmaStatus_0_RpyLowAddr_6_0_RMASK 0x7F
-#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_LSB 0x20
-#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_MSB 0x27
-#define QIB_7322_SendDmaStatus_0_RpyTag_7_0_RMASK 0xFF
-#define QIB_7322_SendDmaStatus_0_ScbFull_LSB 0x1F
-#define QIB_7322_SendDmaStatus_0_ScbFull_MSB 0x1F
-#define QIB_7322_SendDmaStatus_0_ScbFull_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_ScbEmpty_LSB 0x1E
-#define QIB_7322_SendDmaStatus_0_ScbEmpty_MSB 0x1E
-#define QIB_7322_SendDmaStatus_0_ScbEmpty_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_ScbEntryValid_LSB 0x1D
-#define QIB_7322_SendDmaStatus_0_ScbEntryValid_MSB 0x1D
-#define QIB_7322_SendDmaStatus_0_ScbEntryValid_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_LSB 0x1C
-#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_MSB 0x1C
-#define QIB_7322_SendDmaStatus_0_ScbFetchDescFlag_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_LSB 0x1B
-#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_MSB 0x1B
-#define QIB_7322_SendDmaStatus_0_SplFifoReadyToGo_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_LSB 0x1A
-#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_MSB 0x1A
-#define QIB_7322_SendDmaStatus_0_SplFifoDisarmed_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_LSB 0x19
-#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_MSB 0x19
-#define QIB_7322_SendDmaStatus_0_SplFifoEmpty_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_SplFifoFull_LSB 0x18
-#define QIB_7322_SendDmaStatus_0_SplFifoFull_MSB 0x18
-#define QIB_7322_SendDmaStatus_0_SplFifoFull_RMASK 0x1
-#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_LSB 0x10
-#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_MSB 0x17
-#define QIB_7322_SendDmaStatus_0_SplFifoBufNum_RMASK 0xFF
-#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_LSB 0x0
-#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_MSB 0xF
-#define QIB_7322_SendDmaStatus_0_SplFifoDescIndex_RMASK 0xFFFF
-
-#define QIB_7322_SendDmaPriorityThld_0_OFFS 0x1258
-#define QIB_7322_SendDmaPriorityThld_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_LSB 0x0
-#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_MSB 0x3
-#define QIB_7322_SendDmaPriorityThld_0_PriorityThreshold_RMASK 0xF
-
-#define QIB_7322_SendHdrErrSymptom_0_OFFS 0x1260
-#define QIB_7322_SendHdrErrSymptom_0_DEF 0x0000000000000000
-#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_LSB 0x6
-#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_MSB 0x6
-#define QIB_7322_SendHdrErrSymptom_0_NonKeyPacket_RMASK 0x1
-#define QIB_7322_SendHdrErrSymptom_0_GRHFail_LSB 0x5
-#define QIB_7322_SendHdrErrSymptom_0_GRHFail_MSB 0x5
-#define QIB_7322_SendHdrErrSymptom_0_GRHFail_RMASK 0x1
-#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_LSB 0x4
-#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_MSB 0x4
-#define QIB_7322_SendHdrErrSymptom_0_PkeyFail_RMASK 0x1
-#define QIB_7322_SendHdrErrSymptom_0_QPFail_LSB 0x3
-#define QIB_7322_SendHdrErrSymptom_0_QPFail_MSB 0x3
-#define QIB_7322_SendHdrErrSymptom_0_QPFail_RMASK 0x1
-#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_LSB 0x2
-#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_MSB 0x2
-#define QIB_7322_SendHdrErrSymptom_0_SLIDFail_RMASK 0x1
-#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_LSB 0x1
-#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_MSB 0x1
-#define QIB_7322_SendHdrErrSymptom_0_RawIPV6_RMASK 0x1
-#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_LSB 0x0
-#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_MSB 0x0
-#define QIB_7322_SendHdrErrSymptom_0_PacketTooSmall_RMASK 0x1
-
-#define QIB_7322_RxCreditVL0_0_OFFS 0x1280
-#define QIB_7322_RxCreditVL0_0_DEF 0x0000000000000000
-#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_LSB 0x10
-#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_MSB 0x1B
-#define QIB_7322_RxCreditVL0_0_RxBufrConsumedVL_RMASK 0xFFF
-#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_LSB 0x0
-#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_MSB 0xB
-#define QIB_7322_RxCreditVL0_0_RxMaxCreditVL_RMASK 0xFFF
-
-#define QIB_7322_SendDmaBufUsed0_0_OFFS 0x1480
-#define QIB_7322_SendDmaBufUsed0_0_DEF 0x0000000000000000
-#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_LSB 0x0
-#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_MSB 0x3F
-#define QIB_7322_SendDmaBufUsed0_0_BufUsed_63_0_RMASK 0x0
-
-#define QIB_7322_SendCheckControl_0_OFFS 0x14A8
-#define QIB_7322_SendCheckControl_0_DEF 0x0000000000000000
-#define QIB_7322_SendCheckControl_0_PKey_En_LSB 0x4
-#define QIB_7322_SendCheckControl_0_PKey_En_MSB 0x4
-#define QIB_7322_SendCheckControl_0_PKey_En_RMASK 0x1
-#define QIB_7322_SendCheckControl_0_BTHQP_En_LSB 0x3
-#define QIB_7322_SendCheckControl_0_BTHQP_En_MSB 0x3
-#define QIB_7322_SendCheckControl_0_BTHQP_En_RMASK 0x1
-#define QIB_7322_SendCheckControl_0_SLID_En_LSB 0x2
-#define QIB_7322_SendCheckControl_0_SLID_En_MSB 0x2
-#define QIB_7322_SendCheckControl_0_SLID_En_RMASK 0x1
-#define QIB_7322_SendCheckControl_0_RawIPV6_En_LSB 0x1
-#define QIB_7322_SendCheckControl_0_RawIPV6_En_MSB 0x1
-#define QIB_7322_SendCheckControl_0_RawIPV6_En_RMASK 0x1
-#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_LSB 0x0
-#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_MSB 0x0
-#define QIB_7322_SendCheckControl_0_PacketTooSmall_En_RMASK 0x1
-
-#define QIB_7322_SendIBSLIDMask_0_OFFS 0x14B0
-#define QIB_7322_SendIBSLIDMask_0_DEF 0x0000000000000000
-#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_LSB 0x0
-#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_MSB 0xF
-#define QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK 0xFFFF
-
-#define QIB_7322_SendIBSLIDAssign_0_OFFS 0x14B8
-#define QIB_7322_SendIBSLIDAssign_0_DEF 0x0000000000000000
-#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_LSB 0x0
-#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_MSB 0xF
-#define QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK 0xFFFF
-
-#define QIB_7322_IBCStatusA_0_OFFS 0x1540
-#define QIB_7322_IBCStatusA_0_DEF 0x0000000000000X02
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_LSB 0x27
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_MSB 0x27
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL7_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_LSB 0x26
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_MSB 0x26
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL6_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_LSB 0x25
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_MSB 0x25
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL5_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_LSB 0x24
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_MSB 0x24
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL4_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_LSB 0x23
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_MSB 0x23
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL3_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_LSB 0x22
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_MSB 0x22
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL2_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_LSB 0x21
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_MSB 0x21
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL1_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_LSB 0x20
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_MSB 0x20
-#define QIB_7322_IBCStatusA_0_TxCreditOk_VL0_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_TxReady_LSB 0x1E
-#define QIB_7322_IBCStatusA_0_TxReady_MSB 0x1E
-#define QIB_7322_IBCStatusA_0_TxReady_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_LSB 0x1D
-#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_MSB 0x1D
-#define QIB_7322_IBCStatusA_0_LinkSpeedQDR_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_LSB 0xF
-#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_MSB 0xF
-#define QIB_7322_IBCStatusA_0_ScrambleCapRemote_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_ScrambleEn_LSB 0xE
-#define QIB_7322_IBCStatusA_0_ScrambleEn_MSB 0xE
-#define QIB_7322_IBCStatusA_0_ScrambleEn_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_LSB 0xD
-#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_MSB 0xD
-#define QIB_7322_IBCStatusA_0_IBTxLaneReversed_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_LSB 0xC
-#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_MSB 0xC
-#define QIB_7322_IBCStatusA_0_IBRxLaneReversed_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_LSB 0xA
-#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_MSB 0xA
-#define QIB_7322_IBCStatusA_0_DDS_RXEQ_FAIL_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_LinkWidthActive_LSB 0x9
-#define QIB_7322_IBCStatusA_0_LinkWidthActive_MSB 0x9
-#define QIB_7322_IBCStatusA_0_LinkWidthActive_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_LinkSpeedActive_LSB 0x8
-#define QIB_7322_IBCStatusA_0_LinkSpeedActive_MSB 0x8
-#define QIB_7322_IBCStatusA_0_LinkSpeedActive_RMASK 0x1
-#define QIB_7322_IBCStatusA_0_LinkState_LSB 0x5
-#define QIB_7322_IBCStatusA_0_LinkState_MSB 0x7
-#define QIB_7322_IBCStatusA_0_LinkState_RMASK 0x7
-#define QIB_7322_IBCStatusA_0_LinkTrainingState_LSB 0x0
-#define QIB_7322_IBCStatusA_0_LinkTrainingState_MSB 0x4
-#define QIB_7322_IBCStatusA_0_LinkTrainingState_RMASK 0x1F
-
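[Editor's note, not part of the removed header: every field in this file is described by an _LSB/_MSB pair plus an _RMASK that is already right-justified to bit 0, so a field value is recovered by shifting right by LSB and masking with RMASK. A minimal sketch of that idiom follows; the helper names are hypothetical (the driver proper wraps this pattern in its own macros).]

#include <stdint.h>

/* Extract a field from a 64-bit register value using the LSB/RMASK
 * convention of this header: shift down to bit 0, then mask. */
static inline uint64_t qib7322_read_field(uint64_t reg, unsigned int lsb,
					  uint64_t rmask)
{
	return (reg >> lsb) & rmask;
}

/* Example: decode LinkState (bits 7:5, 3-bit field) from a raw
 * IBCStatusA_0 register value using the definitions above. */
static inline uint64_t qib7322_link_state(uint64_t ibcstatus_a)
{
	return qib7322_read_field(ibcstatus_a,
				  QIB_7322_IBCStatusA_0_LinkState_LSB,
				  QIB_7322_IBCStatusA_0_LinkState_RMASK);
}
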
-#define QIB_7322_IBCStatusB_0_OFFS 0x1548
-#define QIB_7322_IBCStatusB_0_DEF 0x00000000XXXXXXXX
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_LSB 0x27
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_MSB 0x27
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_debug_RMASK 0x1
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_LSB 0x26
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_MSB 0x26
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_reached_threshold_RMASK 0x1
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_LSB 0x25
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_MSB 0x25
-#define QIB_7322_IBCStatusB_0_ibsd_adaptation_timer_started_RMASK 0x1
-#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_LSB 0x24
-#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_MSB 0x24
-#define QIB_7322_IBCStatusB_0_heartbeat_timed_out_RMASK 0x1
-#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_LSB 0x20
-#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_MSB 0x23
-#define QIB_7322_IBCStatusB_0_heartbeat_crosstalk_RMASK 0xF
-#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_LSB 0x1E
-#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_MSB 0x1F
-#define QIB_7322_IBCStatusB_0_RxEqLocalDevice_RMASK 0x3
-#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_LSB 0x1A
-#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_MSB 0x1D
-#define QIB_7322_IBCStatusB_0_ReqDDSLocalFromRmt_RMASK 0xF
-#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_LSB 0x0
-#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_MSB 0x19
-#define QIB_7322_IBCStatusB_0_LinkRoundTripLatency_RMASK 0x3FFFFFF
-
-#define QIB_7322_IBCCtrlA_0_OFFS 0x1560
-#define QIB_7322_IBCCtrlA_0_DEF 0x0000000000000000
-#define QIB_7322_IBCCtrlA_0_Loopback_LSB 0x3F
-#define QIB_7322_IBCCtrlA_0_Loopback_MSB 0x3F
-#define QIB_7322_IBCCtrlA_0_Loopback_RMASK 0x1
-#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_LSB 0x3E
-#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_MSB 0x3E
-#define QIB_7322_IBCCtrlA_0_LinkDownDefaultState_RMASK 0x1
-#define QIB_7322_IBCCtrlA_0_IBLinkEn_LSB 0x3D
-#define QIB_7322_IBCCtrlA_0_IBLinkEn_MSB 0x3D
-#define QIB_7322_IBCCtrlA_0_IBLinkEn_RMASK 0x1
-#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_LSB 0x3C
-#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_MSB 0x3C
-#define QIB_7322_IBCCtrlA_0_IBStatIntReductionEn_RMASK 0x1
-#define QIB_7322_IBCCtrlA_0_NumVLane_LSB 0x30
-#define QIB_7322_IBCCtrlA_0_NumVLane_MSB 0x32
-#define QIB_7322_IBCCtrlA_0_NumVLane_RMASK 0x7
-#define QIB_7322_IBCCtrlA_0_OverrunThreshold_LSB 0x24
-#define QIB_7322_IBCCtrlA_0_OverrunThreshold_MSB 0x27
-#define QIB_7322_IBCCtrlA_0_OverrunThreshold_RMASK 0xF
-#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_LSB 0x20
-#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_MSB 0x23
-#define QIB_7322_IBCCtrlA_0_PhyerrThreshold_RMASK 0xF
-#define QIB_7322_IBCCtrlA_0_MaxPktLen_LSB 0x15
-#define QIB_7322_IBCCtrlA_0_MaxPktLen_MSB 0x1F
-#define QIB_7322_IBCCtrlA_0_MaxPktLen_RMASK 0x7FF
-#define QIB_7322_IBCCtrlA_0_LinkCmd_LSB 0x13
-#define QIB_7322_IBCCtrlA_0_LinkCmd_MSB 0x14
-#define QIB_7322_IBCCtrlA_0_LinkCmd_RMASK 0x3
-#define QIB_7322_IBCCtrlA_0_LinkInitCmd_LSB 0x10
-#define QIB_7322_IBCCtrlA_0_LinkInitCmd_MSB 0x12
-#define QIB_7322_IBCCtrlA_0_LinkInitCmd_RMASK 0x7
-#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_LSB 0x8
-#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_MSB 0xF
-#define QIB_7322_IBCCtrlA_0_FlowCtrlWaterMark_RMASK 0xFF
-#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_LSB 0x0
-#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_MSB 0x7
-#define QIB_7322_IBCCtrlA_0_FlowCtrlPeriod_RMASK 0xFF
-
-#define QIB_7322_IBCCtrlB_0_OFFS 0x1568
-#define QIB_7322_IBCCtrlB_0_DEF 0x00000000000305FF
-#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_LSB 0x30
-#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_MSB 0x3F
-#define QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK 0xFFFF
-#define QIB_7322_IBCCtrlB_0_IB_DLID_LSB 0x20
-#define QIB_7322_IBCCtrlB_0_IB_DLID_MSB 0x2F
-#define QIB_7322_IBCCtrlB_0_IB_DLID_RMASK 0xFFFF
-#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_LSB 0x1B
-#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_MSB 0x1B
-#define QIB_7322_IBCCtrlB_0_IB_ENABLE_FILT_DPKT_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_LSB 0x1A
-#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_MSB 0x1A
-#define QIB_7322_IBCCtrlB_0_HRTBT_REQ_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_LSB 0x12
-#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_MSB 0x19
-#define QIB_7322_IBCCtrlB_0_HRTBT_PORT_RMASK 0xFF
-#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_LSB 0x11
-#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_MSB 0x11
-#define QIB_7322_IBCCtrlB_0_HRTBT_AUTO_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_LSB 0x10
-#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_MSB 0x10
-#define QIB_7322_IBCCtrlB_0_HRTBT_ENB_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_SD_DDS_LSB 0xC
-#define QIB_7322_IBCCtrlB_0_SD_DDS_MSB 0xF
-#define QIB_7322_IBCCtrlB_0_SD_DDS_RMASK 0xF
-#define QIB_7322_IBCCtrlB_0_SD_DDSV_LSB 0xB
-#define QIB_7322_IBCCtrlB_0_SD_DDSV_MSB 0xB
-#define QIB_7322_IBCCtrlB_0_SD_DDSV_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_LSB 0xA
-#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_MSB 0xA
-#define QIB_7322_IBCCtrlB_0_SD_ADD_ENB_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_LSB 0x9
-#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_MSB 0x9
-#define QIB_7322_IBCCtrlB_0_SD_RX_EQUAL_ENABLE_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_LSB 0x8
-#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_MSB 0x8
-#define QIB_7322_IBCCtrlB_0_IB_LANE_REV_SUPPORTED_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_LSB 0x7
-#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_MSB 0x7
-#define QIB_7322_IBCCtrlB_0_IB_POLARITY_REV_SUPP_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_LSB 0x5
-#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_MSB 0x6
-#define QIB_7322_IBCCtrlB_0_IB_NUM_CHANNELS_RMASK 0x3
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_LSB 0x4
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_MSB 0x4
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_QDR_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_LSB 0x3
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_MSB 0x3
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_DDR_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_LSB 0x2
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_MSB 0x2
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_SDR_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_LSB 0x1
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_MSB 0x1
-#define QIB_7322_IBCCtrlB_0_SD_SPEED_RMASK 0x1
-#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_LSB 0x0
-#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_MSB 0x0
-#define QIB_7322_IBCCtrlB_0_IB_ENHANCED_MODE_RMASK 0x1
-
-#define QIB_7322_IBCCtrlC_0_OFFS 0x1570
-#define QIB_7322_IBCCtrlC_0_DEF 0x0000000000000301
-#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_LSB 0x5
-#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_MSB 0x9
-#define QIB_7322_IBCCtrlC_0_IB_BACK_PORCH_RMASK 0x1F
-#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_LSB 0x0
-#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_MSB 0x4
-#define QIB_7322_IBCCtrlC_0_IB_FRONT_PORCH_RMASK 0x1F
-
-#define QIB_7322_HRTBT_GUID_0_OFFS 0x1588
-#define QIB_7322_HRTBT_GUID_0_DEF 0x0000000000000000
-
-#define QIB_7322_IB_SDTEST_IF_TX_0_OFFS 0x1590
-#define QIB_7322_IB_SDTEST_IF_TX_0_DEF 0x0000000000000000
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_LSB 0x30
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_MSB 0x3F
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_RX_CFG_RMASK 0xFFFF
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_LSB 0x20
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_MSB 0x2F
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_TX_CFG_RMASK 0xFFFF
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_LSB 0xD
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_MSB 0xF
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_SPEED_RMASK 0x7
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_LSB 0xB
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_MSB 0xC
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_TX_OPCODE_RMASK 0x3
-#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_LSB 0x4
-#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_MSB 0x4
-#define QIB_7322_IB_SDTEST_IF_TX_0_CREDIT_CHANGE_RMASK 0x1
-#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_LSB 0x2
-#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_MSB 0x3
-#define QIB_7322_IB_SDTEST_IF_TX_0_VL_CAP_RMASK 0x3
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_LSB 0x1
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_MSB 0x1
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_3_TX_VALID_RMASK 0x1
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_LSB 0x0
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_MSB 0x0
-#define QIB_7322_IB_SDTEST_IF_TX_0_TS_T_TX_VALID_RMASK 0x1
-
-#define QIB_7322_IB_SDTEST_IF_RX_0_OFFS 0x1598
-#define QIB_7322_IB_SDTEST_IF_RX_0_DEF 0x0000000000000000
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_LSB 0x30
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_MSB 0x3F
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_RX_CFG_RMASK 0xFFFF
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_LSB 0x20
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_MSB 0x2F
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_TX_CFG_RMASK 0xFFFF
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_LSB 0x18
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_MSB 0x1F
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_B_RMASK 0xFF
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_LSB 0x10
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_MSB 0x17
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_RX_A_RMASK 0xFF
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_LSB 0x1
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_MSB 0x1
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_3_RX_VALID_RMASK 0x1
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_LSB 0x0
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_MSB 0x0
-#define QIB_7322_IB_SDTEST_IF_RX_0_TS_T_RX_VALID_RMASK 0x1
-
-#define QIB_7322_IBNCModeCtrl_0_OFFS 0x15B8
-#define QIB_7322_IBNCModeCtrl_0_DEF 0x0000000000000000
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_LSB 0x22
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_MSB 0x22
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteForce_RMASK 0x1
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_LSB 0x21
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_MSB 0x21
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapRemoteMask_RMASK 0x1
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_LSB 0x20
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_MSB 0x20
-#define QIB_7322_IBNCModeCtrl_0_ScrambleCapLocal_RMASK 0x1
-#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_LSB 0x11
-#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_MSB 0x19
-#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS2_RMASK 0x1FF
-#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_LSB 0x8
-#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_MSB 0x10
-#define QIB_7322_IBNCModeCtrl_0_TSMCode_TS1_RMASK 0x1FF
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_LSB 0x2
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_MSB 0x2
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_ignore_TSM_on_rx_RMASK 0x1
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_LSB 0x1
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_MSB 0x1
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS2_RMASK 0x1
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_LSB 0x0
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_MSB 0x0
-#define QIB_7322_IBNCModeCtrl_0_TSMEnable_send_TS1_RMASK 0x1
-
-#define QIB_7322_IBSerdesStatus_0_OFFS 0x15D0
-#define QIB_7322_IBSerdesStatus_0_DEF 0x0000000000000000
-
-#define QIB_7322_IBPCSConfig_0_OFFS 0x15D8
-#define QIB_7322_IBPCSConfig_0_DEF 0x0000000000000007
-#define QIB_7322_IBPCSConfig_0_link_sync_mask_LSB 0x9
-#define QIB_7322_IBPCSConfig_0_link_sync_mask_MSB 0x12
-#define QIB_7322_IBPCSConfig_0_link_sync_mask_RMASK 0x3FF
-#define QIB_7322_IBPCSConfig_0_xcv_rreset_LSB 0x2
-#define QIB_7322_IBPCSConfig_0_xcv_rreset_MSB 0x2
-#define QIB_7322_IBPCSConfig_0_xcv_rreset_RMASK 0x1
-#define QIB_7322_IBPCSConfig_0_xcv_treset_LSB 0x1
-#define QIB_7322_IBPCSConfig_0_xcv_treset_MSB 0x1
-#define QIB_7322_IBPCSConfig_0_xcv_treset_RMASK 0x1
-#define QIB_7322_IBPCSConfig_0_tx_rx_reset_LSB 0x0
-#define QIB_7322_IBPCSConfig_0_tx_rx_reset_MSB 0x0
-#define QIB_7322_IBPCSConfig_0_tx_rx_reset_RMASK 0x1
-
-#define QIB_7322_IBSerdesCtrl_0_OFFS 0x15E0
-#define QIB_7322_IBSerdesCtrl_0_DEF 0x0000000000FFA00F
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_LSB 0x1A
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_MSB 0x1A
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_QDR_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_LSB 0x19
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_MSB 0x19
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_DDR_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_LSB 0x18
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_MSB 0x18
-#define QIB_7322_IBSerdesCtrl_0_DISABLE_RXLATOFF_SDR_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_LSB 0x14
-#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_MSB 0x17
-#define QIB_7322_IBSerdesCtrl_0_CHANNEL_RESET_N_RMASK 0xF
-#define QIB_7322_IBSerdesCtrl_0_CGMODE_LSB 0x10
-#define QIB_7322_IBSerdesCtrl_0_CGMODE_MSB 0x13
-#define QIB_7322_IBSerdesCtrl_0_CGMODE_RMASK 0xF
-#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_LSB 0xF
-#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_MSB 0xF
-#define QIB_7322_IBSerdesCtrl_0_IB_LAT_MODE_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_LSB 0xD
-#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_MSB 0xD
-#define QIB_7322_IBSerdesCtrl_0_RXLOSEN_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_LPEN_LSB 0xC
-#define QIB_7322_IBSerdesCtrl_0_LPEN_MSB 0xC
-#define QIB_7322_IBSerdesCtrl_0_LPEN_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_PLLPD_LSB 0xB
-#define QIB_7322_IBSerdesCtrl_0_PLLPD_MSB 0xB
-#define QIB_7322_IBSerdesCtrl_0_PLLPD_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_TXPD_LSB 0xA
-#define QIB_7322_IBSerdesCtrl_0_TXPD_MSB 0xA
-#define QIB_7322_IBSerdesCtrl_0_TXPD_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_RXPD_LSB 0x9
-#define QIB_7322_IBSerdesCtrl_0_RXPD_MSB 0x9
-#define QIB_7322_IBSerdesCtrl_0_RXPD_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_TXIDLE_LSB 0x8
-#define QIB_7322_IBSerdesCtrl_0_TXIDLE_MSB 0x8
-#define QIB_7322_IBSerdesCtrl_0_TXIDLE_RMASK 0x1
-#define QIB_7322_IBSerdesCtrl_0_CMODE_LSB 0x0
-#define QIB_7322_IBSerdesCtrl_0_CMODE_MSB 0x6
-#define QIB_7322_IBSerdesCtrl_0_CMODE_RMASK 0x7F
-
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_OFFS 0x1600
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_DEF 0x0000000000000000
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_LSB 0x1F
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_MSB 0x1F
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_tx_override_deemphasis_select_RMASK 0x1
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_LSB 0x1E
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_MSB 0x1E
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_reset_tx_deemphasis_override_RMASK 0x1
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_LSB 0xE
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_MSB 0x11
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txampcntl_d2a_RMASK 0xF
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_LSB 0x9
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_MSB 0xD
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txc0_ena_RMASK 0x1F
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_LSB 0x5
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_MSB 0x8
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcp1_ena_RMASK 0xF
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_LSB 0x3
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_MSB 0x4
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_xtra_emph0_RMASK 0x3
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_LSB 0x0
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_MSB 0x2
-#define QIB_7322_IBSD_TX_DEEMPHASIS_OVERRIDE_0_txcn1_ena_RMASK 0x7
-
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_OFFS 0x1640
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_DEF 0x0000000000000000
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_LSB 0x27
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_MSB 0x27
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_LSB 0x26
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_MSB 0x26
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_LSB 0x25
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_MSB 0x25
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_LSB 0x24
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_MSB 0x24
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenagain_sdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_LSB 0x23
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_MSB 0x23
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_LSB 0x22
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_MSB 0x22
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_LSB 0x21
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_MSB 0x21
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_LSB 0x20
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_MSB 0x20
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenale_sdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_LSB 0x18
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_MSB 0x1F
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch3_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_LSB 0x10
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_MSB 0x17
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch2_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_LSB 0x8
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_MSB 0xF
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch1_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_LSB 0x0
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_MSB 0x7
-#define QIB_7322_ADAPT_DISABLE_STATIC_SDR_0_static_disable_rxenadfe_sdr_ch0_RMASK 0xFF
-
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_OFFS 0x1648
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_DEF 0x0000000000000000
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_LSB 0x27
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_MSB 0x27
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_LSB 0x26
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_MSB 0x26
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_LSB 0x25
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_MSB 0x25
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_LSB 0x24
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_MSB 0x24
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenagain_sdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_LSB 0x23
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_MSB 0x23
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_LSB 0x22
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_MSB 0x22
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_LSB 0x21
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_MSB 0x21
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_LSB 0x20
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_MSB 0x20
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenale_sdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_LSB 0x18
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_MSB 0x1F
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch3_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_LSB 0x10
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_MSB 0x17
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch2_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_LSB 0x8
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_MSB 0xF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch1_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_LSB 0x0
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_MSB 0x7
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_SDR_0_dyn_disable_rxenadfe_sdr_ch0_RMASK 0xFF
-
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_OFFS 0x1650
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_DEF 0x0000000000000000
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_LSB 0x27
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_MSB 0x27
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_LSB 0x26
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_MSB 0x26
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_LSB 0x25
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_MSB 0x25
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_LSB 0x24
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_MSB 0x24
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenagain_ddr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_LSB 0x23
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_MSB 0x23
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_LSB 0x22
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_MSB 0x22
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_LSB 0x21
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_MSB 0x21
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_LSB 0x20
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_MSB 0x20
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenale_ddr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_LSB 0x18
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_MSB 0x1F
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch3_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_LSB 0x10
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_MSB 0x17
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch2_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_LSB 0x8
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_MSB 0xF
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch1_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_LSB 0x0
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_MSB 0x7
-#define QIB_7322_ADAPT_DISABLE_STATIC_DDR_0_static_disable_rxenadfe_ddr_ch0_RMASK 0xFF
-
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_OFFS 0x1658
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_DEF 0x0000000000000000
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_LSB 0x27
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_MSB 0x27
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_LSB 0x26
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_MSB 0x26
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_LSB 0x25
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_MSB 0x25
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_LSB 0x24
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_MSB 0x24
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenagain_ddr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_LSB 0x23
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_MSB 0x23
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_LSB 0x22
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_MSB 0x22
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_LSB 0x21
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_MSB 0x21
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_LSB 0x20
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_MSB 0x20
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenale_ddr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_LSB 0x18
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_MSB 0x1F
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch3_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_LSB 0x10
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_MSB 0x17
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch2_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_LSB 0x8
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_MSB 0xF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch1_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_LSB 0x0
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_MSB 0x7
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_DDR_0_dyn_disable_rxenadfe_ddr_ch0_RMASK 0xFF
-
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_OFFS 0x1660
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_DEF 0x0000000000000000
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_LSB 0x27
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_MSB 0x27
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_LSB 0x26
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_MSB 0x26
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_LSB 0x25
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_MSB 0x25
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_LSB 0x24
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_MSB 0x24
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenagain_qdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_LSB 0x23
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_MSB 0x23
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_LSB 0x22
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_MSB 0x22
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_LSB 0x21
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_MSB 0x21
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_LSB 0x20
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_MSB 0x20
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenale_qdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_LSB 0x18
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_MSB 0x1F
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch3_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_LSB 0x10
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_MSB 0x17
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch2_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_LSB 0x8
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_MSB 0xF
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch1_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_LSB 0x0
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_MSB 0x7
-#define QIB_7322_ADAPT_DISABLE_STATIC_QDR_0_static_disable_rxenadfe_qdr_ch0_RMASK 0xFF
-
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_OFFS 0x1668
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_DEF 0x0000000000000000
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_LSB 0x27
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_MSB 0x27
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_LSB 0x26
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_MSB 0x26
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_LSB 0x25
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_MSB 0x25
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_LSB 0x24
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_MSB 0x24
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenagain_qdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_LSB 0x23
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_MSB 0x23
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch3_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_LSB 0x22
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_MSB 0x22
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch2_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_LSB 0x21
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_MSB 0x21
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch1_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_LSB 0x20
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_MSB 0x20
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenale_qdr_ch0_RMASK 0x1
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_LSB 0x18
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_MSB 0x1F
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch3_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_LSB 0x10
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_MSB 0x17
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch2_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_LSB 0x8
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_MSB 0xF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch1_RMASK 0xFF
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_LSB 0x0
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_MSB 0x7
-#define QIB_7322_ADAPT_DISABLE_DYNAMIC_QDR_0_dyn_disable_rxenadfe_qdr_ch0_RMASK 0xFF
-
-#define QIB_7322_ADAPT_DISABLE_TIMER_THRESHOLD_0_OFFS 0x1670
-#define QIB_7322_ADAPT_DISABLE_TIMER_THRESHOLD_0_DEF 0x0000000000000000
-
-#define QIB_7322_HighPriorityLimit_0_OFFS 0x1BC0
-#define QIB_7322_HighPriorityLimit_0_DEF 0x0000000000000000
-#define QIB_7322_HighPriorityLimit_0_Limit_LSB 0x0
-#define QIB_7322_HighPriorityLimit_0_Limit_MSB 0x7
-#define QIB_7322_HighPriorityLimit_0_Limit_RMASK 0xFF
-
-#define QIB_7322_LowPriority0_0_OFFS 0x1C00
-#define QIB_7322_LowPriority0_0_DEF 0x0000000000000000
-#define QIB_7322_LowPriority0_0_VirtualLane_LSB 0x10
-#define QIB_7322_LowPriority0_0_VirtualLane_MSB 0x12
-#define QIB_7322_LowPriority0_0_VirtualLane_RMASK 0x7
-#define QIB_7322_LowPriority0_0_Weight_LSB 0x0
-#define QIB_7322_LowPriority0_0_Weight_MSB 0x7
-#define QIB_7322_LowPriority0_0_Weight_RMASK 0xFF
-
-#define QIB_7322_HighPriority0_0_OFFS 0x1E00
-#define QIB_7322_HighPriority0_0_DEF 0x0000000000000000
-#define QIB_7322_HighPriority0_0_VirtualLane_LSB 0x10
-#define QIB_7322_HighPriority0_0_VirtualLane_MSB 0x12
-#define QIB_7322_HighPriority0_0_VirtualLane_RMASK 0x7
-#define QIB_7322_HighPriority0_0_Weight_LSB 0x0
-#define QIB_7322_HighPriority0_0_Weight_MSB 0x7
-#define QIB_7322_HighPriority0_0_Weight_RMASK 0xFF
-
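Each Low/HighPriority arbitration entry packs a virtual lane (bits 16..18)
and a weight (bits 0..7). A hedged sketch of composing one entry value from
the macros above (make_vlarb_entry is an illustrative name):

static inline __u64 make_vlarb_entry(__u32 vl, __u32 weight)
{
	return (((__u64)vl & QIB_7322_HighPriority0_0_VirtualLane_RMASK)
			<< QIB_7322_HighPriority0_0_VirtualLane_LSB) |
	       ((__u64)weight & QIB_7322_HighPriority0_0_Weight_RMASK);
}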
-#define QIB_7322_CntrRegBase_1_OFFS 0x2028
-#define QIB_7322_CntrRegBase_1_DEF 0x0000000000013000
-
-#define QIB_7322_RcvQPMulticastContext_1_OFFS 0x2170
-
-#define QIB_7322_SendCtrl_1_OFFS 0x21C0
-
-#define QIB_7322_SendBufAvail0_OFFS 0x3000
-#define QIB_7322_SendBufAvail0_DEF 0x0000000000000000
-#define QIB_7322_SendBufAvail0_SendBuf_31_0_LSB 0x0
-#define QIB_7322_SendBufAvail0_SendBuf_31_0_MSB 0x3F
-#define QIB_7322_SendBufAvail0_SendBuf_31_0_RMASK 0x0
-
-#define QIB_7322_MsixTable_OFFS 0x8000
-#define QIB_7322_MsixTable_DEF 0x0000000000000000
-
-#define QIB_7322_MsixPba_OFFS 0x9000
-#define QIB_7322_MsixPba_DEF 0x0000000000000000
-
-#define QIB_7322_LAMemory_OFFS 0xA000
-#define QIB_7322_LAMemory_DEF 0x0000000000000000
-
-#define QIB_7322_LBIntCnt_OFFS 0x11000
-#define QIB_7322_LBIntCnt_DEF 0x0000000000000000
-
-#define QIB_7322_LBFlowStallCnt_OFFS 0x11008
-#define QIB_7322_LBFlowStallCnt_DEF 0x0000000000000000
-
-#define QIB_7322_RxTIDFullErrCnt_OFFS 0x110D0
-#define QIB_7322_RxTIDFullErrCnt_DEF 0x0000000000000000
-
-#define QIB_7322_RxTIDValidErrCnt_OFFS 0x110D8
-#define QIB_7322_RxTIDValidErrCnt_DEF 0x0000000000000000
-
-#define QIB_7322_RxP0HdrEgrOvflCnt_OFFS 0x110E8
-#define QIB_7322_RxP0HdrEgrOvflCnt_DEF 0x0000000000000000
-
-#define QIB_7322_PcieRetryBufDiagQwordCnt_OFFS 0x111A0
-#define QIB_7322_PcieRetryBufDiagQwordCnt_DEF 0x0000000000000000
-
-#define QIB_7322_RxTidFlowDropCnt_OFFS 0x111E0
-#define QIB_7322_RxTidFlowDropCnt_DEF 0x0000000000000000
-
-#define QIB_7322_LBIntCnt_0_OFFS 0x12000
-#define QIB_7322_LBIntCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxCreditUpToDateTimeOut_0_OFFS 0x12008
-#define QIB_7322_TxCreditUpToDateTimeOut_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxSDmaDescCnt_0_OFFS 0x12010
-#define QIB_7322_TxSDmaDescCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxUnsupVLErrCnt_0_OFFS 0x12018
-#define QIB_7322_TxUnsupVLErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxDataPktCnt_0_OFFS 0x12020
-#define QIB_7322_TxDataPktCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxFlowPktCnt_0_OFFS 0x12028
-#define QIB_7322_TxFlowPktCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxDwordCnt_0_OFFS 0x12030
-#define QIB_7322_TxDwordCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxLenErrCnt_0_OFFS 0x12038
-#define QIB_7322_TxLenErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxMaxMinLenErrCnt_0_OFFS 0x12040
-#define QIB_7322_TxMaxMinLenErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxUnderrunCnt_0_OFFS 0x12048
-#define QIB_7322_TxUnderrunCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxFlowStallCnt_0_OFFS 0x12050
-#define QIB_7322_TxFlowStallCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxDroppedPktCnt_0_OFFS 0x12058
-#define QIB_7322_TxDroppedPktCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxDroppedPktCnt_0_OFFS 0x12060
-#define QIB_7322_RxDroppedPktCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxDataPktCnt_0_OFFS 0x12068
-#define QIB_7322_RxDataPktCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxFlowPktCnt_0_OFFS 0x12070
-#define QIB_7322_RxFlowPktCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxDwordCnt_0_OFFS 0x12078
-#define QIB_7322_RxDwordCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxLenErrCnt_0_OFFS 0x12080
-#define QIB_7322_RxLenErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxMaxMinLenErrCnt_0_OFFS 0x12088
-#define QIB_7322_RxMaxMinLenErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxICRCErrCnt_0_OFFS 0x12090
-#define QIB_7322_RxICRCErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxVCRCErrCnt_0_OFFS 0x12098
-#define QIB_7322_RxVCRCErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxFlowCtrlViolCnt_0_OFFS 0x120A0
-#define QIB_7322_RxFlowCtrlViolCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxVersionErrCnt_0_OFFS 0x120A8
-#define QIB_7322_RxVersionErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxLinkMalformCnt_0_OFFS 0x120B0
-#define QIB_7322_RxLinkMalformCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxEBPCnt_0_OFFS 0x120B8
-#define QIB_7322_RxEBPCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxLPCRCErrCnt_0_OFFS 0x120C0
-#define QIB_7322_RxLPCRCErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxBufOvflCnt_0_OFFS 0x120C8
-#define QIB_7322_RxBufOvflCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxLenTruncateCnt_0_OFFS 0x120D0
-#define QIB_7322_RxLenTruncateCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxPKeyMismatchCnt_0_OFFS 0x120E0
-#define QIB_7322_RxPKeyMismatchCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_IBLinkDownedCnt_0_OFFS 0x12180
-#define QIB_7322_IBLinkDownedCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_IBSymbolErrCnt_0_OFFS 0x12188
-#define QIB_7322_IBSymbolErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_IBStatusChangeCnt_0_OFFS 0x12190
-#define QIB_7322_IBStatusChangeCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_IBLinkErrRecoveryCnt_0_OFFS 0x12198
-#define QIB_7322_IBLinkErrRecoveryCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_ExcessBufferOvflCnt_0_OFFS 0x121A8
-#define QIB_7322_ExcessBufferOvflCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_LocalLinkIntegrityErrCnt_0_OFFS 0x121B0
-#define QIB_7322_LocalLinkIntegrityErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxVlErrCnt_0_OFFS 0x121B8
-#define QIB_7322_RxVlErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxDlidFltrCnt_0_OFFS 0x121C0
-#define QIB_7322_RxDlidFltrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxVL15DroppedPktCnt_0_OFFS 0x121C8
-#define QIB_7322_RxVL15DroppedPktCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxOtherLocalPhyErrCnt_0_OFFS 0x121D0
-#define QIB_7322_RxOtherLocalPhyErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_RxQPInvalidContextCnt_0_OFFS 0x121D8
-#define QIB_7322_RxQPInvalidContextCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_TxHeadersErrCnt_0_OFFS 0x121F8
-#define QIB_7322_TxHeadersErrCnt_0_DEF 0x0000000000000000
-
-#define QIB_7322_PSRcvDataCount_0_OFFS 0x12218
-#define QIB_7322_PSRcvDataCount_0_DEF 0x0000000000000000
-
-#define QIB_7322_PSRcvPktsCount_0_OFFS 0x12220
-#define QIB_7322_PSRcvPktsCount_0_DEF 0x0000000000000000
-
-#define QIB_7322_PSXmitDataCount_0_OFFS 0x12228
-#define QIB_7322_PSXmitDataCount_0_DEF 0x0000000000000000
-
-#define QIB_7322_PSXmitPktsCount_0_OFFS 0x12230
-#define QIB_7322_PSXmitPktsCount_0_DEF 0x0000000000000000
-
-#define QIB_7322_PSXmitWaitCount_0_OFFS 0x12238
-#define QIB_7322_PSXmitWaitCount_0_DEF 0x0000000000000000
-
-#define QIB_7322_LBIntCnt_1_OFFS 0x13000
-#define QIB_7322_LBIntCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxCreditUpToDateTimeOut_1_OFFS 0x13008
-#define QIB_7322_TxCreditUpToDateTimeOut_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxSDmaDescCnt_1_OFFS 0x13010
-#define QIB_7322_TxSDmaDescCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxUnsupVLErrCnt_1_OFFS 0x13018
-#define QIB_7322_TxUnsupVLErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxDataPktCnt_1_OFFS 0x13020
-#define QIB_7322_TxDataPktCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxFlowPktCnt_1_OFFS 0x13028
-#define QIB_7322_TxFlowPktCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxDwordCnt_1_OFFS 0x13030
-#define QIB_7322_TxDwordCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxLenErrCnt_1_OFFS 0x13038
-#define QIB_7322_TxLenErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxMaxMinLenErrCnt_1_OFFS 0x13040
-#define QIB_7322_TxMaxMinLenErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxUnderrunCnt_1_OFFS 0x13048
-#define QIB_7322_TxUnderrunCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxFlowStallCnt_1_OFFS 0x13050
-#define QIB_7322_TxFlowStallCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxDroppedPktCnt_1_OFFS 0x13058
-#define QIB_7322_TxDroppedPktCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxDroppedPktCnt_1_OFFS 0x13060
-#define QIB_7322_RxDroppedPktCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxDataPktCnt_1_OFFS 0x13068
-#define QIB_7322_RxDataPktCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxFlowPktCnt_1_OFFS 0x13070
-#define QIB_7322_RxFlowPktCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxDwordCnt_1_OFFS 0x13078
-#define QIB_7322_RxDwordCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxLenErrCnt_1_OFFS 0x13080
-#define QIB_7322_RxLenErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxMaxMinLenErrCnt_1_OFFS 0x13088
-#define QIB_7322_RxMaxMinLenErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxICRCErrCnt_1_OFFS 0x13090
-#define QIB_7322_RxICRCErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxVCRCErrCnt_1_OFFS 0x13098
-#define QIB_7322_RxVCRCErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxFlowCtrlViolCnt_1_OFFS 0x130A0
-#define QIB_7322_RxFlowCtrlViolCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxVersionErrCnt_1_OFFS 0x130A8
-#define QIB_7322_RxVersionErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxLinkMalformCnt_1_OFFS 0x130B0
-#define QIB_7322_RxLinkMalformCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxEBPCnt_1_OFFS 0x130B8
-#define QIB_7322_RxEBPCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxLPCRCErrCnt_1_OFFS 0x130C0
-#define QIB_7322_RxLPCRCErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxBufOvflCnt_1_OFFS 0x130C8
-#define QIB_7322_RxBufOvflCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxLenTruncateCnt_1_OFFS 0x130D0
-#define QIB_7322_RxLenTruncateCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxPKeyMismatchCnt_1_OFFS 0x130E0
-#define QIB_7322_RxPKeyMismatchCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_IBLinkDownedCnt_1_OFFS 0x13180
-#define QIB_7322_IBLinkDownedCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_IBSymbolErrCnt_1_OFFS 0x13188
-#define QIB_7322_IBSymbolErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_IBStatusChangeCnt_1_OFFS 0x13190
-#define QIB_7322_IBStatusChangeCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_IBLinkErrRecoveryCnt_1_OFFS 0x13198
-#define QIB_7322_IBLinkErrRecoveryCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_ExcessBufferOvflCnt_1_OFFS 0x131A8
-#define QIB_7322_ExcessBufferOvflCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_LocalLinkIntegrityErrCnt_1_OFFS 0x131B0
-#define QIB_7322_LocalLinkIntegrityErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxVlErrCnt_1_OFFS 0x131B8
-#define QIB_7322_RxVlErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxDlidFltrCnt_1_OFFS 0x131C0
-#define QIB_7322_RxDlidFltrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxVL15DroppedPktCnt_1_OFFS 0x131C8
-#define QIB_7322_RxVL15DroppedPktCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxOtherLocalPhyErrCnt_1_OFFS 0x131D0
-#define QIB_7322_RxOtherLocalPhyErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_RxQPInvalidContextCnt_1_OFFS 0x131D8
-#define QIB_7322_RxQPInvalidContextCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_TxHeadersErrCnt_1_OFFS 0x131F8
-#define QIB_7322_TxHeadersErrCnt_1_DEF 0x0000000000000000
-
-#define QIB_7322_PSRcvDataCount_1_OFFS 0x13218
-#define QIB_7322_PSRcvDataCount_1_DEF 0x0000000000000000
-
-#define QIB_7322_PSRcvPktsCount_1_OFFS 0x13220
-#define QIB_7322_PSRcvPktsCount_1_DEF 0x0000000000000000
-
-#define QIB_7322_PSXmitDataCount_1_OFFS 0x13228
-#define QIB_7322_PSXmitDataCount_1_DEF 0x0000000000000000
-
-#define QIB_7322_PSXmitPktsCount_1_OFFS 0x13230
-#define QIB_7322_PSXmitPktsCount_1_DEF 0x0000000000000000
-
-#define QIB_7322_PSXmitWaitCount_1_OFFS 0x13238
-#define QIB_7322_PSXmitWaitCount_1_DEF 0x0000000000000000
-
-#define QIB_7322_RcvEgrArray_OFFS 0x14000
-#define QIB_7322_RcvEgrArray_DEF 0x0000000000000000
-#define QIB_7322_RcvEgrArray_RT_BufSize_LSB 0x25
-#define QIB_7322_RcvEgrArray_RT_BufSize_MSB 0x27
-#define QIB_7322_RcvEgrArray_RT_BufSize_RMASK 0x7
-#define QIB_7322_RcvEgrArray_RT_Addr_LSB 0x0
-#define QIB_7322_RcvEgrArray_RT_Addr_MSB 0x24
-#define QIB_7322_RcvEgrArray_RT_Addr_RMASK 0x1FFFFFFFFF
-
-#define QIB_7322_RcvTIDArray0_OFFS 0x50000
-#define QIB_7322_RcvTIDArray0_DEF 0x0000000000000000
-#define QIB_7322_RcvTIDArray0_RT_BufSize_LSB 0x25
-#define QIB_7322_RcvTIDArray0_RT_BufSize_MSB 0x27
-#define QIB_7322_RcvTIDArray0_RT_BufSize_RMASK 0x7
-#define QIB_7322_RcvTIDArray0_RT_Addr_LSB 0x0
-#define QIB_7322_RcvTIDArray0_RT_Addr_MSB 0x24
-#define QIB_7322_RcvTIDArray0_RT_Addr_RMASK 0x1FFFFFFFFF
-
-#define QIB_7322_IBSD_DDS_MAP_TABLE_0_OFFS 0xD0000
-#define QIB_7322_IBSD_DDS_MAP_TABLE_0_DEF 0x0000000000000000
-
-#define QIB_7322_RcvHdrTail0_OFFS 0x200000
-#define QIB_7322_RcvHdrTail0_DEF 0x0000000000000000
-
-#define QIB_7322_RcvHdrHead0_OFFS 0x200008
-#define QIB_7322_RcvHdrHead0_DEF 0x0000000000000000
-#define QIB_7322_RcvHdrHead0_counter_LSB 0x20
-#define QIB_7322_RcvHdrHead0_counter_MSB 0x2F
-#define QIB_7322_RcvHdrHead0_counter_RMASK 0xFFFF
-#define QIB_7322_RcvHdrHead0_RcvHeadPointer_LSB 0x0
-#define QIB_7322_RcvHdrHead0_RcvHeadPointer_MSB 0x1F
-#define QIB_7322_RcvHdrHead0_RcvHeadPointer_RMASK 0xFFFFFFFF
-
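RcvHdrHead0 combines the software head pointer (low 32 bits) with a 16-bit
counter in bits 32..47, presumably for interrupt mitigation given the field
name. A sketch of assembling the value a driver would write back after
consuming entries (make_rcvhdrhead is an illustrative name):

static inline __u64 make_rcvhdrhead(__u32 head, __u16 intr_count)
{
	return ((__u64)intr_count << QIB_7322_RcvHdrHead0_counter_LSB) |
	       ((__u64)head & QIB_7322_RcvHdrHead0_RcvHeadPointer_RMASK);
}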
-#define QIB_7322_RcvEgrIndexTail0_OFFS 0x200010
-#define QIB_7322_RcvEgrIndexTail0_DEF 0x0000000000000000
-
-#define QIB_7322_RcvEgrIndexHead0_OFFS 0x200018
-#define QIB_7322_RcvEgrIndexHead0_DEF 0x0000000000000000
-
-#define QIB_7322_RcvTIDFlowTable0_OFFS 0x201000
-#define QIB_7322_RcvTIDFlowTable0_DEF 0x0000000000000000
-#define QIB_7322_RcvTIDFlowTable0_GenMismatch_LSB 0x1C
-#define QIB_7322_RcvTIDFlowTable0_GenMismatch_MSB 0x1C
-#define QIB_7322_RcvTIDFlowTable0_GenMismatch_RMASK 0x1
-#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_LSB 0x1B
-#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_MSB 0x1B
-#define QIB_7322_RcvTIDFlowTable0_SeqMismatch_RMASK 0x1
-#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_LSB 0x16
-#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_MSB 0x16
-#define QIB_7322_RcvTIDFlowTable0_KeepOnGenErr_RMASK 0x1
-#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_LSB 0x15
-#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_MSB 0x15
-#define QIB_7322_RcvTIDFlowTable0_KeepAfterSeqErr_RMASK 0x1
-#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_LSB 0x14
-#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_MSB 0x14
-#define QIB_7322_RcvTIDFlowTable0_HdrSuppEnabled_RMASK 0x1
-#define QIB_7322_RcvTIDFlowTable0_FlowValid_LSB 0x13
-#define QIB_7322_RcvTIDFlowTable0_FlowValid_MSB 0x13
-#define QIB_7322_RcvTIDFlowTable0_FlowValid_RMASK 0x1
-#define QIB_7322_RcvTIDFlowTable0_GenVal_LSB 0xB
-#define QIB_7322_RcvTIDFlowTable0_GenVal_MSB 0x12
-#define QIB_7322_RcvTIDFlowTable0_GenVal_RMASK 0xFF
-#define QIB_7322_RcvTIDFlowTable0_SeqNum_LSB 0x0
-#define QIB_7322_RcvTIDFlowTable0_SeqNum_MSB 0xA
-#define QIB_7322_RcvTIDFlowTable0_SeqNum_RMASK 0x7FF
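A RcvTIDFlowTable0 entry mixes single-bit status/control flags with an 8-bit
generation value and an 11-bit sequence number. A hedged decode sketch
(helper names are illustrative):

static inline int tidflow_gen_mismatch(__u64 entry)
{
	return (entry >> QIB_7322_RcvTIDFlowTable0_GenMismatch_LSB) & 1;
}

static inline __u32 tidflow_seqnum(__u64 entry)
{
	return entry & QIB_7322_RcvTIDFlowTable0_SeqNum_RMASK;	/* bits 0..10 */
}

static inline __u32 tidflow_genval(__u64 entry)
{
	return (entry >> QIB_7322_RcvTIDFlowTable0_GenVal_LSB) &
	       QIB_7322_RcvTIDFlowTable0_GenVal_RMASK;		/* bits 11..18 */
}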
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
deleted file mode 100644
index cf652831d8e7..000000000000
--- a/drivers/infiniband/hw/qib/qib_common.h
+++ /dev/null
@@ -1,798 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _QIB_COMMON_H
-#define _QIB_COMMON_H
-
-/*
- * This file contains defines, structures, etc. that are used
- * to communicate between kernel and user code.
- */
-
-/* This is the IEEE-assigned OUI for QLogic Inc. QLogic_IB */
-#define QIB_SRC_OUI_1 0x00
-#define QIB_SRC_OUI_2 0x11
-#define QIB_SRC_OUI_3 0x75
-
-/* version of protocol header (known to chip also). In the long run,
- * we should be able to generate and accept a range of version numbers;
- * for now we only accept one, and it's compiled in.
- */
-#define IPS_PROTO_VERSION 2
-
-/*
- * These are compile time constants that you may want to enable or disable
- * if you are trying to debug problems with code or performance.
- * QIB_VERBOSE_TRACING define as 1 if you want additional tracing in
- * fastpath code
- * QIB_TRACE_REGWRITES define as 1 if you want register writes to be
- * traced in fastpath code
- * _QIB_TRACING define as 0 if you want to remove all tracing in a
- * compilation unit
- */
-
-/*
- * The value in the BTH QP field that QLogic_IB uses to differentiate
- * a qlogic_ib protocol IB packet from a standard IB transport packet.
- * It needs to be even (0x656b78), because the LSB is sometimes
- * used for the MSB of context. The change may cause a problem
- * interoperating with older software.
- */
-#define QIB_KD_QP 0x656b78
-
-/*
- * These are the status bits readable (in ascii form, 64bit value)
- * from the "status" sysfs file. For binary compatibility, values
- * must remain as is; removed states can be reused for different
- * purposes.
- */
-#define QIB_STATUS_INITTED 0x1 /* basic initialization done */
-/* Chip has been found and initted */
-#define QIB_STATUS_CHIP_PRESENT 0x20
-/* IB link is at ACTIVE, usable for data traffic */
-#define QIB_STATUS_IB_READY 0x40
-/* link is configured, LID, MTU, etc. have been set */
-#define QIB_STATUS_IB_CONF 0x80
-/* A Fatal hardware error has occurred. */
-#define QIB_STATUS_HWERROR 0x200
-
-/*
- * The list of usermode accessible registers. Also see Reg_* later in file.
- */
-enum qib_ureg {
- /* (RO) DMA RcvHdr to be used next. */
- ur_rcvhdrtail = 0,
- /* (RW) RcvHdr entry to be processed next by host. */
- ur_rcvhdrhead = 1,
- /* (RO) Index of next Eager buffer to use. */
- ur_rcvegrindextail = 2,
- /* (RW) Eager TID to be processed next */
- ur_rcvegrindexhead = 3,
- /* For internal use only; max register number. */
- _QIB_UregMax
-};
-
-/* bit values for spi_runtime_flags */
-#define QIB_RUNTIME_PCIE 0x0002
-#define QIB_RUNTIME_FORCE_WC_ORDER 0x0004
-#define QIB_RUNTIME_RCVHDR_COPY 0x0008
-#define QIB_RUNTIME_MASTER 0x0010
-#define QIB_RUNTIME_RCHK 0x0020
-#define QIB_RUNTIME_NODMA_RTAIL 0x0080
-#define QIB_RUNTIME_SPECIAL_TRIGGER 0x0100
-#define QIB_RUNTIME_SDMA 0x0200
-#define QIB_RUNTIME_FORCE_PIOAVAIL 0x0400
-#define QIB_RUNTIME_PIO_REGSWAPPED 0x0800
-#define QIB_RUNTIME_CTXT_MSB_IN_QP 0x1000
-#define QIB_RUNTIME_CTXT_REDIRECT 0x2000
-#define QIB_RUNTIME_HDRSUPP 0x4000
-
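spi_runtime_flags (in struct qib_base_info, defined below) is a plain bitmap
of these QIB_RUNTIME_* bits, so user code tests capabilities with simple mask
operations; illustrative helpers, assuming a populated base-info struct:

static inline int qib_has_sdma(const struct qib_base_info *bi)
{
	return !!(bi->spi_runtime_flags & QIB_RUNTIME_SDMA);
}

static inline int qib_has_hdrsupp(const struct qib_base_info *bi)
{
	return !!(bi->spi_runtime_flags & QIB_RUNTIME_HDRSUPP);
}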
-/*
- * This structure is returned by qib_userinit() immediately after
- * open to get implementation-specific info, and info specific to this
- * instance.
- *
- * This struct must have explicit pad fields where type sizes
- * may result in different alignments between 32 and 64 bit
- * programs, since the 64 bit kernel requires the user code
- * to have matching offsets.
- */
-struct qib_base_info {
- /* version of hardware, for feature checking. */
- __u32 spi_hw_version;
- /* version of software, for feature checking. */
- __u32 spi_sw_version;
- /* QLogic_IB context assigned, goes into sent packets */
- __u16 spi_ctxt;
- __u16 spi_subctxt;
- /*
- * IB MTU; a packet's IB data must be less than this.
- * The MTU is in bytes, and will be a multiple of 4 bytes.
- */
- __u32 spi_mtu;
- /*
- * Size of a PIO buffer. Any given packet's total size must be less
- * than this (in words). Included is the starting control word, so
- * if 513 is returned, then total pkt size is 512 words or less.
- */
- __u32 spi_piosize;
- /* size of the TID cache in qlogic_ib, in entries */
- __u32 spi_tidcnt;
- /* size of the TID Eager list in qlogic_ib, in entries */
- __u32 spi_tidegrcnt;
- /* size of a single receive header queue entry in words. */
- __u32 spi_rcvhdrent_size;
- /*
- * Count of receive header queue entries allocated.
- * This may be less than the spu_rcvhdrcnt passed in!
- */
- __u32 spi_rcvhdr_cnt;
-
- /* per-chip and other runtime features bitmap (QIB_RUNTIME_*) */
- __u32 spi_runtime_flags;
-
- /* address where the hardware receive header queue is mapped
-  * into the user program.
-  */
- __u64 spi_rcvhdr_base;
-
- /* base address of eager TID receive buffers used by hardware. */
- __u64 spi_rcv_egrbufs;
-
- /* Allocated by initialization code, not by protocol. */
-
- /*
- * Size of each TID buffer in host memory, starting at
- * spi_rcv_egrbufs. The buffers are virtually contiguous.
- */
- __u32 spi_rcv_egrbufsize;
- /*
- * The special QP (queue pair) value that identifies a qlogic_ib
- * protocol packet from standard IB packets. More, probably much
- * more, to be added.
- */
- __u32 spi_qpair;
-
- /*
- * User register base for init code, not to be used directly by
- * protocol or applications. Always points to chip registers,
- * for normal or shared context.
- */
- __u64 spi_uregbase;
- /*
- * Maximum buffer size in bytes that can be used in a single TID
- * entry (assuming the buffer is aligned to this boundary). This is
- * the minimum of what the hardware and software support, and is
- * guaranteed to be a power of 2.
- */
- __u32 spi_tid_maxsize;
- /*
- * alignment of each pio send buffer (byte count
- * to add to spi_piobufbase to get to second buffer)
- */
- __u32 spi_pioalign;
- /*
- * The index of the first pio buffer available to this process;
- * needed to do lookup in spi_pioavailaddr; not added to
- * spi_piobufbase.
- */
- __u32 spi_pioindex;
- /* number of buffers mapped for this process */
- __u32 spi_piocnt;
-
- /*
- * Base address of writeonly pio buffers for this process.
- * Each buffer has spi_piosize words, and is aligned on spi_pioalign
- * boundaries. spi_piocnt buffers are mapped from this address
- */
- __u64 spi_piobufbase;
-
- /*
- * Base address of readonly memory copy of the pioavail registers.
- * There are 2 bits for each buffer.
- */
- __u64 spi_pioavailaddr;
-
- /*
- * Address where driver updates a copy of the interface and driver
- * status (QIB_STATUS_*) as a 64 bit value. It's followed by a
- * link status qword (formerly combined with driver status), then a
- * string indicating hardware error, if there was one.
- */
- __u64 spi_status;
-
- /* number of chip ctxts available to user processes */
- __u32 spi_nctxts;
- __u16 spi_unit; /* unit number of chip we are using */
- __u16 spi_port; /* IB port number we are using */
- /* num bufs in each contiguous set */
- __u32 spi_rcv_egrperchunk;
- /* size in bytes of each contiguous set */
- __u32 spi_rcv_egrchunksize;
- /* total size of mmap to cover full rcvegrbuffers */
- __u32 spi_rcv_egrbuftotlen;
- __u32 spi_rhf_offset; /* dword offset in hdrqent for rcvhdr flags */
- /* address of readonly memory copy of the rcvhdrq tail register. */
- __u64 spi_rcvhdr_tailaddr;
-
- /*
- * shared memory pages for subctxts if ctxt is shared; these cover
- * all the processes in the group sharing a single context.
- * all have enough space for the num_subcontexts value on this job.
- */
- __u64 spi_subctxt_uregbase;
- __u64 spi_subctxt_rcvegrbuf;
- __u64 spi_subctxt_rcvhdr_base;
-
- /* shared memory page for send buffer disarm status */
- __u64 spi_sendbuf_status;
-} __aligned(8);
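As the spi_pioavailaddr comment above notes, the shadow copy carries 2 status
bits per send buffer. A sketch under an assumed packing of 32 buffers per
64-bit word; the real hardware interleaving may differ, and the
QLOGIC_IB_SENDPIOAVAIL_*_SHIFT values are defined later in this file:

static inline int qib_piobuf_busy(const volatile __u64 *pioavail,
				  unsigned int n)
{
	__u64 w = pioavail[n / 32];		/* 32 buffers x 2 bits = 64 */
	unsigned int shift = 2 * (n % 32);

	return (w >> (shift + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)) & 1;
}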
-
-/*
- * This version number is given to the driver by the user code during
- * initialization in the spu_userversion field of qib_user_info, so
- * the driver can check for compatibility with user code.
- *
- * The major version changes when data structures
- * change in an incompatible way. The driver must be the same or higher
- * for initialization to succeed. In some cases, a higher version
- * driver will not interoperate with older software, and initialization
- * will return an error.
- */
-#define QIB_USER_SWMAJOR 1
-
-/*
- * Minor version differences are always compatible
- * within a major version; however, if the user software is newer
- * than the driver software, some new features and/or structure fields
- * may not be implemented; the user code must deal with this if it
- * cares, or it must abort after initialization reports the difference.
- */
-#define QIB_USER_SWMINOR 13
-
-#define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR)
-
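Since QIB_USER_SWVERSION packs the major in the high 16 bits and the minor in
the low 16, the rule above reduces to one comparison. An illustrative sketch,
not the driver's actual check:

static inline int qib_swmajor_compatible(__u32 user_swversion)
{
	/* majors must match exactly; minors interoperate within a major */
	return (user_swversion >> 16) == QIB_USER_SWMAJOR;
}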
-#ifndef QIB_KERN_TYPE
-#define QIB_KERN_TYPE 0
-#endif
-
-/*
- * Similarly, this is the kernel version going back to the user. It's
- * slightly different, in that we want to tell if the driver was built as
- * part of a QLogic release, or from the driver sources on openfabrics.org,
- * kernel.org, or a standard distribution, for support reasons.
- * The high bit is 0 for non-QLogic and 1 for QLogic-built/supplied.
- *
- * It's returned by the driver to the user code during initialization in the
- * spi_sw_version field of qib_base_info, so the user code can in turn
- * check for compatibility with the kernel.
-*/
-#define QIB_KERN_SWVERSION ((QIB_KERN_TYPE << 31) | QIB_USER_SWVERSION)
-
-/*
- * Define the driver version number. This is something that refers only
- * to the driver itself, not the software interfaces it supports.
- */
-#define QIB_DRIVER_VERSION_BASE "1.11"
-
-/* create the final driver version string */
-#ifdef QIB_IDSTR
-#define QIB_DRIVER_VERSION QIB_DRIVER_VERSION_BASE " " QIB_IDSTR
-#else
-#define QIB_DRIVER_VERSION QIB_DRIVER_VERSION_BASE
-#endif
-
-/*
- * If the unit is specified via open, HCA choice is fixed. If port is
- * specified, it's also fixed. Otherwise we try to spread contexts
- * across ports and HCAs, using different algorithms. WITHIN is
- * the old default, prior to this mechanism.
- */
-#define QIB_PORT_ALG_ACROSS 0 /* round robin contexts across HCAs, then
- * ports; this is the default */
-#define QIB_PORT_ALG_WITHIN 1 /* use all contexts on an HCA (round robin
- * active ports within), then next HCA */
-#define QIB_PORT_ALG_COUNT 2 /* number of algorithm choices */
-
-/*
- * This structure is passed to qib_userinit() to tell the driver where
- * user code buffers are, sizes, etc. The offsets and sizes of the
- * fields must remain unchanged, for binary compatibility. It can
- * be extended if userversion is changed, so user code can tell if needed.
- */
-struct qib_user_info {
- /*
- * version of user software, to detect compatibility issues.
- * Should be set to QIB_USER_SWVERSION.
- */
- __u32 spu_userversion;
-
- __u32 _spu_unused2;
-
- /* size of struct base_info to write to */
- __u32 spu_base_info_size;
-
- __u32 spu_port_alg; /* which QIB_PORT_ALG_*; unused if user minor < 11 */
-
- /*
- * If two or more processes wish to share a context, each process
- * must set the spu_subctxt_cnt and spu_subctxt_id to the same
- * values. The only restriction on the spu_subctxt_id is that
- * it be unique for a given node.
- */
- __u16 spu_subctxt_cnt;
- __u16 spu_subctxt_id;
-
- __u32 spu_port; /* IB port requested by user if > 0 */
-
- /*
- * address of struct base_info to write to
- */
- __u64 spu_base_info;
-
-} __aligned(8);
-
-/* User commands. */
-
-/* 16 available, was: old set up userspace (for old user code) */
-#define QIB_CMD_CTXT_INFO 17 /* find out what resources we got */
-#define QIB_CMD_RECV_CTRL 18 /* control receipt of packets */
-#define QIB_CMD_TID_UPDATE 19 /* update expected TID entries */
-#define QIB_CMD_TID_FREE 20 /* free expected TID entries */
-#define QIB_CMD_SET_PART_KEY 21 /* add partition key */
-/* 22 available, was: return info on slave processes (for old user code) */
-#define QIB_CMD_ASSIGN_CTXT 23 /* allocate HCA and ctxt */
-#define QIB_CMD_USER_INIT 24 /* set up userspace */
-#define QIB_CMD_UNUSED_1 25
-#define QIB_CMD_UNUSED_2 26
-#define QIB_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */
-#define QIB_CMD_POLL_TYPE 28 /* set the kind of polling we want */
-#define QIB_CMD_ARMLAUNCH_CTRL 29 /* armlaunch detection control */
-/* 30 is unused */
-#define QIB_CMD_SDMA_INFLIGHT 31 /* sdma inflight counter request */
-#define QIB_CMD_SDMA_COMPLETE 32 /* sdma completion counter request */
-/* 33 available, was a testing feature */
-#define QIB_CMD_DISARM_BUFS 34 /* disarm send buffers w/ errors */
-#define QIB_CMD_ACK_EVENT 35 /* ack & clear bits */
-#define QIB_CMD_CPUS_LIST 36 /* list of cpus allocated, for pinned
- * processes: qib_cpus_list */
-
-/*
- * QIB_CMD_ACK_EVENT obsoletes QIB_CMD_DISARM_BUFS, but we keep it for
- * compatibility with libraries from previous release. The ACK_EVENT
- * will take appropriate driver action (if any, just DISARM for now),
- * then clear the bits passed in as part of the mask. These bits are
- * in the first 64bit word at spi_sendbuf_status, and are passed to
- * the driver in the event_mask union as well.
- */
-#define _QIB_EVENT_DISARM_BUFS_BIT 0
-#define _QIB_EVENT_LINKDOWN_BIT 1
-#define _QIB_EVENT_LID_CHANGE_BIT 2
-#define _QIB_EVENT_LMC_CHANGE_BIT 3
-#define _QIB_EVENT_SL2VL_CHANGE_BIT 4
-#define _QIB_MAX_EVENT_BIT _QIB_EVENT_SL2VL_CHANGE_BIT
-
-#define QIB_EVENT_DISARM_BUFS_BIT (1UL << _QIB_EVENT_DISARM_BUFS_BIT)
-#define QIB_EVENT_LINKDOWN_BIT (1UL << _QIB_EVENT_LINKDOWN_BIT)
-#define QIB_EVENT_LID_CHANGE_BIT (1UL << _QIB_EVENT_LID_CHANGE_BIT)
-#define QIB_EVENT_LMC_CHANGE_BIT (1UL << _QIB_EVENT_LMC_CHANGE_BIT)
-#define QIB_EVENT_SL2VL_CHANGE_BIT (1UL << _QIB_EVENT_SL2VL_CHANGE_BIT)
-
-
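User code reads these bits from the first 64-bit word at spi_sendbuf_status
and passes the ones it wants cleared back via QIB_CMD_ACK_EVENT in the
event_mask union. A sketch of gathering the pending bits (the combined mask
and helper are illustrative, not part of the ABI):

#define QIB_EVENT_ALL_BITS \
	(QIB_EVENT_DISARM_BUFS_BIT | QIB_EVENT_LINKDOWN_BIT | \
	 QIB_EVENT_LID_CHANGE_BIT | QIB_EVENT_LMC_CHANGE_BIT | \
	 QIB_EVENT_SL2VL_CHANGE_BIT)

static inline __u64 qib_pending_events(const volatile __u64 *sendbuf_status)
{
	return *sendbuf_status & QIB_EVENT_ALL_BITS;
}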
-/*
- * Poll types
- */
-#define QIB_POLL_TYPE_ANYRCV 0x0
-#define QIB_POLL_TYPE_URGENT 0x1
-
-struct qib_ctxt_info {
- __u16 num_active; /* number of active units */
- __u16 unit; /* unit (chip) assigned to caller */
- __u16 port; /* IB port assigned to caller (1-based) */
- __u16 ctxt; /* ctxt on unit assigned to caller */
- __u16 subctxt; /* subctxt on unit assigned to caller */
- __u16 num_ctxts; /* number of ctxts available on unit */
- __u16 num_subctxts; /* number of subctxts opened on ctxt */
- __u16 rec_cpu; /* cpu # for affinity (ffff if none) */
-};
-
-struct qib_tid_info {
- __u32 tidcnt;
- /* make structure same size in 32 and 64 bit */
- __u32 tid__unused;
- /* virtual address of first page in transfer */
- __u64 tidvaddr;
- /* pointer (same size 32/64 bit) to __u16 tid array */
- __u64 tidlist;
-
- /*
- * pointer (same size 32/64 bit) to bitmap of TIDs used
- * for this call; checked for being large enough at open
- */
- __u64 tidmap;
-};
-
-struct qib_cmd {
- __u32 type; /* command type */
- union {
- struct qib_tid_info tid_info;
- struct qib_user_info user_info;
-
- /*
- * address in userspace where we should put the sdma
- * inflight counter
- */
- __u64 sdma_inflight;
- /*
- * address in userspace where we should put the sdma
- * completion counter
- */
- __u64 sdma_complete;
- /* address in userspace of struct qib_ctxt_info to
- write result to */
- __u64 ctxt_info;
- /* enable/disable receipt of packets */
- __u32 recv_ctrl;
- /* enable/disable armlaunch errors (non-zero to enable) */
- __u32 armlaunch_ctrl;
- /* partition key to set */
- __u16 part_key;
- /* user address of __u32 bitmask of active slaves */
- __u64 slave_mask_addr;
- /* type of polling we want */
- __u16 poll_type;
- /* back pressure enable bit for one particular context */
- __u8 ctxt_bp;
- /* qib_user_event_ack(), QIB_EVENT_* bits */
- __u64 event_mask;
- } cmd;
-};
-
-struct qib_iovec {
- /* Pointer to data, but same size 32 and 64 bit */
- __u64 iov_base;
-
- /*
- * Length of data; don't need 64 bits, but want
- * qib_sendpkt to remain same size as before 32 bit changes, so...
- */
- __u64 iov_len;
-};
-
-/*
- * Describes a single packet for send. Each packet can have one or more
- * buffers, but the total length (exclusive of IB headers) must be less
- * than the MTU, and if using the PIO method, entire packet length,
- * including IB headers, must be less than the qib_piosize value (words).
- * Use of this necessitates including sys/uio.h
- */
-struct __qib_sendpkt {
- __u32 sps_flags; /* flags for packet (TBD) */
- __u32 sps_cnt; /* number of entries to use in sps_iov */
- /* array of iov's describing packet. TEMPORARY */
- struct qib_iovec sps_iov[4];
-};
-
-/*
- * Diagnostics can send a packet by "writing" the following structs
- * to the diag data special file. This allows a custom pbc (+ static
- * rate) qword, so that special modes and deliberate changes to CRCs
- * can be used. The elements were also re-ordered for better
- * alignment and to avoid padding issues.
- */
-#define _DIAG_XPKT_VERS 3
-struct qib_diag_xpkt {
- __u16 version;
- __u16 unit;
- __u16 port;
- __u16 len;
- __u64 data;
- __u64 pbc_wd;
-};
-
-/*
- * Data layout in I2C flash (for GUID, etc.)
- * All fields are little-endian binary unless otherwise stated
- */
-#define QIB_FLASH_VERSION 2
-struct qib_flash {
- /* flash layout version (QIB_FLASH_VERSION) */
- __u8 if_fversion;
- /* checksum protecting if_length bytes */
- __u8 if_csum;
- /*
- * valid length (in use, protected by if_csum), including
- * if_fversion and if_csum themselves
- */
- __u8 if_length;
- /* the GUID, in network order */
- __u8 if_guid[8];
- /* number of GUIDs to use, starting from if_guid */
- __u8 if_numguid;
- /* the (last 10 characters of) board serial number, in ASCII */
- char if_serial[12];
- /* board mfg date (YYYYMMDD ASCII) */
- char if_mfgdate[8];
- /* last board rework/test date (YYYYMMDD ASCII) */
- char if_testdate[8];
- /* logging of error counts, TBD */
- __u8 if_errcntp[4];
- /* powered on hours, updated at driver unload */
- __u8 if_powerhour[2];
- /* ASCII free-form comment field */
- char if_comment[32];
- /* Backwards compatible prefix for longer QLogic Serial Numbers */
- char if_sprefix[4];
- /* 82 bytes used, min flash size is 128 bytes */
- __u8 if_future[46];
-};
-
-/*
- * These are the counters implemented in the chip, and are listed in order.
- * The InterCaps naming is taken straight from the chip spec.
- */
-struct qlogic_ib_counters {
- __u64 LBIntCnt;
- __u64 LBFlowStallCnt;
- __u64 TxSDmaDescCnt; /* was Reserved1 */
- __u64 TxUnsupVLErrCnt;
- __u64 TxDataPktCnt;
- __u64 TxFlowPktCnt;
- __u64 TxDwordCnt;
- __u64 TxLenErrCnt;
- __u64 TxMaxMinLenErrCnt;
- __u64 TxUnderrunCnt;
- __u64 TxFlowStallCnt;
- __u64 TxDroppedPktCnt;
- __u64 RxDroppedPktCnt;
- __u64 RxDataPktCnt;
- __u64 RxFlowPktCnt;
- __u64 RxDwordCnt;
- __u64 RxLenErrCnt;
- __u64 RxMaxMinLenErrCnt;
- __u64 RxICRCErrCnt;
- __u64 RxVCRCErrCnt;
- __u64 RxFlowCtrlErrCnt;
- __u64 RxBadFormatCnt;
- __u64 RxLinkProblemCnt;
- __u64 RxEBPCnt;
- __u64 RxLPCRCErrCnt;
- __u64 RxBufOvflCnt;
- __u64 RxTIDFullErrCnt;
- __u64 RxTIDValidErrCnt;
- __u64 RxPKeyMismatchCnt;
- __u64 RxP0HdrEgrOvflCnt;
- __u64 RxP1HdrEgrOvflCnt;
- __u64 RxP2HdrEgrOvflCnt;
- __u64 RxP3HdrEgrOvflCnt;
- __u64 RxP4HdrEgrOvflCnt;
- __u64 RxP5HdrEgrOvflCnt;
- __u64 RxP6HdrEgrOvflCnt;
- __u64 RxP7HdrEgrOvflCnt;
- __u64 RxP8HdrEgrOvflCnt;
- __u64 RxP9HdrEgrOvflCnt;
- __u64 RxP10HdrEgrOvflCnt;
- __u64 RxP11HdrEgrOvflCnt;
- __u64 RxP12HdrEgrOvflCnt;
- __u64 RxP13HdrEgrOvflCnt;
- __u64 RxP14HdrEgrOvflCnt;
- __u64 RxP15HdrEgrOvflCnt;
- __u64 RxP16HdrEgrOvflCnt;
- __u64 IBStatusChangeCnt;
- __u64 IBLinkErrRecoveryCnt;
- __u64 IBLinkDownedCnt;
- __u64 IBSymbolErrCnt;
- __u64 RxVL15DroppedPktCnt;
- __u64 RxOtherLocalPhyErrCnt;
- __u64 PcieRetryBufDiagQwordCnt;
- __u64 ExcessBufferOvflCnt;
- __u64 LocalLinkIntegrityErrCnt;
- __u64 RxVlErrCnt;
- __u64 RxDlidFltrCnt;
-};
-
-/*
- * The next set of defines are for packet headers, and chip register
- * and memory bits that are visible to and/or used by user-mode software.
- */
-
-/* RcvHdrFlags bits */
-#define QLOGIC_IB_RHF_LENGTH_MASK 0x7FF
-#define QLOGIC_IB_RHF_LENGTH_SHIFT 0
-#define QLOGIC_IB_RHF_RCVTYPE_MASK 0x7
-#define QLOGIC_IB_RHF_RCVTYPE_SHIFT 11
-#define QLOGIC_IB_RHF_EGRINDEX_MASK 0xFFF
-#define QLOGIC_IB_RHF_EGRINDEX_SHIFT 16
-#define QLOGIC_IB_RHF_SEQ_MASK 0xF
-#define QLOGIC_IB_RHF_SEQ_SHIFT 0
-#define QLOGIC_IB_RHF_HDRQ_OFFSET_MASK 0x7FF
-#define QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT 4
-#define QLOGIC_IB_RHF_H_ICRCERR 0x80000000
-#define QLOGIC_IB_RHF_H_VCRCERR 0x40000000
-#define QLOGIC_IB_RHF_H_PARITYERR 0x20000000
-#define QLOGIC_IB_RHF_H_LENERR 0x10000000
-#define QLOGIC_IB_RHF_H_MTUERR 0x08000000
-#define QLOGIC_IB_RHF_H_IHDRERR 0x04000000
-#define QLOGIC_IB_RHF_H_TIDERR 0x02000000
-#define QLOGIC_IB_RHF_H_MKERR 0x01000000
-#define QLOGIC_IB_RHF_H_IBERR 0x00800000
-#define QLOGIC_IB_RHF_H_ERR_MASK 0xFF800000
-#define QLOGIC_IB_RHF_L_USE_EGR 0x80000000
-#define QLOGIC_IB_RHF_L_SWA 0x00008000
-#define QLOGIC_IB_RHF_L_SWB 0x00004000
-
-/* qlogic_ib header fields */
-#define QLOGIC_IB_I_VERS_MASK 0xF
-#define QLOGIC_IB_I_VERS_SHIFT 28
-#define QLOGIC_IB_I_CTXT_MASK 0xF
-#define QLOGIC_IB_I_CTXT_SHIFT 24
-#define QLOGIC_IB_I_TID_MASK 0x7FF
-#define QLOGIC_IB_I_TID_SHIFT 13
-#define QLOGIC_IB_I_OFFSET_MASK 0x1FFF
-#define QLOGIC_IB_I_OFFSET_SHIFT 0
-
-/* K_PktFlags bits */
-#define QLOGIC_IB_KPF_INTR 0x1
-#define QLOGIC_IB_KPF_SUBCTXT_MASK 0x3
-#define QLOGIC_IB_KPF_SUBCTXT_SHIFT 1
-
-#define QLOGIC_IB_MAX_SUBCTXT 4
-
-/* SendPIO per-buffer control */
-#define QLOGIC_IB_SP_TEST 0x40
-#define QLOGIC_IB_SP_TESTEBP 0x20
-#define QLOGIC_IB_SP_TRIGGER_SHIFT 15
-
-/* SendPIOAvail bits */
-#define QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT 1
-#define QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT 0
-
-/* qlogic_ib header format */
-struct qib_header {
- /*
- * Version - 4 bits, Context - 4 bits, TID - 10 bits and Offset -
- * 14 bits before ECO change ~28 Dec 03. After that, Vers 4,
- * Context 4, TID 11, offset 13.
- */
- __le32 ver_ctxt_tid_offset;
- __le16 chksum;
- __le16 pkt_flags;
-};
-
-/*
- * qlogic_ib user message header format.
- * This structure contains the first 4 fields common to all protocols
- * that employ qlogic_ib.
- */
-struct qib_message_header {
- __be16 lrh[4];
- __be32 bth[3];
- /* fields below this point are in host byte order */
- struct qib_header iph;
- /* fields below are simplified, but should match PSM */
- /* some are accessed by the driver when packet splitting is needed */
- __u8 sub_opcode;
- __u8 flags;
- __u16 commidx;
- __u32 ack_seq_num;
- __u8 flowid;
- __u8 hdr_dlen;
- __u16 mqhdr;
- __u32 uwords[4];
-};
-
-/* sequence number bits for message */
-union qib_seqnum {
- struct {
- __u32 seq:11;
- __u32 gen:8;
- __u32 flow:5;
- };
- struct {
- __u32 pkt:16;
- __u32 msg:8;
- };
- __u32 val;
-};
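
A brief illustration of how the two overlapping views alias through .val
(hypothetical values; assumes the usual little-endian bitfield layout):

static u32 example_seqnum(void)
{
	union qib_seqnum sn = { .val = 0 };

	sn.seq = 5;	/* bits 0-10 */
	sn.gen = 2;	/* bits 11-18 */
	sn.flow = 1;	/* bits 19-23 */
	/* in the second view, sn.pkt spans bits 0-15 (seq plus the low
	 * five bits of gen) and sn.msg spans bits 16-23 */
	return sn.val;
}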
-
-/* qib receiving-dma tid-session-member */
-struct qib_tid_session_member {
- __u16 tid;
- __u16 offset;
- __u16 length;
-};
-
-/* IB - LRH header consts */
-#define QIB_LRH_GRH 0x0003 /* 1st word of IB LRH - next header: GRH */
-#define QIB_LRH_BTH 0x0002 /* 1st word of IB LRH - next header: BTH */
-
-/* misc. */
-#define SIZE_OF_CRC 1
-
-#define QIB_DEFAULT_P_KEY 0xFFFF
-#define QIB_PSN_MASK 0xFFFFFF
-#define QIB_EAGER_TID_ID QLOGIC_IB_I_TID_MASK
-#define QIB_MULTICAST_QPN 0xFFFFFF
-
-/* Receive Header Queue: receive type (from qlogic_ib) */
-#define RCVHQ_RCV_TYPE_EXPECTED 0
-#define RCVHQ_RCV_TYPE_EAGER 1
-#define RCVHQ_RCV_TYPE_NON_KD 2
-#define RCVHQ_RCV_TYPE_ERROR 3
-
-#define QIB_HEADER_QUEUE_WORDS 9
-
-/* Functions for extracting fields from rcvhdrq entries, for use by the driver. */
-static inline __u32 qib_hdrget_err_flags(const __le32 *rbuf)
-{
- return __le32_to_cpu(rbuf[1]) & QLOGIC_IB_RHF_H_ERR_MASK;
-}
-
-static inline __u32 qib_hdrget_rcv_type(const __le32 *rbuf)
-{
- return (__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_RCVTYPE_SHIFT) &
- QLOGIC_IB_RHF_RCVTYPE_MASK;
-}
-
-static inline __u32 qib_hdrget_length_in_bytes(const __le32 *rbuf)
-{
- return ((__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_LENGTH_SHIFT) &
- QLOGIC_IB_RHF_LENGTH_MASK) << 2;
-}
-
-static inline __u32 qib_hdrget_index(const __le32 *rbuf)
-{
- return (__le32_to_cpu(rbuf[0]) >> QLOGIC_IB_RHF_EGRINDEX_SHIFT) &
- QLOGIC_IB_RHF_EGRINDEX_MASK;
-}
-
-static inline __u32 qib_hdrget_seq(const __le32 *rbuf)
-{
- return (__le32_to_cpu(rbuf[1]) >> QLOGIC_IB_RHF_SEQ_SHIFT) &
- QLOGIC_IB_RHF_SEQ_MASK;
-}
-
-static inline __u32 qib_hdrget_offset(const __le32 *rbuf)
-{
- return (__le32_to_cpu(rbuf[1]) >> QLOGIC_IB_RHF_HDRQ_OFFSET_SHIFT) &
- QLOGIC_IB_RHF_HDRQ_OFFSET_MASK;
-}
-
-static inline __u32 qib_hdrget_use_egr_buf(const __le32 *rbuf)
-{
- return __le32_to_cpu(rbuf[0]) & QLOGIC_IB_RHF_L_USE_EGR;
-}
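
A sketch (not from the driver) of how a receive path might compose these
accessors on one rcvhdrq entry; rhf_addr points at the entry's two RHF words:

static void example_parse_rhf(const __le32 *rhf_addr)
{
	u32 eflags = qib_hdrget_err_flags(rhf_addr);
	u32 etype = qib_hdrget_rcv_type(rhf_addr);
	u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);

	if (eflags)
		return;	/* one of the QLOGIC_IB_RHF_H_* error bits is set */
	if (etype == RCVHQ_RCV_TYPE_EAGER) {
		u32 etail = qib_hdrget_index(rhf_addr);

		/* look up the eager buffer for etail; the payload is
		 * tlen bytes less the header and CRC */
		(void)etail;
	}
	(void)tlen;
}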
-#endif /* _QIB_COMMON_H */
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c
deleted file mode 100644
index caeb77d07a58..000000000000
--- a/drivers/infiniband/hw/qib/qib_debugfs.c
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * Copyright (c) 2013 - 2017 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-
-#include "qib.h"
-#include "qib_verbs.h"
-#include "qib_debugfs.h"
-
-static struct dentry *qib_dbg_root;
-
-#define DEBUGFS_FILE(name) \
-static const struct seq_operations _##name##_seq_ops = { \
- .start = _##name##_seq_start, \
- .next = _##name##_seq_next, \
- .stop = _##name##_seq_stop, \
- .show = _##name##_seq_show \
-}; \
-static int _##name##_open(struct inode *inode, struct file *s) \
-{ \
- struct seq_file *seq; \
- int ret; \
- ret = seq_open(s, &_##name##_seq_ops); \
- if (ret) \
- return ret; \
- seq = s->private_data; \
- seq->private = inode->i_private; \
- return 0; \
-} \
-static const struct file_operations _##name##_file_ops = { \
- .owner = THIS_MODULE, \
- .open = _##name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = seq_release \
-};
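
For reference, a sketch of what DEBUGFS_FILE(opcode_stats) below expands to,
written out as a comment:

/*
 * DEBUGFS_FILE(opcode_stats) stamps out, in effect:
 *
 *	static const struct seq_operations _opcode_stats_seq_ops = {
 *		.start = _opcode_stats_seq_start,
 *		.next  = _opcode_stats_seq_next,
 *		.stop  = _opcode_stats_seq_stop,
 *		.show  = _opcode_stats_seq_show
 *	};
 *
 * plus _opcode_stats_open(), which seq_open()s the file and stores
 * inode->i_private in seq->private, and _opcode_stats_file_ops wiring
 * open/read/llseek/release to the seq_file machinery.
 */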
-
-static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos)
-{
- struct qib_opcode_stats_perctx *opstats;
-
- if (*pos >= ARRAY_SIZE(opstats->stats))
- return NULL;
- return pos;
-}
-
-static void *_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
- struct qib_opcode_stats_perctx *opstats;
-
- ++*pos;
- if (*pos >= ARRAY_SIZE(opstats->stats))
- return NULL;
- return pos;
-}
-
-
-static void _opcode_stats_seq_stop(struct seq_file *s, void *v)
-{
- /* nothing allocated */
-}
-
-static int _opcode_stats_seq_show(struct seq_file *s, void *v)
-{
- loff_t *spos = v;
- loff_t i = *spos, j;
- u64 n_packets = 0, n_bytes = 0;
- struct qib_ibdev *ibd = (struct qib_ibdev *)s->private;
- struct qib_devdata *dd = dd_from_dev(ibd);
-
- for (j = 0; j < dd->first_user_ctxt; j++) {
- if (!dd->rcd[j])
- continue;
- n_packets += dd->rcd[j]->opstats->stats[i].n_packets;
- n_bytes += dd->rcd[j]->opstats->stats[i].n_bytes;
- }
- if (!n_packets && !n_bytes)
- return SEQ_SKIP;
- seq_printf(s, "%02llx %llu/%llu\n", i,
- (unsigned long long) n_packets,
- (unsigned long long) n_bytes);
-
- return 0;
-}
-
-DEBUGFS_FILE(opcode_stats)
-
-static void *_ctx_stats_seq_start(struct seq_file *s, loff_t *pos)
-{
- struct qib_ibdev *ibd = (struct qib_ibdev *)s->private;
- struct qib_devdata *dd = dd_from_dev(ibd);
-
- if (!*pos)
- return SEQ_START_TOKEN;
- if (*pos >= dd->first_user_ctxt)
- return NULL;
- return pos;
-}
-
-static void *_ctx_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
- struct qib_ibdev *ibd = (struct qib_ibdev *)s->private;
- struct qib_devdata *dd = dd_from_dev(ibd);
-
- if (v == SEQ_START_TOKEN)
- return pos;
-
- ++*pos;
- if (*pos >= dd->first_user_ctxt)
- return NULL;
- return pos;
-}
-
-static void _ctx_stats_seq_stop(struct seq_file *s, void *v)
-{
- /* nothing allocated */
-}
-
-static int _ctx_stats_seq_show(struct seq_file *s, void *v)
-{
- loff_t *spos;
- loff_t i, j;
- u64 n_packets = 0;
- struct qib_ibdev *ibd = (struct qib_ibdev *)s->private;
- struct qib_devdata *dd = dd_from_dev(ibd);
-
- if (v == SEQ_START_TOKEN) {
- seq_puts(s, "Ctx:npkts\n");
- return 0;
- }
-
- spos = v;
- i = *spos;
-
- if (!dd->rcd[i])
- return SEQ_SKIP;
-
- for (j = 0; j < ARRAY_SIZE(dd->rcd[i]->opstats->stats); j++)
- n_packets += dd->rcd[i]->opstats->stats[j].n_packets;
-
- if (!n_packets)
- return SEQ_SKIP;
-
- seq_printf(s, " %llu:%llu\n", i, n_packets);
- return 0;
-}
-
-DEBUGFS_FILE(ctx_stats)
-
-static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
- __acquires(RCU)
-{
- struct rvt_qp_iter *iter;
- loff_t n = *pos;
-
- iter = rvt_qp_iter_init(s->private, 0, NULL);
-
- /* stop calls rcu_read_unlock */
- rcu_read_lock();
-
- if (!iter)
- return NULL;
-
- do {
- if (rvt_qp_iter_next(iter)) {
- kfree(iter);
- return NULL;
- }
- } while (n--);
-
- return iter;
-}
-
-static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
- loff_t *pos)
- __must_hold(RCU)
-{
- struct rvt_qp_iter *iter = iter_ptr;
-
- (*pos)++;
-
- if (rvt_qp_iter_next(iter)) {
- kfree(iter);
- return NULL;
- }
-
- return iter;
-}
-
-static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
- __releases(RCU)
-{
- rcu_read_unlock();
-}
-
-static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr)
-{
- struct rvt_qp_iter *iter = iter_ptr;
-
- if (!iter)
- return 0;
-
- qib_qp_iter_print(s, iter);
-
- return 0;
-}
-
-DEBUGFS_FILE(qp_stats)
-
-void qib_dbg_ibdev_init(struct qib_ibdev *ibd)
-{
- struct dentry *root;
- char name[10];
-
- snprintf(name, sizeof(name), "qib%d", dd_from_dev(ibd)->unit);
- root = debugfs_create_dir(name, qib_dbg_root);
- ibd->qib_ibdev_dbg = root;
-
- debugfs_create_file("opcode_stats", 0400, root, ibd,
- &_opcode_stats_file_ops);
- debugfs_create_file("ctx_stats", 0400, root, ibd, &_ctx_stats_file_ops);
- debugfs_create_file("qp_stats", 0400, root, ibd, &_qp_stats_file_ops);
-}
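
Assuming debugfs is mounted at its usual location, the resulting layout for
unit 0 would look roughly like this (QIB_DRV_NAME is defined in qib.h):

/*
 * /sys/kernel/debug/<QIB_DRV_NAME>/
 *	qib0/
 *		opcode_stats	(mode 0400)
 *		ctx_stats	(mode 0400)
 *		qp_stats	(mode 0400)
 */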
-
-void qib_dbg_ibdev_exit(struct qib_ibdev *ibd)
-{
- if (!qib_dbg_root)
- goto out;
- debugfs_remove_recursive(ibd->qib_ibdev_dbg);
-out:
- ibd->qib_ibdev_dbg = NULL;
-}
-
-void qib_dbg_init(void)
-{
- qib_dbg_root = debugfs_create_dir(QIB_DRV_NAME, NULL);
-}
-
-void qib_dbg_exit(void)
-{
- debugfs_remove_recursive(qib_dbg_root);
- qib_dbg_root = NULL;
-}
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.h b/drivers/infiniband/hw/qib/qib_debugfs.h
deleted file mode 100644
index 7ae983a91b8b..000000000000
--- a/drivers/infiniband/hw/qib/qib_debugfs.h
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef _QIB_DEBUGFS_H
-#define _QIB_DEBUGFS_H
-
-#ifdef CONFIG_DEBUG_FS
-/*
- * Copyright (c) 2013 Intel Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-struct qib_ibdev;
-void qib_dbg_ibdev_init(struct qib_ibdev *ibd);
-void qib_dbg_ibdev_exit(struct qib_ibdev *ibd);
-void qib_dbg_init(void);
-void qib_dbg_exit(void);
-
-#endif
-
-#endif /* _QIB_DEBUGFS_H */
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
deleted file mode 100644
index 11da796dd1b7..000000000000
--- a/drivers/infiniband/hw/qib/qib_diag.c
+++ /dev/null
@@ -1,906 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file contains support for diagnostic functions. It is accessed by
- * opening the qib_diag device, normally minor number 129. Diagnostic use
- * of the QLogic_IB chip may render the chip or board unusable until the
- * driver is unloaded, or in some cases, until the system is rebooted.
- *
- * Accesses to the chip through this interface are not similar to going
- * through the /sys/bus/pci resource mmap interface.
- */
-
-#include <linux/io.h>
-#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/vmalloc.h>
-#include <linux/export.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-
-#include "qib.h"
-#include "qib_common.h"
-
-#undef pr_fmt
-#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
-
-/*
- * Each client that opens the diag device must read then write
- * offset 0, to prevent lossage from random cat or od. diag_state
- * sequences this "handshake".
- */
-enum diag_state { UNUSED = 0, OPENED, INIT, READY };
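
A hypothetical userspace sketch of that handshake, matching the checks in
qib_diag_read() and qib_diag_write() below (the device node name is assumed
from the names created in qib_diag_add()):

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

static int diag_handshake(void)
{
	uint64_t probe;
	int fd = open("/dev/ipath_diag0", O_RDWR);	/* state: OPENED */

	if (fd < 0)
		return -1;
	if (pread(fd, &probe, sizeof(probe), 0) != 8)	/* OPENED -> INIT */
		return -1;
	if (pwrite(fd, &probe, sizeof(probe), 0) != 8)	/* INIT -> READY */
		return -1;
	return fd;	/* aligned accesses at other offsets now accepted */
}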
-
-/* State for an individual client. PID so children cannot abuse handshake */
-static struct qib_diag_client {
- struct qib_diag_client *next;
- struct qib_devdata *dd;
- pid_t pid;
- enum diag_state state;
-} *client_pool;
-
-/*
- * Get a client struct. Recycled if possible, else kmalloc.
- * Must be called with qib_mutex held
- */
-static struct qib_diag_client *get_client(struct qib_devdata *dd)
-{
- struct qib_diag_client *dc;
-
- dc = client_pool;
- if (dc)
- /* got one from the pool; remove it and use it */
- client_pool = dc->next;
- else
- /* None in pool, alloc and init */
- dc = kmalloc(sizeof(*dc), GFP_KERNEL);
-
- if (dc) {
- dc->next = NULL;
- dc->dd = dd;
- dc->pid = current->pid;
- dc->state = OPENED;
- }
- return dc;
-}
-
-/*
- * Return to pool. Must be called with qib_mutex held
- */
-static void return_client(struct qib_diag_client *dc)
-{
- struct qib_devdata *dd = dc->dd;
- struct qib_diag_client *tdc, *rdc;
-
- rdc = NULL;
- if (dc == dd->diag_client) {
- dd->diag_client = dc->next;
- rdc = dc;
- } else {
- tdc = dc->dd->diag_client;
- while (tdc) {
- if (dc == tdc->next) {
- tdc->next = dc->next;
- rdc = dc;
- break;
- }
- tdc = tdc->next;
- }
- }
- if (rdc) {
- rdc->state = UNUSED;
- rdc->dd = NULL;
- rdc->pid = 0;
- rdc->next = client_pool;
- client_pool = rdc;
- }
-}
-
-static int qib_diag_open(struct inode *in, struct file *fp);
-static int qib_diag_release(struct inode *in, struct file *fp);
-static ssize_t qib_diag_read(struct file *fp, char __user *data,
- size_t count, loff_t *off);
-static ssize_t qib_diag_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off);
-
-static const struct file_operations diag_file_ops = {
- .owner = THIS_MODULE,
- .write = qib_diag_write,
- .read = qib_diag_read,
- .open = qib_diag_open,
- .release = qib_diag_release,
- .llseek = default_llseek,
-};
-
-static atomic_t diagpkt_count = ATOMIC_INIT(0);
-static struct cdev *diagpkt_cdev;
-static struct device *diagpkt_device;
-
-static ssize_t qib_diagpkt_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off);
-
-static const struct file_operations diagpkt_file_ops = {
- .owner = THIS_MODULE,
- .write = qib_diagpkt_write,
- .llseek = noop_llseek,
-};
-
-int qib_diag_add(struct qib_devdata *dd)
-{
- char name[16];
- int ret = 0;
-
- if (atomic_inc_return(&diagpkt_count) == 1) {
- ret = qib_cdev_init(QIB_DIAGPKT_MINOR, "ipath_diagpkt",
- &diagpkt_file_ops, &diagpkt_cdev,
- &diagpkt_device);
- if (ret)
- goto done;
- }
-
- snprintf(name, sizeof(name), "ipath_diag%d", dd->unit);
- ret = qib_cdev_init(QIB_DIAG_MINOR_BASE + dd->unit, name,
- &diag_file_ops, &dd->diag_cdev,
- &dd->diag_device);
-done:
- return ret;
-}
-
-static void qib_unregister_observers(struct qib_devdata *dd);
-
-void qib_diag_remove(struct qib_devdata *dd)
-{
- struct qib_diag_client *dc;
-
- if (atomic_dec_and_test(&diagpkt_count))
- qib_cdev_cleanup(&diagpkt_cdev, &diagpkt_device);
-
- qib_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);
-
- /*
- * Return all diag_clients of this device. There should be none,
- * as we are "guaranteed" that no clients are still open
- */
- while (dd->diag_client)
- return_client(dd->diag_client);
-
- /* Now clean up all unused client structs */
- while (client_pool) {
- dc = client_pool;
- client_pool = dc->next;
- kfree(dc);
- }
- /* Clean up observer list */
- qib_unregister_observers(dd);
-}
-
-/* qib_remap_ioaddr32 - remap an offset into chip address space to __iomem *
- *
- * @dd: the qlogic_ib device
- * @offs: the offset in chip-space
- * @cntp: Pointer to max (byte) count for transfer starting at offset
- * This returns a u32 __iomem * so it can be used for both 64 and 32-bit
- * mapping. It is needed because with the use of PAT for control of
- * write-combining, the logically contiguous address-space of the chip
- * may be split into virtually non-contiguous spaces, with different
- * attributes, which are then mapped to contiguous physical space
- * based on the first BAR.
- *
- * The code below makes the same assumptions as were made in
- * init_chip_wc_pat() (qib_init.c), copied here:
- * Assumes chip address space looks like:
- * - kregs + sregs + cregs + uregs (in any order)
- * - piobufs (2K and 4K bufs in either order)
- * or:
- * - kregs + sregs + cregs (in any order)
- * - piobufs (2K and 4K bufs in either order)
- * - uregs
- *
- * If cntp is non-NULL, returns how many bytes from offset can be accessed
- * Returns 0 if the offset is not mapped.
- */
-static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
- u32 *cntp)
-{
- u32 kreglen;
- u32 snd_bottom, snd_lim = 0;
- u32 __iomem *krb32 = (u32 __iomem *)dd->kregbase;
- u32 __iomem *map = NULL;
- u32 cnt = 0;
- u32 tot4k, offs4k;
-
- /* First, simplest case, offset is within the first map. */
- kreglen = (dd->kregend - dd->kregbase) * sizeof(u64);
- if (offset < kreglen) {
- map = krb32 + (offset / sizeof(u32));
- cnt = kreglen - offset;
- goto mapped;
- }
-
- /*
- * Next check for user regs, the next most common case,
- * and a cheap check because if they are not in the first map
- * they are last in chip.
- */
- if (dd->userbase) {
- /* If user regs mapped, they are after send, so set limit. */
- u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
-
- if (!dd->piovl15base)
- snd_lim = dd->uregbase;
- krb32 = (u32 __iomem *)dd->userbase;
- if (offset >= dd->uregbase && offset < ulim) {
- map = krb32 + (offset - dd->uregbase) / sizeof(u32);
- cnt = ulim - offset;
- goto mapped;
- }
- }
-
- /*
- * Lastly, check for offset within Send Buffers.
- * This is gnarly because struct devdata is deliberately vague
- * about things like 7322 VL15 buffers, and we are not in
- * chip-specific code here, so should not make many assumptions.
- * The one we _do_ make is that the only chip that has more sndbufs
- * than we admit is the 7322, and it has userregs above that, so
- * we know the snd_lim.
- */
- /* Assume 2K buffers are first. */
- snd_bottom = dd->pio2k_bufbase;
- if (snd_lim == 0) {
- u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
-
- snd_lim = snd_bottom + tot2k;
- }
- /* If 4k buffers exist, account for them by bumping
- * appropriate limit.
- */
- tot4k = dd->piobcnt4k * dd->align4k;
- offs4k = dd->piobufbase >> 32;
- if (dd->piobcnt4k) {
- if (snd_bottom > offs4k)
- snd_bottom = offs4k;
- else {
- /* 4k above 2k. Bump snd_lim, if needed*/
- if (!dd->userbase || dd->piovl15base)
- snd_lim = offs4k + tot4k;
- }
- }
- /*
- * Judgement call: can we ignore the space between SendBuffs and
- * UserRegs, where we would like to see vl15 buffs, but not more?
- */
- if (offset >= snd_bottom && offset < snd_lim) {
- offset -= snd_bottom;
- map = (u32 __iomem *)dd->piobase + (offset / sizeof(u32));
- cnt = snd_lim - offset;
- }
-
- if (!map && offs4k && dd->piovl15base) {
- snd_lim = offs4k + tot4k + 2 * dd->align4k;
- if (offset >= (offs4k + tot4k) && offset < snd_lim) {
- map = (u32 __iomem *)dd->piovl15base +
- ((offset - (offs4k + tot4k)) / sizeof(u32));
- cnt = snd_lim - offset;
- }
- }
-
-mapped:
- if (cntp)
- *cntp = cnt;
- return map;
-}
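
A minimal sketch of the calling pattern the umem helpers below follow: the
returned limit bounds the transfer so a caller never runs past the region.

static int example_bounded_access(struct qib_devdata *dd, u32 offset,
				  size_t count)
{
	u32 limit;
	u32 __iomem *p = qib_remap_ioaddr32(dd, offset, &limit);

	if (!p || !limit || !(dd->flags & QIB_PRESENT))
		return -EINVAL;	/* offset unmapped or chip not present */
	if (count >= limit)
		count = limit;	/* clamp to the end of the mapped region */
	/* ... readl()/writel() within [p, p + count / sizeof(u32)) ... */
	return 0;
}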
-
-/*
- * qib_read_umem64 - read a 64-bit quantity from the chip into user space
- * @dd: the qlogic_ib device
- * @uaddr: the location to store the data in user memory
- * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
- * @count: number of bytes to copy (multiple of 32 bits)
- *
- * This function also localizes all chip memory accesses.
- * The copy should be written such that we read full cacheline packets
- * from the chip. This is usually used for a single qword
- *
- * NOTE: This assumes the chip address is 64-bit aligned.
- */
-static int qib_read_umem64(struct qib_devdata *dd, void __user *uaddr,
- u32 regoffs, size_t count)
-{
- const u64 __iomem *reg_addr;
- const u64 __iomem *reg_end;
- u32 limit;
- int ret;
-
- reg_addr = (const u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
- if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
- ret = -EINVAL;
- goto bail;
- }
- if (count >= limit)
- count = limit;
- reg_end = reg_addr + (count / sizeof(u64));
-
- /* not very efficient, but it works for now */
- while (reg_addr < reg_end) {
- u64 data = readq(reg_addr);
-
- if (copy_to_user(uaddr, &data, sizeof(u64))) {
- ret = -EFAULT;
- goto bail;
- }
- reg_addr++;
- uaddr += sizeof(u64);
- }
- ret = 0;
-bail:
- return ret;
-}
-
-/*
- * qib_write_umem64 - write a 64-bit quantity to the chip from user space
- * @dd: the qlogic_ib device
- * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
- * @uaddr: the source of the data in user memory
- * @count: the number of bytes to copy (multiple of 32 bits)
- *
- * This is usually used for a single qword
- * NOTE: This assumes the chip address is 64-bit aligned.
- */
-
-static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
- const void __user *uaddr, size_t count)
-{
- u64 __iomem *reg_addr;
- const u64 __iomem *reg_end;
- u32 limit;
- int ret;
-
- reg_addr = (u64 __iomem *)qib_remap_ioaddr32(dd, regoffs, &limit);
- if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
- ret = -EINVAL;
- goto bail;
- }
- if (count >= limit)
- count = limit;
- reg_end = reg_addr + (count / sizeof(u64));
-
- /* not very efficient, but it works for now */
- while (reg_addr < reg_end) {
- u64 data;
-
- if (copy_from_user(&data, uaddr, sizeof(data))) {
- ret = -EFAULT;
- goto bail;
- }
- writeq(data, reg_addr);
-
- reg_addr++;
- uaddr += sizeof(u64);
- }
- ret = 0;
-bail:
- return ret;
-}
-
-/*
- * qib_read_umem32 - read a 32-bit quantity from the chip into user space
- * @dd: the qlogic_ib device
- * @uaddr: the location to store the data in user memory
- * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
- * @count: number of bytes to copy
- *
- * read 32 bit values, not 64 bit; for memories that only
- * support 32 bit reads; usually a single dword.
- */
-static int qib_read_umem32(struct qib_devdata *dd, void __user *uaddr,
- u32 regoffs, size_t count)
-{
- const u32 __iomem *reg_addr;
- const u32 __iomem *reg_end;
- u32 limit;
- int ret;
-
- reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
- if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
- ret = -EINVAL;
- goto bail;
- }
- if (count >= limit)
- count = limit;
- reg_end = reg_addr + (count / sizeof(u32));
-
- /* not very efficient, but it works for now */
- while (reg_addr < reg_end) {
- u32 data = readl(reg_addr);
-
- if (copy_to_user(uaddr, &data, sizeof(data))) {
- ret = -EFAULT;
- goto bail;
- }
-
- reg_addr++;
- uaddr += sizeof(u32);
-
- }
- ret = 0;
-bail:
- return ret;
-}
-
-/*
- * qib_write_umem32 - write a 32-bit quantity to the chip from user space
- * @dd: the qlogic_ib device
- * @regoffs: the offset from BAR0 (_NOT_ full pointer, anymore)
- * @uaddr: the source of the data in user memory
- * @count: number of bytes to copy
- *
- * write 32 bit values, not 64 bit; for memories that only
- * support 32 bit writes; usually a single dword.
- */
-
-static int qib_write_umem32(struct qib_devdata *dd, u32 regoffs,
- const void __user *uaddr, size_t count)
-{
- u32 __iomem *reg_addr;
- const u32 __iomem *reg_end;
- u32 limit;
- int ret;
-
- reg_addr = qib_remap_ioaddr32(dd, regoffs, &limit);
- if (reg_addr == NULL || limit == 0 || !(dd->flags & QIB_PRESENT)) {
- ret = -EINVAL;
- goto bail;
- }
- if (count >= limit)
- count = limit;
- reg_end = reg_addr + (count / sizeof(u32));
-
- while (reg_addr < reg_end) {
- u32 data;
-
- if (copy_from_user(&data, uaddr, sizeof(data))) {
- ret = -EFAULT;
- goto bail;
- }
- writel(data, reg_addr);
-
- reg_addr++;
- uaddr += sizeof(u32);
- }
- ret = 0;
-bail:
- return ret;
-}
-
-static int qib_diag_open(struct inode *in, struct file *fp)
-{
- int unit = iminor(in) - QIB_DIAG_MINOR_BASE;
- struct qib_devdata *dd;
- struct qib_diag_client *dc;
- int ret;
-
- mutex_lock(&qib_mutex);
-
- dd = qib_lookup(unit);
-
- if (dd == NULL || !(dd->flags & QIB_PRESENT) ||
- !dd->kregbase) {
- ret = -ENODEV;
- goto bail;
- }
-
- dc = get_client(dd);
- if (!dc) {
- ret = -ENOMEM;
- goto bail;
- }
- dc->next = dd->diag_client;
- dd->diag_client = dc;
- fp->private_data = dc;
- ret = 0;
-bail:
- mutex_unlock(&qib_mutex);
-
- return ret;
-}
-
-/**
- * qib_diagpkt_write - write an IB packet
- * @fp: the diag data device file pointer
- * @data: qib_diag_xpkt structure saying where to get the packet
- * @count: size of data to write
- * @off: unused by this code
- */
-static ssize_t qib_diagpkt_write(struct file *fp,
- const char __user *data,
- size_t count, loff_t *off)
-{
- u32 __iomem *piobuf;
- u32 plen, pbufn, maxlen_reserve;
- struct qib_diag_xpkt dp;
- u32 *tmpbuf = NULL;
- struct qib_devdata *dd;
- struct qib_pportdata *ppd;
- ssize_t ret = 0;
-
- if (count != sizeof(dp)) {
- ret = -EINVAL;
- goto bail;
- }
- if (copy_from_user(&dp, data, sizeof(dp))) {
- ret = -EFAULT;
- goto bail;
- }
-
- dd = qib_lookup(dp.unit);
- if (!dd || !(dd->flags & QIB_PRESENT) || !dd->kregbase) {
- ret = -ENODEV;
- goto bail;
- }
- if (!(dd->flags & QIB_INITTED)) {
- /* no hardware, freeze, etc. */
- ret = -ENODEV;
- goto bail;
- }
-
- if (dp.version != _DIAG_XPKT_VERS) {
- qib_dev_err(dd, "Invalid version %u for diagpkt_write\n",
- dp.version);
- ret = -EINVAL;
- goto bail;
- }
- /* send count must be an exact number of dwords */
- if (dp.len & 3) {
- ret = -EINVAL;
- goto bail;
- }
- if (!dp.port || dp.port > dd->num_pports) {
- ret = -EINVAL;
- goto bail;
- }
- ppd = &dd->pport[dp.port - 1];
-
- /*
- * need total length before first word written, plus 2 Dwords. One Dword
- * is for padding so we get the full user data when not aligned on
- * a word boundary. The other Dword is to make sure we have room for the
- * ICRC which gets tacked on later.
- */
- maxlen_reserve = 2 * sizeof(u32);
- if (dp.len > ppd->ibmaxlen - maxlen_reserve) {
- ret = -EINVAL;
- goto bail;
- }
-
- plen = sizeof(u32) + dp.len;
-
- tmpbuf = vmalloc(plen);
- if (!tmpbuf) {
- ret = -ENOMEM;
- goto bail;
- }
-
- if (copy_from_user(tmpbuf,
- u64_to_user_ptr(dp.data),
- dp.len)) {
- ret = -EFAULT;
- goto bail;
- }
-
- plen >>= 2; /* in dwords */
-
- if (dp.pbc_wd == 0)
- dp.pbc_wd = plen;
-
- piobuf = dd->f_getsendbuf(ppd, dp.pbc_wd, &pbufn);
- if (!piobuf) {
- ret = -EBUSY;
- goto bail;
- }
- /* disarm it just to be extra sure */
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbufn));
-
- /* disable header check on pbufn for this packet */
- dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_DIS1, NULL);
-
- writeq(dp.pbc_wd, piobuf);
- /*
- * Copy all but the trigger word, then flush, so it's written
- * to chip before trigger word, then write trigger word, then
- * flush again, so packet is sent.
- */
- if (dd->flags & QIB_PIO_FLUSH_WC) {
- qib_flush_wc();
- qib_pio_copy(piobuf + 2, tmpbuf, plen - 1);
- qib_flush_wc();
- __raw_writel(tmpbuf[plen - 1], piobuf + plen + 1);
- } else
- qib_pio_copy(piobuf + 2, tmpbuf, plen);
-
- if (dd->flags & QIB_USE_SPCL_TRIG) {
- u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
-
- qib_flush_wc();
- __raw_writel(0xaebecede, piobuf + spcl_off);
- }
-
- /*
- * Ensure buffer is written to the chip, then re-enable
- * header checks (if supported by chip). The txchk
- * code will ensure seen by chip before returning.
- */
- qib_flush_wc();
- qib_sendbuf_done(dd, pbufn);
- dd->f_txchk_change(dd, pbufn, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
-
- ret = sizeof(dp);
-
-bail:
- vfree(tmpbuf);
- return ret;
-}
-
-static int qib_diag_release(struct inode *in, struct file *fp)
-{
- mutex_lock(&qib_mutex);
- return_client(fp->private_data);
- fp->private_data = NULL;
- mutex_unlock(&qib_mutex);
- return 0;
-}
-
-/*
- * Chip-specific code calls this to register its interest in
- * a specific address range.
- */
-struct diag_observer_list_elt {
- struct diag_observer_list_elt *next;
- const struct diag_observer *op;
-};
-
-int qib_register_observer(struct qib_devdata *dd,
- const struct diag_observer *op)
-{
- struct diag_observer_list_elt *olp;
- unsigned long flags;
-
- if (!dd || !op)
- return -EINVAL;
- olp = vmalloc(sizeof(*olp));
- if (!olp)
- return -ENOMEM;
-
- spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
- olp->op = op;
- olp->next = dd->diag_observer_list;
- dd->diag_observer_list = olp;
- spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
-
- return 0;
-}
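
A hypothetical example of the hook this expects; the diag_observer fields
and hook signature are inferred from the call sites in this file (see
diag_get_observer() and the read/write paths), not quoted from qib.h:

/* Observer covering one hypothetical 64-bit register window. */
static int my_diag_hook(struct qib_devdata *dd,
			const struct diag_observer *op,
			u32 offs, u64 *data, u64 mask, int only_32)
{
	/* mask is 0 for reads and ~0ULL for writes (see qib_diag_write);
	 * return the number of bytes handled, which the read path
	 * compares against the requested count */
	return only_32 ? 4 : 8;
}

static const struct diag_observer my_observer = {
	.hook = my_diag_hook,
	.bottom = 0x1000,	/* hypothetical chip-space offsets */
	.top = 0x1007,
};

/* chip-specific init would then do: qib_register_observer(dd, &my_observer); */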
-
-/* Remove all registered observers when device is closed */
-static void qib_unregister_observers(struct qib_devdata *dd)
-{
- struct diag_observer_list_elt *olp;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
- olp = dd->diag_observer_list;
- while (olp) {
- /* Pop one observer, let go of lock */
- dd->diag_observer_list = olp->next;
- spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
- vfree(olp);
- /* try again. */
- spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
- olp = dd->diag_observer_list;
- }
- spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
-}
-
-/*
- * Find the observer, if any, for the specified address. The initial
- * implementation is a simple stack of observers. This must be called
- * with the diag transaction lock held.
- */
-static const struct diag_observer *diag_get_observer(struct qib_devdata *dd,
- u32 addr)
-{
- struct diag_observer_list_elt *olp;
- const struct diag_observer *op = NULL;
-
- olp = dd->diag_observer_list;
- while (olp) {
- op = olp->op;
- if (addr >= op->bottom && addr <= op->top)
- break;
- olp = olp->next;
- }
- if (!olp)
- op = NULL;
-
- return op;
-}
-
-static ssize_t qib_diag_read(struct file *fp, char __user *data,
- size_t count, loff_t *off)
-{
- struct qib_diag_client *dc = fp->private_data;
- struct qib_devdata *dd = dc->dd;
- ssize_t ret;
-
- if (dc->pid != current->pid) {
- ret = -EPERM;
- goto bail;
- }
-
- if (count == 0)
- ret = 0;
- else if ((count % 4) || (*off % 4))
- /* address or length is not 32-bit aligned, hence invalid */
- ret = -EINVAL;
- else if (dc->state < READY && (*off || count != 8))
- ret = -EINVAL; /* prevent cat /dev/qib_diag* */
- else {
- unsigned long flags;
- u64 data64 = 0;
- int use_32;
- const struct diag_observer *op;
-
- use_32 = (count % 8) || (*off % 8);
- ret = -1;
- spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
- /*
- * Check for observer on this address range.
- * We only support a single 32 or 64-bit read
- * via observer, currently.
- */
- op = diag_get_observer(dd, *off);
- if (op) {
- u32 offset = *off;
-
- ret = op->hook(dd, op, offset, &data64, 0, use_32);
- }
- /*
- * We need to release lock before any copy_to_user(),
- * whether implicit in qib_read_umem* or explicit below.
- */
- spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
- if (!op) {
- if (use_32)
- /*
- * Address or length is not 64-bit aligned;
- * do 32-bit rd
- */
- ret = qib_read_umem32(dd, data, (u32) *off,
- count);
- else
- ret = qib_read_umem64(dd, data, (u32) *off,
- count);
- } else if (ret == count) {
- /* Below finishes case where observer existed */
- ret = copy_to_user(data, &data64, use_32 ?
- sizeof(u32) : sizeof(u64));
- if (ret)
- ret = -EFAULT;
- }
- }
-
- if (ret >= 0) {
- *off += count;
- ret = count;
- if (dc->state == OPENED)
- dc->state = INIT;
- }
-bail:
- return ret;
-}
-
-static ssize_t qib_diag_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off)
-{
- struct qib_diag_client *dc = fp->private_data;
- struct qib_devdata *dd = dc->dd;
- ssize_t ret;
-
- if (dc->pid != current->pid) {
- ret = -EPERM;
- goto bail;
- }
-
- if (count == 0)
- ret = 0;
- else if ((count % 4) || (*off % 4))
- /* address or length is not 32-bit aligned, hence invalid */
- ret = -EINVAL;
- else if (dc->state < READY &&
- ((*off || count != 8) || dc->state != INIT))
- /* No writes except second-step of init seq */
- ret = -EINVAL; /* before any other write allowed */
- else {
- unsigned long flags;
- const struct diag_observer *op = NULL;
- int use_32 = (count % 8) || (*off % 8);
-
- /*
- * Check for observer on this address range.
- * We only support a single 32 or 64-bit write
- * via observer, currently. This helps, because
- * we would otherwise have to jump through hoops
- * to make "diag transaction" meaningful when we
- * cannot do a copy_from_user while holding the lock.
- */
- if (count == 4 || count == 8) {
- u64 data64;
- u32 offset = *off;
-
- ret = copy_from_user(&data64, data, count);
- if (ret) {
- ret = -EFAULT;
- goto bail;
- }
- spin_lock_irqsave(&dd->qib_diag_trans_lock, flags);
- op = diag_get_observer(dd, *off);
- if (op)
- ret = op->hook(dd, op, offset, &data64, ~0ULL,
- use_32);
- spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags);
- }
-
- if (!op) {
- if (use_32)
- /*
- * Address or length is not 64-bit aligned;
- * do 32-bit write
- */
- ret = qib_write_umem32(dd, (u32) *off, data,
- count);
- else
- ret = qib_write_umem64(dd, (u32) *off, data,
- count);
- }
- }
-
- if (ret >= 0) {
- *off += count;
- ret = count;
- if (dc->state == INIT)
- dc->state = READY; /* all read/write OK now */
- }
-bail:
- return ret;
-}
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
deleted file mode 100644
index 91fa5e160c0d..000000000000
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ /dev/null
@@ -1,798 +0,0 @@
-/*
- * Copyright (c) 2021 Cornelis Networks. All rights reserved.
- * Copyright (c) 2013 Intel Corporation. All rights reserved.
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <linux/prefetch.h>
-
-#include "qib.h"
-
-DEFINE_MUTEX(qib_mutex); /* general driver use */
-
-unsigned qib_ibmtu;
-module_param_named(ibmtu, qib_ibmtu, uint, S_IRUGO);
-MODULE_PARM_DESC(ibmtu, "Set max IB MTU (0=2KB, 1=256, 2=512, ... 5=4096)");
-
-unsigned qib_compat_ddr_negotiate = 1;
-module_param_named(compat_ddr_negotiate, qib_compat_ddr_negotiate, uint,
- S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(compat_ddr_negotiate,
- "Attempt pre-IBTA 1.2 DDR speed negotiation");
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Cornelis <support@cornelisnetworks.com>");
-MODULE_DESCRIPTION("Cornelis IB driver");
-
-/*
- * QIB_PIO_MAXIBHDR is the max IB header size allowed in our
- * PIO send buffers. This is well beyond anything currently
- * defined in the InfiniBand spec.
- */
-#define QIB_PIO_MAXIBHDR 128
-
-/*
- * QIB_MAX_PKT_RECV is the max # of packets processed per receive interrupt.
- */
-#define QIB_MAX_PKT_RECV 64
-
-struct qlogic_ib_stats qib_stats;
-
-struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi)
-{
- struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
- struct qib_devdata *dd = container_of(ibdev,
- struct qib_devdata, verbs_dev);
- return dd->pcidev;
-}
-
-/*
- * Return count of units with at least one port ACTIVE.
- */
-int qib_count_active_units(void)
-{
- struct qib_devdata *dd;
- struct qib_pportdata *ppd;
- unsigned long index, flags;
- int pidx, nunits_active = 0;
-
- xa_lock_irqsave(&qib_dev_table, flags);
- xa_for_each(&qib_dev_table, index, dd) {
- if (!(dd->flags & QIB_PRESENT) || !dd->kregbase)
- continue;
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
- QIBL_LINKARMED | QIBL_LINKACTIVE))) {
- nunits_active++;
- break;
- }
- }
- }
- xa_unlock_irqrestore(&qib_dev_table, flags);
- return nunits_active;
-}
-
-/*
- * Return count of all units, optionally return in arguments
- * the number of usable (present) units, and the number of
- * ports that are up.
- */
-int qib_count_units(int *npresentp, int *nupp)
-{
- int nunits = 0, npresent = 0, nup = 0;
- struct qib_devdata *dd;
- unsigned long index, flags;
- int pidx;
- struct qib_pportdata *ppd;
-
- xa_lock_irqsave(&qib_dev_table, flags);
- xa_for_each(&qib_dev_table, index, dd) {
- nunits++;
- if ((dd->flags & QIB_PRESENT) && dd->kregbase)
- npresent++;
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (ppd->lid && (ppd->lflags & (QIBL_LINKINIT |
- QIBL_LINKARMED | QIBL_LINKACTIVE)))
- nup++;
- }
- }
- xa_unlock_irqrestore(&qib_dev_table, flags);
-
- if (npresentp)
- *npresentp = npresent;
- if (nupp)
- *nupp = nup;
-
- return nunits;
-}
-
-/**
- * qib_wait_linkstate - wait for an IB link state change to occur
- * @ppd: the qlogic_ib device
- * @state: the state to wait for
- * @msecs: the number of milliseconds to wait
- *
- * Wait up to msecs milliseconds for an IB link state change to occur.
- * For now, take the easy polling route. Currently used only by
- * qib_set_linkstate. Returns 0 if the state is reached, otherwise
- * -ETIMEDOUT. The state can have multiple bits set, for any of several
- * transitions.
- */
-int qib_wait_linkstate(struct qib_pportdata *ppd, u32 state, int msecs)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- if (ppd->state_wanted) {
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- ret = -EBUSY;
- goto bail;
- }
- ppd->state_wanted = state;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- wait_event_interruptible_timeout(ppd->state_wait,
- (ppd->lflags & state),
- msecs_to_jiffies(msecs));
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->state_wanted = 0;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
-
- if (!(ppd->lflags & state))
- ret = -ETIMEDOUT;
- else
- ret = 0;
-bail:
- return ret;
-}
-
-int qib_set_linkstate(struct qib_pportdata *ppd, u8 newstate)
-{
- u32 lstate;
- int ret;
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
-
- switch (newstate) {
- case QIB_IB_LINKDOWN_ONLY:
- dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
- IB_LINKCMD_DOWN | IB_LINKINITCMD_NOP);
- /* don't wait */
- ret = 0;
- goto bail;
-
- case QIB_IB_LINKDOWN:
- dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
- IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
- /* don't wait */
- ret = 0;
- goto bail;
-
- case QIB_IB_LINKDOWN_SLEEP:
- dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
- IB_LINKCMD_DOWN | IB_LINKINITCMD_SLEEP);
- /* don't wait */
- ret = 0;
- goto bail;
-
- case QIB_IB_LINKDOWN_DISABLE:
- dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
- IB_LINKCMD_DOWN | IB_LINKINITCMD_DISABLE);
- /* don't wait */
- ret = 0;
- goto bail;
-
- case QIB_IB_LINKARM:
- if (ppd->lflags & QIBL_LINKARMED) {
- ret = 0;
- goto bail;
- }
- if (!(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKACTIVE))) {
- ret = -EINVAL;
- goto bail;
- }
- /*
- * Since the port can be ACTIVE when we ask for ARMED,
- * clear QIBL_LINKV so we can wait for a transition.
- * If the link isn't ARMED, then something else happened
- * and there is no point waiting for ARMED.
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_LINKV;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
- IB_LINKCMD_ARMED | IB_LINKINITCMD_NOP);
- lstate = QIBL_LINKV;
- break;
-
- case QIB_IB_LINKACTIVE:
- if (ppd->lflags & QIBL_LINKACTIVE) {
- ret = 0;
- goto bail;
- }
- if (!(ppd->lflags & QIBL_LINKARMED)) {
- ret = -EINVAL;
- goto bail;
- }
- dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
- IB_LINKCMD_ACTIVE | IB_LINKINITCMD_NOP);
- lstate = QIBL_LINKACTIVE;
- break;
-
- default:
- ret = -EINVAL;
- goto bail;
- }
- ret = qib_wait_linkstate(ppd, lstate, 10);
-
-bail:
- return ret;
-}
-
-/*
- * Get address of eager buffer from its index (allocated in chunks, not
- * contiguous).
- */
-static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
-{
- const u32 chunk = etail >> rcd->rcvegrbufs_perchunk_shift;
- const u32 idx = etail & ((u32)rcd->rcvegrbufs_perchunk - 1);
-
- return rcd->rcvegrbuf[chunk] + (idx << rcd->dd->rcvegrbufsize_shift);
-}
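
A worked example with hypothetical sizes: with 32 eager buffers per chunk
(rcvegrbufs_perchunk_shift = 5), index 70 lands in chunk 2, slot 6.

static void example_egr_split(void)
{
	u32 etail = 70;
	u32 chunk = etail >> 5;		/* 70 / 32 = 2 */
	u32 idx = etail & (32 - 1);	/* 70 % 32 = 6 */

	/* buffer = rcd->rcvegrbuf[2] + (6 << rcvegrbufsize_shift) */
	(void)chunk;
	(void)idx;
}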
-
-/*
- * Returns 1 if error was a CRC, else 0.
- * Needed for some chip's synthesized error counters.
- */
-static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
- u32 ctxt, u32 eflags, u32 l, u32 etail,
- __le32 *rhf_addr, struct qib_message_header *rhdr)
-{
- u32 ret = 0;
-
- if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
- ret = 1;
- else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
- /* For TIDERR and RC QPs, preemptively schedule a NAK */
- struct ib_header *hdr = (struct ib_header *)rhdr;
- struct ib_other_headers *ohdr = NULL;
- struct qib_ibport *ibp = &ppd->ibport_data;
- struct qib_devdata *dd = ppd->dd;
- struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
- struct rvt_qp *qp = NULL;
- u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
- u16 lid = be16_to_cpu(hdr->lrh[1]);
- int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
- u32 qp_num;
- u32 opcode;
- u32 psn;
- int diff;
-
- /* Sanity check packet */
- if (tlen < 24)
- goto drop;
-
- if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
- lid &= ~((1 << ppd->lmc) - 1);
- if (unlikely(lid != ppd->lid))
- goto drop;
- }
-
- /* Check for GRH */
- if (lnh == QIB_LRH_BTH)
- ohdr = &hdr->u.oth;
- else if (lnh == QIB_LRH_GRH) {
- u32 vtf;
-
- ohdr = &hdr->u.l.oth;
- if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
- goto drop;
- vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
- if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
- goto drop;
- } else
- goto drop;
-
- /* Get opcode and PSN from packet */
- opcode = be32_to_cpu(ohdr->bth[0]);
- opcode >>= 24;
- psn = be32_to_cpu(ohdr->bth[2]);
-
- /* Get the destination QP number. */
- qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- if (qp_num != QIB_MULTICAST_QPN) {
- int ruc_res;
-
- rcu_read_lock();
- qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
- if (!qp) {
- rcu_read_unlock();
- goto drop;
- }
-
- /*
- * Handle only RC QPs - for other QP types drop error
- * packet.
- */
- spin_lock(&qp->r_lock);
-
- /* Check for valid receive state. */
- if (!(ib_rvt_state_ops[qp->state] &
- RVT_PROCESS_RECV_OK)) {
- ibp->rvp.n_pkt_drops++;
- goto unlock;
- }
-
- switch (qp->ibqp.qp_type) {
- case IB_QPT_RC:
- ruc_res =
- qib_ruc_check_hdr(
- ibp, hdr,
- lnh == QIB_LRH_GRH,
- qp,
- be32_to_cpu(ohdr->bth[0]));
- if (ruc_res)
- goto unlock;
-
- /* Only deal with RDMA Writes for now */
- if (opcode <
- IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
- diff = qib_cmp24(psn, qp->r_psn);
- if (!qp->r_nak_state && diff >= 0) {
- ibp->rvp.n_rc_seqnak++;
- qp->r_nak_state =
- IB_NAK_PSN_ERROR;
- /* Use the expected PSN. */
- qp->r_ack_psn = qp->r_psn;
- /*
- * Wait to send the sequence
- * NAK until all packets
- * in the receive queue have
- * been processed.
- * Otherwise, we end up
- * propagating congestion.
- */
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |=
- RVT_R_RSP_NAK;
- rvt_get_qp(qp);
- list_add_tail(
- &qp->rspwait,
- &rcd->qp_wait_list);
- }
- } /* Out of sequence NAK */
- } /* QP Request NAKs */
- break;
- case IB_QPT_SMI:
- case IB_QPT_GSI:
- case IB_QPT_UD:
- case IB_QPT_UC:
- default:
- /* For now don't handle any other QP types */
- break;
- }
-
-unlock:
- spin_unlock(&qp->r_lock);
- rcu_read_unlock();
- } /* Unicast QP */
- } /* Valid packet with TIDErr */
-
-drop:
- return ret;
-}
-
-/*
- * qib_kreceive - receive a packet
- * @rcd: the qlogic_ib context
- * @llic: gets count of good packets needed to clear lli,
- * (used with chips that need to track crcs for lli)
- *
- * Called from the interrupt handler for errors or receive interrupt.
- * Returns number of CRC error packets, needed by some chips for
- * local link integrity tracking. crcs are adjusted down by following
- * good packets, if any, and count of good packets is also tracked.
- */
-u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
-{
- struct qib_devdata *dd = rcd->dd;
- struct qib_pportdata *ppd = rcd->ppd;
- __le32 *rhf_addr;
- void *ebuf;
- const u32 rsize = dd->rcvhdrentsize; /* words */
- const u32 maxcnt = dd->rcvhdrcnt * rsize; /* words */
- u32 etail = -1, l, hdrqtail;
- struct qib_message_header *hdr;
- u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0;
- int last;
- u64 lval;
- struct rvt_qp *qp, *nqp;
-
- l = rcd->head;
- rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
- if (dd->flags & QIB_NODMA_RTAIL) {
- u32 seq = qib_hdrget_seq(rhf_addr);
-
- if (seq != rcd->seq_cnt)
- goto bail;
- hdrqtail = 0;
- } else {
- hdrqtail = qib_get_rcvhdrtail(rcd);
- if (l == hdrqtail)
- goto bail;
- smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
- }
-
- for (last = 0, i = 1; !last; i += !last) {
- hdr = dd->f_get_msgheader(dd, rhf_addr);
- eflags = qib_hdrget_err_flags(rhf_addr);
- etype = qib_hdrget_rcv_type(rhf_addr);
- /* total length */
- tlen = qib_hdrget_length_in_bytes(rhf_addr);
- ebuf = NULL;
- if ((dd->flags & QIB_NODMA_RTAIL) ?
- qib_hdrget_use_egr_buf(rhf_addr) :
- (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
- etail = qib_hdrget_index(rhf_addr);
- updegr = 1;
- if (tlen > sizeof(*hdr) ||
- etype >= RCVHQ_RCV_TYPE_NON_KD) {
- ebuf = qib_get_egrbuf(rcd, etail);
- prefetch_range(ebuf, tlen - sizeof(*hdr));
- }
- }
- if (!eflags) {
- u16 lrh_len = be16_to_cpu(hdr->lrh[2]) << 2;
-
- if (lrh_len != tlen) {
- qib_stats.sps_lenerrs++;
- goto move_along;
- }
- }
- if (etype == RCVHQ_RCV_TYPE_NON_KD && !eflags &&
- ebuf == NULL &&
- tlen > (dd->rcvhdrentsize - 2 + 1 -
- qib_hdrget_offset(rhf_addr)) << 2) {
- goto move_along;
- }
-
- /*
- * Both tiderr and qibhdrerr are set for all plain IB
- * packets; only qibhdrerr should be set.
- */
- if (unlikely(eflags))
- crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
- etail, rhf_addr, hdr);
- else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
- qib_ib_rcv(rcd, hdr, ebuf, tlen);
- if (crcs)
- crcs--;
- else if (llic && *llic)
- --*llic;
- }
-move_along:
- l += rsize;
- if (l >= maxcnt)
- l = 0;
- if (i == QIB_MAX_PKT_RECV)
- last = 1;
-
- rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
- if (dd->flags & QIB_NODMA_RTAIL) {
- u32 seq = qib_hdrget_seq(rhf_addr);
-
- if (++rcd->seq_cnt > 13)
- rcd->seq_cnt = 1;
- if (seq != rcd->seq_cnt)
- last = 1;
- } else if (l == hdrqtail)
- last = 1;
- /*
- * Update head regs etc., every 16 packets, if not last pkt,
- * to help prevent rcvhdrq overflows, when many packets
- * are processed and queue is nearly full.
- * Don't request an interrupt for intermediate updates.
- */
- lval = l;
- if (!last && !(i & 0xf)) {
- dd->f_update_usrhead(rcd, lval, updegr, etail, i);
- updegr = 0;
- }
- }
-
- rcd->head = l;
-
- /*
- * Iterate over all QPs waiting to respond.
- * The list won't change since the IRQ is only run on one CPU.
- */
- list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
- list_del_init(&qp->rspwait);
- if (qp->r_flags & RVT_R_RSP_NAK) {
- qp->r_flags &= ~RVT_R_RSP_NAK;
- qib_send_rc_ack(qp);
- }
- if (qp->r_flags & RVT_R_RSP_SEND) {
- unsigned long flags;
-
- qp->r_flags &= ~RVT_R_RSP_SEND;
- spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_rvt_state_ops[qp->state] &
- RVT_PROCESS_OR_FLUSH_SEND)
- qib_schedule_send(qp);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- }
- rvt_put_qp(qp);
- }
-
-bail:
- /* Report number of packets consumed */
- if (npkts)
- *npkts = i;
-
- /*
- * Always write head at end, and setup rcv interrupt, even
- * if no packets were processed.
- */
- lval = (u64)rcd->head | dd->rhdrhead_intr_off;
- dd->f_update_usrhead(rcd, lval, updegr, etail, i);
- return crcs;
-}
-
-/**
- * qib_set_mtu - set the MTU
- * @ppd: the perport data
- * @arg: the new MTU
- *
- * We can handle "any" incoming size; the issue here is whether we
- * need to restrict our outgoing size. For now, we don't do any
- * sanity checking on this, and we don't deal with what happens to
- * programs that are already running when the size changes.
- * NOTE: changing the MTU will usually cause the IBC to go back to
- * link INIT state...
- */
-int qib_set_mtu(struct qib_pportdata *ppd, u16 arg)
-{
- u32 piosize;
- int ret, chk;
-
- if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
- arg != 4096) {
- ret = -EINVAL;
- goto bail;
- }
- chk = ib_mtu_enum_to_int(qib_ibmtu);
- if (chk > 0 && arg > chk) {
- ret = -EINVAL;
- goto bail;
- }
-
- piosize = ppd->ibmaxlen;
- ppd->ibmtu = arg;
-
- if (arg >= (piosize - QIB_PIO_MAXIBHDR)) {
- /* Only if it's not the initial value (or reset to it) */
- if (piosize != ppd->init_ibmaxlen) {
- if (arg > piosize && arg <= ppd->init_ibmaxlen)
- piosize = ppd->init_ibmaxlen - 2 * sizeof(u32);
- ppd->ibmaxlen = piosize;
- }
- } else if ((arg + QIB_PIO_MAXIBHDR) != ppd->ibmaxlen) {
- piosize = arg + QIB_PIO_MAXIBHDR - 2 * sizeof(u32);
- ppd->ibmaxlen = piosize;
- }
-
- ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_MTU, 0);
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
-{
- struct qib_devdata *dd = ppd->dd;
-
- ppd->lid = lid;
- ppd->lmc = lmc;
-
- dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LIDLMC,
- lid | (~((1U << lmc) - 1)) << 16);
-
- qib_devinfo(dd->pcidev, "IB%u:%u got a lid: 0x%x\n",
- dd->unit, ppd->port, lid);
-
- return 0;
-}
-
-/*
- * The following functions deal with the "obviously simple" task of
- * overriding the state of the LEDs, which normally indicate link physical
- * and logical status. The complications arise in dealing with different
- * hardware mappings, the board-dependent routine being called from
- * interrupts, and the requirement to _flash_ them.
- */
-#define LED_OVER_FREQ_SHIFT 8
-#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
-/* Below is "non-zero" to force override, but both actual LEDs are off */
-#define LED_OVER_BOTH_OFF (8)
-
-static void qib_run_led_override(struct timer_list *t)
-{
- struct qib_pportdata *ppd = timer_container_of(ppd, t,
- led_override_timer);
- struct qib_devdata *dd = ppd->dd;
- int timeoff;
- int ph_idx;
-
- if (!(dd->flags & QIB_INITTED))
- return;
-
- ph_idx = ppd->led_override_phase++ & 1;
- ppd->led_override = ppd->led_override_vals[ph_idx];
- timeoff = ppd->led_override_timeoff;
-
- dd->f_setextled(ppd, 1);
- /*
- * don't re-fire the timer if user asked for it to be off; we let
- * it fire one more time after they turn it off, to simplify the logic.
- */
- if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
- mod_timer(&ppd->led_override_timer, jiffies + timeoff);
-}
-
-void qib_set_led_override(struct qib_pportdata *ppd, unsigned int val)
-{
- struct qib_devdata *dd = ppd->dd;
- int timeoff, freq;
-
- if (!(dd->flags & QIB_INITTED))
- return;
-
- /* First check if we are blinking. If not, use 1HZ polling */
- timeoff = HZ;
- freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
-
- if (freq) {
- /* For blink, set each phase from one nybble of val */
- ppd->led_override_vals[0] = val & 0xF;
- ppd->led_override_vals[1] = (val >> 4) & 0xF;
- timeoff = (HZ << 4)/freq;
- } else {
- /* Non-blink set both phases the same. */
- ppd->led_override_vals[0] = val & 0xF;
- ppd->led_override_vals[1] = val & 0xF;
- }
- ppd->led_override_timeoff = timeoff;
-
- /*
- * If the timer has not already been started, do so. Use a "quick"
- * timeout so the function will be called soon, to look at our request.
- */
- if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
- /* Need to start timer */
- timer_setup(&ppd->led_override_timer, qib_run_led_override, 0);
- ppd->led_override_timer.expires = jiffies + 1;
- add_timer(&ppd->led_override_timer);
- } else {
- if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
- mod_timer(&ppd->led_override_timer, jiffies + 1);
- atomic_dec(&ppd->led_override_timer_active);
- }
-}
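
For illustration, a hypothetical override value following the layout decoded
above (low nybble is phase 0, next nybble is phase 1, bits 8-15 carry the
blink "frequency"):

#define LED_OVER_BLINK	((32 << LED_OVER_FREQ_SHIFT) | 0x0F)
	/* phase 0: LEDs on (0xF), phase 1: off; each phase lasts
	 * (HZ << 4) / 32 jiffies, i.e. half a second */

static void example_led_blink(struct qib_pportdata *ppd)
{
	qib_set_led_override(ppd, LED_OVER_BLINK);	/* start blinking */
	qib_set_led_override(ppd, 0);	/* let override lapse back to normal */
}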
-
-/**
- * qib_reset_device - reset the chip if possible
- * @unit: the device to reset
- *
- * Whether or not reset is successful, we attempt to re-initialize the chip
- * (that is, much like a driver unload/reload). We clear the INITTED flag
- * so that the various entry points will fail until we reinitialize. For
- * now, we only allow this if no user contexts are open that use chip resources
- */
-int qib_reset_device(int unit)
-{
- int ret, i;
- struct qib_devdata *dd = qib_lookup(unit);
- struct qib_pportdata *ppd;
- unsigned long flags;
- int pidx;
-
- if (!dd) {
- ret = -ENODEV;
- goto bail;
- }
-
- qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit);
-
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) {
- qib_devinfo(dd->pcidev,
- "Invalid unit number %u or not initialized or not present\n",
- unit);
- ret = -ENXIO;
- goto bail;
- }
-
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- if (dd->rcd)
- for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
- if (!dd->rcd[i] || !dd->rcd[i]->cnt)
- continue;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- ret = -EBUSY;
- goto bail;
- }
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (atomic_read(&ppd->led_override_timer_active)) {
- /* Need to stop LED timer, _then_ shut off LEDs */
- timer_delete_sync(&ppd->led_override_timer);
- atomic_set(&ppd->led_override_timer_active, 0);
- }
-
- /* Shut off LEDs after we are sure timer is not running */
- ppd->led_override = LED_OVER_BOTH_OFF;
- dd->f_setextled(ppd, 0);
- if (dd->flags & QIB_HAS_SEND_DMA)
- qib_teardown_sdma(ppd);
- }
-
- ret = dd->f_reset(dd);
- if (ret == 1)
- ret = qib_init(dd, 1);
- else
- ret = -EAGAIN;
- if (ret)
- qib_dev_err(dd,
- "Reinitialize unit %u after reset failed with %d\n",
- unit, ret);
- else
- qib_devinfo(dd->pcidev,
- "Reinitialized unit %u after resetting\n",
- unit);
-
-bail:
- return ret;
-}
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
deleted file mode 100644
index bf660c001b6d..000000000000
--- a/drivers/infiniband/hw/qib/qib_eeprom.c
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-
-#include "qib.h"
-
-/*
- * Functions specific to the serial EEPROM on cards handled by ib_qib.
- * The actual serial interface code is in qib_twsi.c; this file is a client of it.
- */
-
-/**
- * qib_eeprom_read - receives bytes from the eeprom via I2C
- * @dd: the qlogic_ib device
- * @eeprom_offset: address to read from
- * @buff: where to store result
- * @len: number of bytes to receive
- */
-int qib_eeprom_read(struct qib_devdata *dd, u8 eeprom_offset,
- void *buff, int len)
-{
- int ret;
-
- ret = mutex_lock_interruptible(&dd->eep_lock);
- if (!ret) {
- ret = qib_twsi_reset(dd);
- if (ret)
- qib_dev_err(dd, "EEPROM Reset for read failed\n");
- else
- ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev,
- eeprom_offset, buff, len);
- mutex_unlock(&dd->eep_lock);
- }
-
- return ret;
-}
-
-/*
- * Actually update the eeprom, first doing write enable if
- * needed, then restoring write enable state.
- * Must be called with eep_lock held
- */
-static int eeprom_write_with_enable(struct qib_devdata *dd, u8 offset,
- const void *buf, int len)
-{
- int ret, pwen;
-
- pwen = dd->f_eeprom_wen(dd, 1);
- ret = qib_twsi_reset(dd);
- if (ret)
- qib_dev_err(dd, "EEPROM Reset for write failed\n");
- else
- ret = qib_twsi_blk_wr(dd, dd->twsi_eeprom_dev,
- offset, buf, len);
- dd->f_eeprom_wen(dd, pwen);
- return ret;
-}
-
-/**
- * qib_eeprom_write - writes data to the eeprom via I2C
- * @dd: the qlogic_ib device
- * @eeprom_offset: where to place data
- * @buff: data to write
- * @len: number of bytes to write
- */
-int qib_eeprom_write(struct qib_devdata *dd, u8 eeprom_offset,
- const void *buff, int len)
-{
- int ret;
-
- ret = mutex_lock_interruptible(&dd->eep_lock);
- if (!ret) {
- ret = eeprom_write_with_enable(dd, eeprom_offset, buff, len);
- mutex_unlock(&dd->eep_lock);
- }
-
- return ret;
-}
-
-static u8 flash_csum(struct qib_flash *ifp, int adjust)
-{
- u8 *ip = (u8 *) ifp;
- u8 csum = 0, len;
-
- /*
- * Limit length checksummed to max length of actual data.
- * Checksum of erased eeprom will still be bad, but we avoid
- * reading past the end of the buffer we were passed.
- */
- len = ifp->if_length;
- if (len > sizeof(struct qib_flash))
- len = sizeof(struct qib_flash);
- while (len--)
- csum += *ip++;
- csum -= ifp->if_csum;
- csum = ~csum;
- if (adjust)
- ifp->if_csum = csum;
-
- return csum;
-}
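A standalone sketch of the checksum rule flash_csum() implements: sum every byte of the structure, back out the stored checksum byte, then take the ones' complement. struct toy_flash is a simplified stand-in for struct qib_flash.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_flash {
	uint8_t if_length;
	uint8_t if_csum;
	uint8_t if_data[14];
};

static uint8_t toy_csum(const struct toy_flash *ifp)
{
	const uint8_t *ip = (const uint8_t *)ifp;
	uint8_t csum = 0;
	size_t len = ifp->if_length;

	if (len > sizeof(*ifp))		/* never read past the buffer */
		len = sizeof(*ifp);
	while (len--)
		csum += *ip++;
	csum -= ifp->if_csum;		/* stored byte doesn't sum itself */
	return ~csum;
}

int main(void)
{
	struct toy_flash f;

	memset(&f, 0, sizeof(f));
	f.if_length = sizeof(f);
	memcpy(f.if_data, "GUIDetc", 7);
	f.if_csum = toy_csum(&f);	/* the adjust=1 path */
	printf("stored %#x, verify %s\n", f.if_csum,
	       toy_csum(&f) == f.if_csum ? "ok" : "bad");
	return 0;
}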
-
-/**
- * qib_get_eeprom_info - get the GUID et al. from the TWSI EEPROM device
- * @dd: the qlogic_ib device
- *
- * We have the capability to use the nguid field, and get
- * the guid from the first chip's flash, to use for all of them.
- */
-void qib_get_eeprom_info(struct qib_devdata *dd)
-{
- void *buf;
- struct qib_flash *ifp;
- __be64 guid;
- int len, eep_stat;
- u8 csum, *bguid;
- int t = dd->unit;
- struct qib_devdata *dd0 = qib_lookup(0);
-
- if (t && dd0->nguid > 1 && t <= dd0->nguid) {
- u8 oguid;
-
- dd->base_guid = dd0->base_guid;
- bguid = (u8 *) &dd->base_guid;
-
- oguid = bguid[7];
- bguid[7] += t;
- if (oguid > bguid[7]) {
- if (bguid[6] == 0xff) {
- if (bguid[5] == 0xff) {
- qib_dev_err(dd,
- "Can't set GUID from base, wraps to OUI!\n");
- dd->base_guid = 0;
- goto bail;
- }
- bguid[5]++;
- }
- bguid[6]++;
- }
- dd->nguid = 1;
- goto bail;
- }
-
- /*
- * Read full flash, not just currently used part, since it may have
- * been written with a newer definition.
- */
- len = sizeof(struct qib_flash);
- buf = vmalloc(len);
- if (!buf)
- goto bail;
-
- /*
- * Use "public" eeprom read function, which does locking and
- * figures out device. This will migrate to chip-specific.
- */
- eep_stat = qib_eeprom_read(dd, 0, buf, len);
-
- if (eep_stat) {
- qib_dev_err(dd, "Failed reading GUID from eeprom\n");
- goto done;
- }
- ifp = (struct qib_flash *)buf;
-
- csum = flash_csum(ifp, 0);
- if (csum != ifp->if_csum) {
- qib_devinfo(dd->pcidev,
- "Bad I2C flash checksum: 0x%x, not 0x%x\n",
- csum, ifp->if_csum);
- goto done;
- }
- if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
- *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
- qib_dev_err(dd,
- "Invalid GUID %llx from flash; ignoring\n",
- *(unsigned long long *) ifp->if_guid);
- /* don't allow GUID if all 0 or all 1's */
- goto done;
- }
-
- /* complain, but allow it */
- if (*(u64 *) ifp->if_guid == 0x100007511000000ULL)
- qib_devinfo(dd->pcidev,
- "Warning, GUID %llx is default, probably not correct!\n",
- *(unsigned long long *) ifp->if_guid);
-
- bguid = ifp->if_guid;
- if (!bguid[0] && !bguid[1] && !bguid[2]) {
- /*
- * Original incorrect GUID format in flash; fix in
- * core copy, by shifting up 2 octets; don't need to
- * change top octet, since both it and shifted are 0.
- */
- bguid[1] = bguid[3];
- bguid[2] = bguid[4];
- bguid[3] = 0;
- bguid[4] = 0;
- guid = *(__be64 *) ifp->if_guid;
- } else
- guid = *(__be64 *) ifp->if_guid;
- dd->base_guid = guid;
- dd->nguid = ifp->if_numguid;
- /*
- * Things are slightly complicated by the desire to transparently
- * support both the Pathscale 10-digit serial number and the QLogic
- * 13-character version.
- */
- if ((ifp->if_fversion > 1) && ifp->if_sprefix[0] &&
- ((u8 *) ifp->if_sprefix)[0] != 0xFF) {
- char *snp = dd->serial;
-
- /*
- * This board has a Serial-prefix, which is stored
- * elsewhere for backward-compatibility.
- */
- memcpy(snp, ifp->if_sprefix, sizeof(ifp->if_sprefix));
- snp[sizeof(ifp->if_sprefix)] = '\0';
- len = strlen(snp);
- snp += len;
- len = sizeof(dd->serial) - len;
- if (len > sizeof(ifp->if_serial))
- len = sizeof(ifp->if_serial);
- memcpy(snp, ifp->if_serial, len);
- } else {
- memcpy(dd->serial, ifp->if_serial, sizeof(ifp->if_serial));
- }
- if (!strstr(ifp->if_comment, "Tested successfully"))
- qib_dev_err(dd,
- "Board SN %s did not pass functional test: %s\n",
- dd->serial, ifp->if_comment);
-
-done:
- vfree(buf);
-
-bail:;
-}
-
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
deleted file mode 100644
index 29e4c59aa23b..000000000000
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ /dev/null
@@ -1,2401 +0,0 @@
-/*
- * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/cdev.h>
-#include <linux/swap.h>
-#include <linux/vmalloc.h>
-#include <linux/highmem.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/delay.h>
-#include <linux/export.h>
-#include <linux/uio.h>
-#include <linux/pgtable.h>
-
-#include <rdma/ib.h>
-
-#include "qib.h"
-#include "qib_common.h"
-#include "qib_user_sdma.h"
-
-#undef pr_fmt
-#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
-
-static int qib_open(struct inode *, struct file *);
-static int qib_close(struct inode *, struct file *);
-static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
-static ssize_t qib_write_iter(struct kiocb *, struct iov_iter *);
-static __poll_t qib_poll(struct file *, struct poll_table_struct *);
-static int qib_mmapf(struct file *, struct vm_area_struct *);
-
-/*
- * This is really, really weird shit - write() and writev() here
- * have completely unrelated semantics. Sucky userland ABI,
- * film at 11.
- */
-static const struct file_operations qib_file_ops = {
- .owner = THIS_MODULE,
- .write = qib_write,
- .write_iter = qib_write_iter,
- .open = qib_open,
- .release = qib_close,
- .poll = qib_poll,
- .mmap = qib_mmapf,
- .llseek = noop_llseek,
-};
-
-/*
- * Convert kernel virtual addresses to physical addresses so they don't
- * potentially conflict with the chip addresses used as mmap offsets.
- * It doesn't really matter what mmap offset we use as long as we can
- * interpret it correctly.
- */
-static u64 cvt_kvaddr(void *p)
-{
- struct page *page;
- u64 paddr = 0;
-
- page = vmalloc_to_page(p);
- if (page)
- paddr = page_to_pfn(page) << PAGE_SHIFT;
-
- return paddr;
-}
-
-static int qib_get_base_info(struct file *fp, void __user *ubase,
- size_t ubase_size)
-{
- struct qib_ctxtdata *rcd = ctxt_fp(fp);
- int ret = 0;
- struct qib_base_info *kinfo = NULL;
- struct qib_devdata *dd = rcd->dd;
- struct qib_pportdata *ppd = rcd->ppd;
- unsigned subctxt_cnt;
- int shared, master;
- size_t sz;
-
- subctxt_cnt = rcd->subctxt_cnt;
- if (!subctxt_cnt) {
- shared = 0;
- master = 0;
- subctxt_cnt = 1;
- } else {
- shared = 1;
- master = !subctxt_fp(fp);
- }
-
- sz = sizeof(*kinfo);
- /* If context sharing is not requested, allow the old size structure */
- if (!shared)
- sz -= 7 * sizeof(u64);
- if (ubase_size < sz) {
- ret = -EINVAL;
- goto bail;
- }
-
- kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
- if (kinfo == NULL) {
- ret = -ENOMEM;
- goto bail;
- }
-
- ret = dd->f_get_base_info(rcd, kinfo);
- if (ret < 0)
- goto bail;
-
- kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
- kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
- kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
- kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;
- /*
- * have to mmap whole thing
- */
- kinfo->spi_rcv_egrbuftotlen =
- rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
- kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
- kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
- rcd->rcvegrbuf_chunks;
- kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
- if (master)
- kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
- /*
- * for this use, this may be cfgctxts summed over all chips that
- * are configured and present
- */
- kinfo->spi_nctxts = dd->cfgctxts;
- /* unit (chip/board) our context is on */
- kinfo->spi_unit = dd->unit;
- kinfo->spi_port = ppd->port;
- /* for now, only a single page */
- kinfo->spi_tid_maxsize = PAGE_SIZE;
-
- /*
- * Doing this per context, and based on the skip value, etc. This has
- * to be the actual buffer size, since the protocol code treats it
- * as an array.
- *
- * These have to be set to user addresses in the user code via mmap.
- * These values are used on return to user code for the mmap target
- * addresses only. For 32 bit, same 44 bit address problem, so use
- * the physical address, not virtual. Before 2.6.11, using the
- * page_address() macro worked, but in 2.6.11, even that returns the
- * full 64 bit address (upper bits all 1's). So far, using the
- * physical addresses (or chip offsets, for chip mapping) works, but
- * no doubt some future kernel release will change that, and we'll be
- * on to yet another method of dealing with this.
- * Normally only one of rcvhdr_tailaddr or rhf_offset is useful
- * since the chips with non-zero rhf_offset don't normally
- * enable tail register updates to host memory, but for testing,
- * both can be enabled and used.
- */
- kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys;
- kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys;
- kinfo->spi_rhf_offset = dd->rhf_offset;
- kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys;
- kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
- /* setup per-unit (not port) status area for user programs */
- kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
- (char *) ppd->statusp -
- (char *) dd->pioavailregs_dma;
- kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
- if (!shared) {
- kinfo->spi_piocnt = rcd->piocnt;
- kinfo->spi_piobufbase = (u64) rcd->piobufs;
- kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask);
- } else if (master) {
- kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) +
- (rcd->piocnt % subctxt_cnt);
- /* Master's PIO buffers are after all the slave's */
- kinfo->spi_piobufbase = (u64) rcd->piobufs +
- dd->palign *
- (rcd->piocnt - kinfo->spi_piocnt);
- } else {
- unsigned slave = subctxt_fp(fp) - 1;
-
- kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt;
- kinfo->spi_piobufbase = (u64) rcd->piobufs +
- dd->palign * kinfo->spi_piocnt * slave;
- }
-
- if (shared) {
- kinfo->spi_sendbuf_status =
- cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]);
- /* only spi_subctxt_* fields should be set in this block! */
- kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase);
-
- kinfo->spi_subctxt_rcvegrbuf =
- cvt_kvaddr(rcd->subctxt_rcvegrbuf);
- kinfo->spi_subctxt_rcvhdr_base =
- cvt_kvaddr(rcd->subctxt_rcvhdr_base);
- }
-
- /*
- * All user buffers are 2KB buffers. If we ever support
- * giving 4KB buffers to user processes, this will need some
- * work. Can't use piobufbase directly, because it has
- * both 2K and 4K buffer base values.
- */
- kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
- dd->palign;
- kinfo->spi_pioalign = dd->palign;
- kinfo->spi_qpair = QIB_KD_QP;
- /*
- * user mode PIO buffers are always 2KB, even when 4KB can
- * be received, and sent via the kernel; this is ibmaxlen
- * for 2K MTU.
- */
- kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
- kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */
- kinfo->spi_ctxt = rcd->ctxt;
- kinfo->spi_subctxt = subctxt_fp(fp);
- kinfo->spi_sw_version = QIB_KERN_SWVERSION;
- kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */
- kinfo->spi_hw_version = dd->revision;
-
- if (master)
- kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER;
-
- sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
- if (copy_to_user(ubase, kinfo, sz))
- ret = -EFAULT;
-bail:
- kfree(kinfo);
- return ret;
-}
-
-/**
- * qib_tid_update - update a context TID
- * @rcd: the context
- * @fp: the qib device file
- * @ti: the TID information
- *
- * The new implementation as of Oct 2004 is that the driver assigns
- * the tid and returns it to the caller. To reduce search time, we
- * keep a cursor for each context, walking the shadow tid array to find
- * one that's not in use.
- *
- * For now, if we can't allocate the full list, we fail, although
- * in the long run, we'll allocate as many as we can, and the
- * caller will deal with that by trying the remaining pages later.
- * That means that when we fail, we have to mark the tids as not in
- * use again, in our shadow copy.
- *
- * It's up to the caller to free the tids when they are done.
- * We'll unlock the pages as they free them.
- *
- * Also, right now we are locking one page at a time, but since
- * the intended use of this routine is for a single group of
- * virtually contiguous pages, that should change to improve
- * performance.
- */
-static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
- const struct qib_tid_info *ti)
-{
- int ret = 0, ntids;
- u32 tid, ctxttid, cnt, i, tidcnt, tidoff;
- u16 *tidlist;
- struct qib_devdata *dd = rcd->dd;
- u64 physaddr;
- unsigned long vaddr;
- u64 __iomem *tidbase;
- unsigned long tidmap[8];
- struct page **pagep = NULL;
- unsigned subctxt = subctxt_fp(fp);
-
- if (!dd->pageshadow) {
- ret = -ENOMEM;
- goto done;
- }
-
- cnt = ti->tidcnt;
- if (!cnt) {
- ret = -EFAULT;
- goto done;
- }
- ctxttid = rcd->ctxt * dd->rcvtidcnt;
- if (!rcd->subctxt_cnt) {
- tidcnt = dd->rcvtidcnt;
- tid = rcd->tidcursor;
- tidoff = 0;
- } else if (!subctxt) {
- tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
- (dd->rcvtidcnt % rcd->subctxt_cnt);
- tidoff = dd->rcvtidcnt - tidcnt;
- ctxttid += tidoff;
- tid = tidcursor_fp(fp);
- } else {
- tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
- tidoff = tidcnt * (subctxt - 1);
- ctxttid += tidoff;
- tid = tidcursor_fp(fp);
- }
- if (cnt > tidcnt) {
- /* make sure it all fits in tid_pg_list */
- qib_devinfo(dd->pcidev,
- "Process tried to allocate %u TIDs, only trying max (%u)\n",
- cnt, tidcnt);
- cnt = tidcnt;
- }
- pagep = (struct page **) rcd->tid_pg_list;
- tidlist = (u16 *) &pagep[dd->rcvtidcnt];
- pagep += tidoff;
- tidlist += tidoff;
-
- memset(tidmap, 0, sizeof(tidmap));
- /* before decrement; chip actual # */
- ntids = tidcnt;
- tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
- dd->rcvtidbase +
- ctxttid * sizeof(*tidbase));
-
- /* virtual address of first page in transfer */
- vaddr = ti->tidvaddr;
- if (!access_ok((void __user *) vaddr,
- cnt * PAGE_SIZE)) {
- ret = -EFAULT;
- goto done;
- }
- ret = qib_get_user_pages(vaddr, cnt, pagep);
- if (ret) {
- /*
- * if (ret == -EBUSY)
- * We can't continue because the pagep array won't be
- * initialized. This should never happen,
- * unless perhaps the user has mpin'ed the pages
- * themselves.
- */
- qib_devinfo(
- dd->pcidev,
- "Failed to lock addr %p, %u pages: errno %d\n",
- (void *) vaddr, cnt, -ret);
- goto done;
- }
- for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
- dma_addr_t daddr;
-
- for (; ntids--; tid++) {
- if (tid == tidcnt)
- tid = 0;
- if (!dd->pageshadow[ctxttid + tid])
- break;
- }
- if (ntids < 0) {
- /*
- * Oops, wrapped all the way through their TIDs,
- * and didn't have enough free; see comments at
- * start of routine
- */
- i--; /* last tidlist[i] not filled in */
- ret = -ENOMEM;
- break;
- }
- ret = qib_map_page(dd->pcidev, pagep[i], &daddr);
- if (ret)
- break;
-
- tidlist[i] = tid + tidoff;
- /* we "know" system pages and TID pages are same size */
- dd->pageshadow[ctxttid + tid] = pagep[i];
- dd->physshadow[ctxttid + tid] = daddr;
- /*
- * don't need atomic, or its overhead
- */
- __set_bit(tid, tidmap);
- physaddr = dd->physshadow[ctxttid + tid];
- /* PERFORMANCE: below should almost certainly be cached */
- dd->f_put_tid(dd, &tidbase[tid],
- RCVHQ_RCV_TYPE_EXPECTED, physaddr);
- /*
- * don't check this tid in qib_ctxtshadow, since we
- * just filled it in; start with the next one.
- */
- tid++;
- }
-
- if (ret) {
- u32 limit;
-cleanup:
- /* jump here if copy out of updated info failed... */
- /* same code that's in qib_free_tid() */
- limit = sizeof(tidmap) * BITS_PER_BYTE;
- if (limit > tidcnt)
- /* just in case size changes in future */
- limit = tidcnt;
- tid = find_first_bit((const unsigned long *)tidmap, limit);
- for (; tid < limit; tid++) {
- if (!test_bit(tid, tidmap))
- continue;
- if (dd->pageshadow[ctxttid + tid]) {
- dma_addr_t phys;
-
- phys = dd->physshadow[ctxttid + tid];
- dd->physshadow[ctxttid + tid] = dd->tidinvalid;
- /* PERFORMANCE: below should almost certainly
- * be cached
- */
- dd->f_put_tid(dd, &tidbase[tid],
- RCVHQ_RCV_TYPE_EXPECTED,
- dd->tidinvalid);
- dma_unmap_page(&dd->pcidev->dev, phys,
- PAGE_SIZE, DMA_FROM_DEVICE);
- dd->pageshadow[ctxttid + tid] = NULL;
- }
- }
- qib_release_user_pages(pagep, cnt);
- } else {
- /*
- * Copy the updated array, with qib_tids filled in, back
- * to user. Since we did the copy in already, this "should
- * never fail". If it does, we have to clean up...
- */
- if (copy_to_user((void __user *)
- (unsigned long) ti->tidlist,
- tidlist, cnt * sizeof(*tidlist))) {
- ret = -EFAULT;
- goto cleanup;
- }
- if (copy_to_user(u64_to_user_ptr(ti->tidmap),
- tidmap, sizeof(tidmap))) {
- ret = -EFAULT;
- goto cleanup;
- }
- if (tid == tidcnt)
- tid = 0;
- if (!rcd->subctxt_cnt)
- rcd->tidcursor = tid;
- else
- tidcursor_fp(fp) = tid;
- }
-
-done:
- return ret;
-}
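A standalone sketch of the cursor-based free-slot scan above: start at the saved cursor, wrap once around the table, and fail only after tidcnt probes. Table size and contents are invented for the example.

#include <stdio.h>

#define TIDCNT 8

static void *shadow[TIDCNT];	/* NULL means free, like dd->pageshadow */

static int find_free_tid(unsigned int *cursor)
{
	unsigned int tid = *cursor;
	int ntids = TIDCNT;

	for (; ntids--; tid++) {
		if (tid == TIDCNT)
			tid = 0;	/* wrap to the start of the table */
		if (!shadow[tid])
			break;
	}
	if (ntids < 0)
		return -1;		/* wrapped all the way; table full */
	*cursor = tid + 1;		/* resume after the slot just used */
	return tid;
}

int main(void)
{
	unsigned int cursor = 5;
	int dummy;

	shadow[5] = &dummy;	/* slots 5 and 6 busy: scan must skip them */
	shadow[6] = &dummy;
	printf("got tid %d\n", find_free_tid(&cursor));	/* prints 7 */
	return 0;
}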
-
-/**
- * qib_tid_free - free a context TID
- * @rcd: the context
- * @subctxt: the subcontext
- * @ti: the TID info
- *
- * right now we are unlocking one page at a time, but since
- * the intended use of this routine is for a single group of
- * virtually contiguous pages, that should change to improve
- * performance. We check that the TID is in range for this context
- * but otherwise don't check validity; if user has an error and
- * frees the wrong tid, it's only their own data that can thereby
- * be corrupted. We do check that the TID was in use, for sanity.
- * We always use our idea of the saved address, not the address that
- * they pass in to us.
- */
-static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
- const struct qib_tid_info *ti)
-{
- int ret = 0;
- u32 tid, ctxttid, limit, tidcnt;
- struct qib_devdata *dd = rcd->dd;
- u64 __iomem *tidbase;
- unsigned long tidmap[8];
-
- if (!dd->pageshadow) {
- ret = -ENOMEM;
- goto done;
- }
-
- if (copy_from_user(tidmap, u64_to_user_ptr(ti->tidmap),
- sizeof(tidmap))) {
- ret = -EFAULT;
- goto done;
- }
-
- ctxttid = rcd->ctxt * dd->rcvtidcnt;
- if (!rcd->subctxt_cnt)
- tidcnt = dd->rcvtidcnt;
- else if (!subctxt) {
- tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
- (dd->rcvtidcnt % rcd->subctxt_cnt);
- ctxttid += dd->rcvtidcnt - tidcnt;
- } else {
- tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
- ctxttid += tidcnt * (subctxt - 1);
- }
- tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
- dd->rcvtidbase +
- ctxttid * sizeof(*tidbase));
-
- limit = sizeof(tidmap) * BITS_PER_BYTE;
- if (limit > tidcnt)
- /* just in case size changes in future */
- limit = tidcnt;
- tid = find_first_bit(tidmap, limit);
- for (; tid < limit; tid++) {
- /*
- * small optimization; if we detect a run of 3 or so without
- * any set, use find_first_bit again. That's mainly to
- * accelerate the case where we wrapped, so we have some at
- * the beginning, and some at the end, and a big gap
- * in the middle.
- */
- if (!test_bit(tid, tidmap))
- continue;
-
- if (dd->pageshadow[ctxttid + tid]) {
- struct page *p;
- dma_addr_t phys;
-
- p = dd->pageshadow[ctxttid + tid];
- dd->pageshadow[ctxttid + tid] = NULL;
- phys = dd->physshadow[ctxttid + tid];
- dd->physshadow[ctxttid + tid] = dd->tidinvalid;
- /* PERFORMANCE: below should almost certainly be
- * cached
- */
- dd->f_put_tid(dd, &tidbase[tid],
- RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
- dma_unmap_page(&dd->pcidev->dev, phys, PAGE_SIZE,
- DMA_FROM_DEVICE);
- qib_release_user_pages(&p, 1);
- }
- }
-done:
- return ret;
-}
-
-/**
- * qib_set_part_key - set a partition key
- * @rcd: the context
- * @key: the key
- *
- * We can have up to 4 active at a time (other than the default, which is
- * always allowed). This is somewhat tricky, since multiple contexts may set
- * the same key, so we reference count them, and clean up at exit. All 4
- * partition keys are packed into a single qlogic_ib register. It's an
- * error for a process to set the same pkey multiple times. We provide no
- * mechanism to de-allocate a pkey at this time; we may eventually need to
- * do that. I've used the atomic operations, and no locking, and only make
- * a single pass through what's available. This should be more than
- * adequate for some time. I'll think about spinlocks or the like if and as
- * it's necessary.
- */
-static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
-{
- struct qib_pportdata *ppd = rcd->ppd;
- int i, pidx = -1;
- bool any = false;
- u16 lkey = key & 0x7FFF;
-
- if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF))
- /* nothing to do; this key always valid */
- return 0;
-
- if (!lkey)
- return -EINVAL;
-
- /*
- * Set the full membership bit, because it has to be
- * set in the register or the packet, and it seems
- * cleaner to set in the register than to force all
- * callers to set it.
- */
- key |= 0x8000;
-
- for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
- if (!rcd->pkeys[i] && pidx == -1)
- pidx = i;
- if (rcd->pkeys[i] == key)
- return -EEXIST;
- }
- if (pidx == -1)
- return -EBUSY;
- for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
- if (!ppd->pkeys[i]) {
- any = true;
- continue;
- }
- if (ppd->pkeys[i] == key) {
- atomic_t *pkrefs = &ppd->pkeyrefs[i];
-
- if (atomic_inc_return(pkrefs) > 1) {
- rcd->pkeys[pidx] = key;
- return 0;
- }
- /*
- * lost race, decrement count, catch below
- */
- atomic_dec(pkrefs);
- any = true;
- }
- if ((ppd->pkeys[i] & 0x7FFF) == lkey)
- /*
- * It makes no sense to have both the limited and
- * full membership PKEY set at the same time since
- * the unlimited one will disable the limited one.
- */
- return -EEXIST;
- }
- if (!any)
- return -EBUSY;
- for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
- if (!ppd->pkeys[i] &&
- atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
- rcd->pkeys[pidx] = key;
- ppd->pkeys[i] = key;
- (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
- return 0;
- }
- }
- return -EBUSY;
-}
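A simplified sketch of the pkey table behaviour the comment above describes: up to four refcounted slots, shared keys bump a count, and -EBUSY when the table is full. Plain ints stand in for the driver's atomic refcounts, and the per-context duplicate and limited-membership checks are omitted.

#include <errno.h>
#include <stdio.h>

#define NPKEYS 4

static unsigned short pkeys[NPKEYS];
static int pkeyrefs[NPKEYS];

static int set_part_key(unsigned short key)
{
	int i;

	key |= 0x8000;		/* force the full-membership bit */
	for (i = 0; i < NPKEYS; i++) {
		if (pkeys[i] == key) {	/* already present: share it */
			pkeyrefs[i]++;
			return 0;
		}
	}
	for (i = 0; i < NPKEYS; i++) {
		if (!pkeys[i]) {	/* free slot: install the key */
			pkeys[i] = key;
			pkeyrefs[i] = 1;
			return 0;
		}
	}
	return -EBUSY;		/* all four slots taken */
}

int main(void)
{
	/* second call shares the first slot; prints "0 0 0" */
	printf("%d %d %d\n", set_part_key(0x1234),
	       set_part_key(0x1234), set_part_key(0x5678));
	return 0;
}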
-
-/**
- * qib_manage_rcvq - manage a context's receive queue
- * @rcd: the context
- * @subctxt: the subcontext
- * @start_stop: action to carry out
- *
- * start_stop == 0 disables receive on the context, for use in queue
- * overflow conditions. start_stop==1 re-enables, to be used to
- * re-init the software copy of the head register
- */
-static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,
- int start_stop)
-{
- struct qib_devdata *dd = rcd->dd;
- unsigned int rcvctrl_op;
-
- if (subctxt)
- goto bail;
- /* atomically enable or disable receive for the ctxt. */
- if (start_stop) {
- /*
- * On enable, force in-memory copy of the tail register to
- * 0, so that protocol code doesn't have to worry about
- * whether the chip has yet updated the in-memory
- * copy on return from the system call. The chip
- * always resets its tail register back to 0 on a
- * transition from disabled to enabled.
- */
- if (rcd->rcvhdrtail_kvaddr)
- qib_clear_rcvhdrtail(rcd);
- rcvctrl_op = QIB_RCVCTRL_CTXT_ENB;
- } else
- rcvctrl_op = QIB_RCVCTRL_CTXT_DIS;
- dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
- /* always; new head should be equal to new tail; see above */
-bail:
- return 0;
-}
-
-static void qib_clean_part_key(struct qib_ctxtdata *rcd,
- struct qib_devdata *dd)
-{
- int i, j, pchanged = 0;
- struct qib_pportdata *ppd = rcd->ppd;
-
- for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
- if (!rcd->pkeys[i])
- continue;
- for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) {
- /* check for match independent of the global bit */
- if ((ppd->pkeys[j] & 0x7fff) !=
- (rcd->pkeys[i] & 0x7fff))
- continue;
- if (atomic_dec_and_test(&ppd->pkeyrefs[j])) {
- ppd->pkeys[j] = 0;
- pchanged++;
- }
- break;
- }
- rcd->pkeys[i] = 0;
- }
- if (pchanged)
- (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
-}
-
-/* common code for the mappings on dma_alloc_coherent mem */
-static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
- unsigned len, void *kvaddr, u32 write_ok, char *what)
-{
- struct qib_devdata *dd = rcd->dd;
- unsigned long pfn;
- int ret;
-
- if ((vma->vm_end - vma->vm_start) > len) {
- qib_devinfo(dd->pcidev,
- "FAIL on %s: len %lx > %x\n", what,
- vma->vm_end - vma->vm_start, len);
- ret = -EFAULT;
- goto bail;
- }
-
- /*
- * shared context user code requires rcvhdrq mapped r/w, others
- * only allowed readonly mapping.
- */
- if (!write_ok) {
- if (vma->vm_flags & VM_WRITE) {
- qib_devinfo(dd->pcidev,
- "%s must be mapped readonly\n", what);
- ret = -EPERM;
- goto bail;
- }
-
- /* don't allow them to later change with mprotect */
- vm_flags_clear(vma, VM_MAYWRITE);
- }
-
- pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
- ret = remap_pfn_range(vma, vma->vm_start, pfn,
- len, vma->vm_page_prot);
- if (ret)
- qib_devinfo(dd->pcidev,
- "%s ctxt%u mmap of %lx, %x bytes failed: %d\n",
- what, rcd->ctxt, pfn, len, ret);
-bail:
- return ret;
-}
-
-static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
- u64 ureg)
-{
- unsigned long phys;
- unsigned long sz;
- int ret;
-
- /*
- * This is real hardware, so use io_remap. This is the mechanism
- * for the user process to update the head registers for their ctxt
- * in the chip.
- */
- sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
- if ((vma->vm_end - vma->vm_start) > sz) {
- qib_devinfo(dd->pcidev,
- "FAIL mmap userreg: reqlen %lx > PAGE\n",
- vma->vm_end - vma->vm_start);
- ret = -EFAULT;
- } else {
- phys = dd->physaddr + ureg;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND);
- ret = io_remap_pfn_range(vma, vma->vm_start,
- phys >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
- }
- return ret;
-}
-
-static int mmap_piobufs(struct vm_area_struct *vma,
- struct qib_devdata *dd,
- struct qib_ctxtdata *rcd,
- unsigned piobufs, unsigned piocnt)
-{
- unsigned long phys;
- int ret;
-
- /*
- * When we map the PIO buffers in the chip, we want to map them as
- * writeonly, no read possible; unfortunately, x86 doesn't allow
- * for this in hardware, but we still prevent users from asking
- * for it.
- */
- if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
- qib_devinfo(dd->pcidev,
- "FAIL mmap piobufs: reqlen %lx > PAGE\n",
- vma->vm_end - vma->vm_start);
- ret = -EINVAL;
- goto bail;
- }
-
- phys = dd->physaddr + piobufs;
-
-#if defined(__powerpc__)
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#endif
-
- /*
- * don't allow them to later change to readable with mprotect (for when
- * not initially mapped readable, as is normally the case)
- */
- vm_flags_mod(vma, VM_DONTCOPY | VM_DONTEXPAND, VM_MAYREAD);
-
- /* We used PAT if wc_cookie == 0 */
- if (!dd->wc_cookie)
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
-bail:
- return ret;
-}
-
-static int mmap_rcvegrbufs(struct vm_area_struct *vma,
- struct qib_ctxtdata *rcd)
-{
- struct qib_devdata *dd = rcd->dd;
- unsigned long start, size;
- size_t total_size, i;
- unsigned long pfn;
- int ret;
-
- size = rcd->rcvegrbuf_size;
- total_size = rcd->rcvegrbuf_chunks * size;
- if ((vma->vm_end - vma->vm_start) > total_size) {
- qib_devinfo(dd->pcidev,
- "FAIL on egr bufs: reqlen %lx > actual %lx\n",
- vma->vm_end - vma->vm_start,
- (unsigned long) total_size);
- ret = -EINVAL;
- goto bail;
- }
-
- if (vma->vm_flags & VM_WRITE) {
- qib_devinfo(dd->pcidev,
- "Can't map eager buffers as writable (flags=%lx)\n",
- vma->vm_flags);
- ret = -EPERM;
- goto bail;
- }
- /* don't allow them to later change to writable with mprotect */
- vm_flags_clear(vma, VM_MAYWRITE);
-
- start = vma->vm_start;
-
- for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) {
- pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
- ret = remap_pfn_range(vma, start, pfn, size,
- vma->vm_page_prot);
- if (ret < 0)
- goto bail;
- }
- ret = 0;
-
-bail:
- return ret;
-}
-
-/*
- * qib_file_vma_fault - handle a VMA page fault.
- */
-static vm_fault_t qib_file_vma_fault(struct vm_fault *vmf)
-{
- struct page *page;
-
- page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
- if (!page)
- return VM_FAULT_SIGBUS;
-
- get_page(page);
- vmf->page = page;
-
- return 0;
-}
-
-static const struct vm_operations_struct qib_file_vm_ops = {
- .fault = qib_file_vma_fault,
-};
-
-static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
- struct qib_ctxtdata *rcd, unsigned subctxt)
-{
- struct qib_devdata *dd = rcd->dd;
- unsigned subctxt_cnt;
- unsigned long len;
- void *addr;
- size_t size;
- int ret = 0;
-
- subctxt_cnt = rcd->subctxt_cnt;
- size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
-
- /*
- * Each process has all the subctxt uregbase, rcvhdrq, and
- * rcvegrbufs mmapped - as an array for all the processes,
- * and also separately for this process.
- */
- if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) {
- addr = rcd->subctxt_uregbase;
- size = PAGE_SIZE * subctxt_cnt;
- } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) {
- addr = rcd->subctxt_rcvhdr_base;
- size = rcd->rcvhdrq_size * subctxt_cnt;
- } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) {
- addr = rcd->subctxt_rcvegrbuf;
- size *= subctxt_cnt;
- } else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase +
- PAGE_SIZE * subctxt)) {
- addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt;
- size = PAGE_SIZE;
- } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base +
- rcd->rcvhdrq_size * subctxt)) {
- addr = rcd->subctxt_rcvhdr_base +
- rcd->rcvhdrq_size * subctxt;
- size = rcd->rcvhdrq_size;
- } else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) {
- addr = rcd->user_event_mask;
- size = PAGE_SIZE;
- } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf +
- size * subctxt)) {
- addr = rcd->subctxt_rcvegrbuf + size * subctxt;
- /* rcvegrbufs are read-only on the slave */
- if (vma->vm_flags & VM_WRITE) {
- qib_devinfo(dd->pcidev,
- "Can't map eager buffers as writable (flags=%lx)\n",
- vma->vm_flags);
- ret = -EPERM;
- goto bail;
- }
- /*
- * Don't allow permission to later change to writable
- * with mprotect.
- */
- vm_flags_clear(vma, VM_MAYWRITE);
- } else
- goto bail;
- len = vma->vm_end - vma->vm_start;
- if (len > size) {
- ret = -EINVAL;
- goto bail;
- }
-
- vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
- vma->vm_ops = &qib_file_vm_ops;
- vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
- ret = 1;
-
-bail:
- return ret;
-}
-
-/**
- * qib_mmapf - mmap various structures into user space
- * @fp: the file pointer
- * @vma: the VM area
- *
- * We use this to have a shared buffer between the kernel and the user code
- * for the rcvhdr queue, egr buffers, and the per-context user regs and pio
- * buffers in the chip. We have the open and close entries so we can bump
- * the ref count and keep the driver from being unloaded while still mapped.
- */
-static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
-{
- struct qib_ctxtdata *rcd;
- struct qib_devdata *dd;
- u64 pgaddr, ureg;
- unsigned piobufs, piocnt;
- int ret, match = 1;
-
- rcd = ctxt_fp(fp);
- if (!rcd || !(vma->vm_flags & VM_SHARED)) {
- ret = -EINVAL;
- goto bail;
- }
- dd = rcd->dd;
-
- /*
- * This is the qib_do_user_init() code, mapping the shared buffers
- * and per-context user registers into the user process. The address
- * referred to by vm_pgoff is the file offset passed via mmap().
- * For shared contexts, this is the kernel vmalloc() address of the
- * pages to share with the master.
- * For non-shared or master ctxts, this is a physical address.
- * We only do one mmap for each space mapped.
- */
- pgaddr = vma->vm_pgoff << PAGE_SHIFT;
-
- /*
- * Check for 0 in case one of the allocations failed, but user
- * called mmap anyway.
- */
- if (!pgaddr) {
- ret = -EINVAL;
- goto bail;
- }
-
- /*
- * Physical addresses must fit in 40 bits for our hardware.
- * Check for kernel virtual addresses first, anything else must
- * match a HW or memory address.
- */
- ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));
- if (ret) {
- if (ret > 0)
- ret = 0;
- goto bail;
- }
-
- ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
- if (!rcd->subctxt_cnt) {
- /* ctxt is not shared */
- piocnt = rcd->piocnt;
- piobufs = rcd->piobufs;
- } else if (!subctxt_fp(fp)) {
- /* caller is the master */
- piocnt = (rcd->piocnt / rcd->subctxt_cnt) +
- (rcd->piocnt % rcd->subctxt_cnt);
- piobufs = rcd->piobufs +
- dd->palign * (rcd->piocnt - piocnt);
- } else {
- unsigned slave = subctxt_fp(fp) - 1;
-
- /* caller is a slave */
- piocnt = rcd->piocnt / rcd->subctxt_cnt;
- piobufs = rcd->piobufs + dd->palign * piocnt * slave;
- }
-
- if (pgaddr == ureg)
- ret = mmap_ureg(vma, dd, ureg);
- else if (pgaddr == piobufs)
- ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
- else if (pgaddr == dd->pioavailregs_phys)
- /* in-memory copy of pioavail registers */
- ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
- (void *) dd->pioavailregs_dma, 0,
- "pioavail registers");
- else if (pgaddr == rcd->rcvegr_phys)
- ret = mmap_rcvegrbufs(vma, rcd);
- else if (pgaddr == (u64) rcd->rcvhdrq_phys)
- /*
- * The rcvhdrq itself; multiple pages, contiguous
- * from an i/o perspective. Shared contexts need
- * to map r/w, so we allow writing.
- */
- ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
- rcd->rcvhdrq, 1, "rcvhdrq");
- else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys)
- /* in-memory copy of rcvhdrq tail register */
- ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
- rcd->rcvhdrtail_kvaddr, 0,
- "rcvhdrq tail");
- else
- match = 0;
- if (!match)
- ret = -EINVAL;
-
- vma->vm_private_data = NULL;
-
- if (ret < 0)
- qib_devinfo(dd->pcidev,
- "mmap Failure %d: off %llx len %lx\n",
- -ret, (unsigned long long)pgaddr,
- vma->vm_end - vma->vm_start);
-bail:
- return ret;
-}
-
-static __poll_t qib_poll_urgent(struct qib_ctxtdata *rcd,
- struct file *fp,
- struct poll_table_struct *pt)
-{
- struct qib_devdata *dd = rcd->dd;
- __poll_t pollflag;
-
- poll_wait(fp, &rcd->wait, pt);
-
- spin_lock_irq(&dd->uctxt_lock);
- if (rcd->urgent != rcd->urgent_poll) {
- pollflag = EPOLLIN | EPOLLRDNORM;
- rcd->urgent_poll = rcd->urgent;
- } else {
- pollflag = 0;
- set_bit(QIB_CTXT_WAITING_URG, &rcd->flag);
- }
- spin_unlock_irq(&dd->uctxt_lock);
-
- return pollflag;
-}
-
-static __poll_t qib_poll_next(struct qib_ctxtdata *rcd,
- struct file *fp,
- struct poll_table_struct *pt)
-{
- struct qib_devdata *dd = rcd->dd;
- __poll_t pollflag;
-
- poll_wait(fp, &rcd->wait, pt);
-
- spin_lock_irq(&dd->uctxt_lock);
- if (dd->f_hdrqempty(rcd)) {
- set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag);
- dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
- pollflag = 0;
- } else
- pollflag = EPOLLIN | EPOLLRDNORM;
- spin_unlock_irq(&dd->uctxt_lock);
-
- return pollflag;
-}
-
-static __poll_t qib_poll(struct file *fp, struct poll_table_struct *pt)
-{
- struct qib_ctxtdata *rcd;
- __poll_t pollflag;
-
- rcd = ctxt_fp(fp);
- if (!rcd)
- pollflag = EPOLLERR;
- else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
- pollflag = qib_poll_urgent(rcd, fp, pt);
- else if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
- pollflag = qib_poll_next(rcd, fp, pt);
- else /* invalid */
- pollflag = EPOLLERR;
-
- return pollflag;
-}
-
-static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
-{
- struct qib_filedata *fd = fp->private_data;
- const unsigned int weight = current->nr_cpus_allowed;
- const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
- int local_cpu;
-
- /*
- * If the process has NOT already set its affinity, select and
- * reserve a processor for it on the local NUMA node.
- */
- if ((weight >= qib_cpulist_count) &&
- (cpumask_weight(local_mask) <= qib_cpulist_count)) {
- for_each_cpu(local_cpu, local_mask)
- if (!test_and_set_bit(local_cpu, qib_cpulist)) {
- fd->rec_cpu_num = local_cpu;
- return;
- }
- }
-
- /*
- * If the process has NOT already set its affinity, select and
- * reserve a processor for it, as a rendezvous for all
- * users of the driver. If they don't actually later
- * set affinity to this cpu, or set it to some other cpu,
- * it just means that sooner or later we don't recommend
- * a cpu, and let the scheduler do its best.
- */
- if (weight >= qib_cpulist_count) {
- int cpu;
-
- cpu = find_first_zero_bit(qib_cpulist,
- qib_cpulist_count);
- if (cpu == qib_cpulist_count)
- qib_dev_err(dd,
- "no cpus avail for affinity PID %u\n",
- current->pid);
- else {
- __set_bit(cpu, qib_cpulist);
- fd->rec_cpu_num = cpu;
- }
- }
-}
-
-/*
- * Check that userland and driver are compatible for subcontexts.
- */
-static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
-{
- /* this code is written long-hand for clarity */
- if (QIB_USER_SWMAJOR != user_swmajor) {
- /* no promise of compatibility if major mismatch */
- return 0;
- }
- if (QIB_USER_SWMAJOR == 1) {
- switch (QIB_USER_SWMINOR) {
- case 0:
- case 1:
- case 2:
- /* no subctxt implementation so cannot be compatible */
- return 0;
- case 3:
- /* 3 is only compatible with itself */
- return user_swminor == 3;
- default:
- /* >= 4 are compatible (or are expected to be) */
- return user_swminor <= QIB_USER_SWMINOR;
- }
- }
- /* make no promises yet for future major versions */
- return 0;
-}
-
-static int init_subctxts(struct qib_devdata *dd,
- struct qib_ctxtdata *rcd,
- const struct qib_user_info *uinfo)
-{
- int ret = 0;
- unsigned num_subctxts;
- size_t size;
-
- /*
- * If the user is requesting zero subctxts,
- * skip the subctxt allocation.
- */
- if (uinfo->spu_subctxt_cnt <= 0)
- goto bail;
- num_subctxts = uinfo->spu_subctxt_cnt;
-
- /* Check for subctxt compatibility */
- if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
- uinfo->spu_userversion & 0xffff)) {
- qib_devinfo(dd->pcidev,
- "Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same release.\n",
- (int) (uinfo->spu_userversion >> 16),
- (int) (uinfo->spu_userversion & 0xffff),
- QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
- goto bail;
- }
- if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) {
- ret = -EINVAL;
- goto bail;
- }
-
- rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
- if (!rcd->subctxt_uregbase) {
- ret = -ENOMEM;
- goto bail;
- }
- /* Note: rcd->rcvhdrq_size isn't initialized yet. */
- size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
- sizeof(u32), PAGE_SIZE) * num_subctxts;
- rcd->subctxt_rcvhdr_base = vmalloc_user(size);
- if (!rcd->subctxt_rcvhdr_base) {
- ret = -ENOMEM;
- goto bail_ureg;
- }
-
- rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
- rcd->rcvegrbuf_size *
- num_subctxts);
- if (!rcd->subctxt_rcvegrbuf) {
- ret = -ENOMEM;
- goto bail_rhdr;
- }
-
- rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
- rcd->subctxt_id = uinfo->spu_subctxt_id;
- rcd->active_slaves = 1;
- rcd->redirect_seq_cnt = 1;
- set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
- goto bail;
-
-bail_rhdr:
- vfree(rcd->subctxt_rcvhdr_base);
-bail_ureg:
- vfree(rcd->subctxt_uregbase);
- rcd->subctxt_uregbase = NULL;
-bail:
- return ret;
-}
-
-static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
- struct file *fp, const struct qib_user_info *uinfo)
-{
- struct qib_filedata *fd = fp->private_data;
- struct qib_devdata *dd = ppd->dd;
- struct qib_ctxtdata *rcd;
- void *ptmp = NULL;
- int ret;
- int numa_id;
-
- assign_ctxt_affinity(fp, dd);
-
- numa_id = qib_numa_aware ? ((fd->rec_cpu_num != -1) ?
- cpu_to_node(fd->rec_cpu_num) :
- numa_node_id()) : dd->assigned_node_id;
-
- rcd = qib_create_ctxtdata(ppd, ctxt, numa_id);
-
- /*
- * Allocate memory for use in qib_tid_update() at open to
- * reduce cost of expected send setup per message segment
- */
- if (rcd)
- ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
- dd->rcvtidcnt * sizeof(struct page **),
- GFP_KERNEL);
-
- if (!rcd || !ptmp) {
- qib_dev_err(dd,
- "Unable to allocate ctxtdata memory, failing open\n");
- ret = -ENOMEM;
- goto bailerr;
- }
- rcd->userversion = uinfo->spu_userversion;
- ret = init_subctxts(dd, rcd, uinfo);
- if (ret)
- goto bailerr;
- rcd->tid_pg_list = ptmp;
- rcd->pid = current->pid;
- init_waitqueue_head(&dd->rcd[ctxt]->wait);
- get_task_comm(rcd->comm, current);
- ctxt_fp(fp) = rcd;
- qib_stats.sps_ctxts++;
- dd->freectxts--;
- ret = 0;
- goto bail;
-
-bailerr:
- if (fd->rec_cpu_num != -1)
- __clear_bit(fd->rec_cpu_num, qib_cpulist);
-
- dd->rcd[ctxt] = NULL;
- kfree(rcd);
- kfree(ptmp);
-bail:
- return ret;
-}
-
-static inline int usable(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
-
- return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
- (ppd->lflags & QIBL_LINKACTIVE);
-}
-
-/*
- * Select a context on the given device, either using a requested port
- * or the port based on the context number.
- */
-static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
- const struct qib_user_info *uinfo)
-{
- struct qib_pportdata *ppd = NULL;
- int ret, ctxt;
-
- if (port) {
- if (!usable(dd->pport + port - 1)) {
- ret = -ENETDOWN;
- goto done;
- } else
- ppd = dd->pport + port - 1;
- }
- for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt];
- ctxt++)
- ;
- if (ctxt == dd->cfgctxts) {
- ret = -EBUSY;
- goto done;
- }
- if (!ppd) {
- u32 pidx = ctxt % dd->num_pports;
-
- if (usable(dd->pport + pidx))
- ppd = dd->pport + pidx;
- else {
- for (pidx = 0; pidx < dd->num_pports && !ppd;
- pidx++)
- if (usable(dd->pport + pidx))
- ppd = dd->pport + pidx;
- }
- }
- ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN;
-done:
- return ret;
-}
-
-static int find_free_ctxt(int unit, struct file *fp,
- const struct qib_user_info *uinfo)
-{
- struct qib_devdata *dd = qib_lookup(unit);
- int ret;
-
- if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports))
- ret = -ENODEV;
- else
- ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo);
-
- return ret;
-}
-
-static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
- unsigned alg)
-{
- struct qib_devdata *udd = NULL;
- int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i;
- u32 port = uinfo->spu_port, ctxt;
-
- devmax = qib_count_units(&npresent, &nup);
- if (!npresent) {
- ret = -ENXIO;
- goto done;
- }
- if (nup == 0) {
- ret = -ENETDOWN;
- goto done;
- }
-
- if (alg == QIB_PORT_ALG_ACROSS) {
- unsigned inuse = ~0U;
-
- /* find device (with ACTIVE ports) with fewest ctxts in use */
- for (ndev = 0; ndev < devmax; ndev++) {
- struct qib_devdata *dd = qib_lookup(ndev);
- unsigned cused = 0, cfree = 0, pusable = 0;
-
- if (!dd)
- continue;
- if (port && port <= dd->num_pports &&
- usable(dd->pport + port - 1))
- pusable = 1;
- else
- for (i = 0; i < dd->num_pports; i++)
- if (usable(dd->pport + i))
- pusable++;
- if (!pusable)
- continue;
- for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
- ctxt++)
- if (dd->rcd[ctxt])
- cused++;
- else
- cfree++;
- if (cfree && cused < inuse) {
- udd = dd;
- inuse = cused;
- }
- }
- if (udd) {
- ret = choose_port_ctxt(fp, udd, port, uinfo);
- goto done;
- }
- } else {
- for (ndev = 0; ndev < devmax; ndev++) {
- struct qib_devdata *dd = qib_lookup(ndev);
-
- if (dd) {
- ret = choose_port_ctxt(fp, dd, port, uinfo);
- if (!ret)
- goto done;
- if (ret == -EBUSY)
- dusable++;
- }
- }
- }
- ret = dusable ? -EBUSY : -ENETDOWN;
-
-done:
- return ret;
-}
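A toy sketch of the QIB_PORT_ALG_ACROSS policy above: among devices that still have a free context, open on the one with the fewest contexts in use. The per-device counts are invented.

#include <stdio.h>

struct dev { unsigned int used, free; };

int main(void)
{
	struct dev devs[] = { { 6, 0 }, { 2, 3 }, { 4, 1 } };
	unsigned int inuse = ~0U;
	int i, best = -1;

	for (i = 0; i < 3; i++) {
		/* needs a free slot and fewer in use than the best so far */
		if (devs[i].free && devs[i].used < inuse) {
			best = i;
			inuse = devs[i].used;
		}
	}
	printf("open on device %d\n", best);	/* device 1 */
	return 0;
}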
-
-static int find_shared_ctxt(struct file *fp,
- const struct qib_user_info *uinfo)
-{
- int devmax, ndev, i;
- int ret = 0;
-
- devmax = qib_count_units(NULL, NULL);
-
- for (ndev = 0; ndev < devmax; ndev++) {
- struct qib_devdata *dd = qib_lookup(ndev);
-
- /* device portion of usable() */
- if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
- continue;
- for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
- struct qib_ctxtdata *rcd = dd->rcd[i];
-
- /* Skip ctxts which are not yet open */
- if (!rcd || !rcd->cnt)
- continue;
- /* Skip ctxt if it doesn't match the requested one */
- if (rcd->subctxt_id != uinfo->spu_subctxt_id)
- continue;
- /* Verify the sharing process matches the master */
- if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt ||
- rcd->userversion != uinfo->spu_userversion ||
- rcd->cnt >= rcd->subctxt_cnt) {
- ret = -EINVAL;
- goto done;
- }
- ctxt_fp(fp) = rcd;
- subctxt_fp(fp) = rcd->cnt++;
- rcd->subpid[subctxt_fp(fp)] = current->pid;
- tidcursor_fp(fp) = 0;
- rcd->active_slaves |= 1 << subctxt_fp(fp);
- ret = 1;
- goto done;
- }
- }
-
-done:
- return ret;
-}
-
-static int qib_open(struct inode *in, struct file *fp)
-{
- /* The real work is performed later in qib_assign_ctxt() */
- fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL);
- if (fp->private_data) /* no cpu affinity by default */
- ((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1;
- return fp->private_data ? 0 : -ENOMEM;
-}
-
-static int find_hca(unsigned int cpu, int *unit)
-{
- int ret = 0, devmax, npresent, nup, ndev;
-
- *unit = -1;
-
- devmax = qib_count_units(&npresent, &nup);
- if (!npresent) {
- ret = -ENXIO;
- goto done;
- }
- if (!nup) {
- ret = -ENETDOWN;
- goto done;
- }
- for (ndev = 0; ndev < devmax; ndev++) {
- struct qib_devdata *dd = qib_lookup(ndev);
-
- if (dd) {
- if (pcibus_to_node(dd->pcidev->bus) < 0) {
- ret = -EINVAL;
- goto done;
- }
- if (cpu_to_node(cpu) ==
- pcibus_to_node(dd->pcidev->bus)) {
- *unit = ndev;
- goto done;
- }
- }
- }
-done:
- return ret;
-}
-
-static int do_qib_user_sdma_queue_create(struct file *fp)
-{
- struct qib_filedata *fd = fp->private_data;
- struct qib_ctxtdata *rcd = fd->rcd;
- struct qib_devdata *dd = rcd->dd;
-
- if (dd->flags & QIB_HAS_SEND_DMA) {
-
- fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
- dd->unit,
- rcd->ctxt,
- fd->subctxt);
- if (!fd->pq)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/*
- * Get ctxt early, so can set affinity prior to memory allocation.
- */
-static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
-{
- int ret;
- int i_minor;
- unsigned swmajor, swminor, alg = QIB_PORT_ALG_ACROSS;
-
- /* Check to be sure we haven't already initialized this file */
- if (ctxt_fp(fp)) {
- ret = -EINVAL;
- goto done;
- }
-
- /* for now, if major version is different, bail */
- swmajor = uinfo->spu_userversion >> 16;
- if (swmajor != QIB_USER_SWMAJOR) {
- ret = -ENODEV;
- goto done;
- }
-
- swminor = uinfo->spu_userversion & 0xffff;
-
- if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT)
- alg = uinfo->spu_port_alg;
-
- mutex_lock(&qib_mutex);
-
- if (qib_compatible_subctxts(swmajor, swminor) &&
- uinfo->spu_subctxt_cnt) {
- ret = find_shared_ctxt(fp, uinfo);
- if (ret > 0) {
- ret = do_qib_user_sdma_queue_create(fp);
- if (!ret)
- assign_ctxt_affinity(fp, (ctxt_fp(fp))->dd);
- goto done_ok;
- }
- }
-
- i_minor = iminor(file_inode(fp)) - QIB_USER_MINOR_BASE;
- if (i_minor)
- ret = find_free_ctxt(i_minor - 1, fp, uinfo);
- else {
- int unit;
- const unsigned int cpu = cpumask_first(current->cpus_ptr);
- const unsigned int weight = current->nr_cpus_allowed;
-
- if (weight == 1 && !test_bit(cpu, qib_cpulist))
- if (!find_hca(cpu, &unit) && unit >= 0)
- if (!find_free_ctxt(unit, fp, uinfo)) {
- ret = 0;
- goto done_chk_sdma;
- }
- ret = get_a_ctxt(fp, uinfo, alg);
- }
-
-done_chk_sdma:
- if (!ret)
- ret = do_qib_user_sdma_queue_create(fp);
-done_ok:
- mutex_unlock(&qib_mutex);
-
-done:
- return ret;
-}
-
-
-static int qib_do_user_init(struct file *fp,
- const struct qib_user_info *uinfo)
-{
- int ret;
- struct qib_ctxtdata *rcd = ctxt_fp(fp);
- struct qib_devdata *dd;
- unsigned uctxt;
-
- /* Subctxts don't need to initialize anything since master did it. */
- if (subctxt_fp(fp)) {
- ret = wait_event_interruptible(rcd->wait,
- !test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));
- goto bail;
- }
-
- dd = rcd->dd;
-
- /* some ctxts may get extra buffers, calculate that here */
- uctxt = rcd->ctxt - dd->first_user_ctxt;
- if (uctxt < dd->ctxts_extrabuf) {
- rcd->piocnt = dd->pbufsctxt + 1;
- rcd->pio_base = rcd->piocnt * uctxt;
- } else {
- rcd->piocnt = dd->pbufsctxt;
- rcd->pio_base = rcd->piocnt * uctxt +
- dd->ctxts_extrabuf;
- }
-
- /*
- * All user buffers are 2KB buffers. If we ever support
- * giving 4KB buffers to user processes, this will need some
- * work. Can't use piobufbase directly, because it has
- * both 2K and 4K buffer base values. So check and handle.
- */
- if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
- if (rcd->pio_base >= dd->piobcnt2k) {
- qib_dev_err(dd,
- "%u:ctxt%u: no 2KB buffers available\n",
- dd->unit, rcd->ctxt);
- ret = -ENOBUFS;
- goto bail;
- }
- rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
- qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",
- rcd->ctxt, rcd->piocnt);
- }
-
- rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
- qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
- TXCHK_CHG_TYPE_USER, rcd);
- /*
- * try to ensure that processes start up with consistent avail update
- * for their own range, at least. If system very quiet, it might
- * have the in-memory copy out of date at startup for this range of
- * buffers, when a context gets re-used. Do after the chg_pioavail
- * and before the rest of setup, so it's "almost certain" the dma
- * will have occurred (can't 100% guarantee, but should be many
- * decimals of 9s, with this ordering), given how much else happens
- * after this.
- */
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
-
- /*
- * Now allocate the rcvhdr Q and eager TIDs; skip the TID
- * array for the time being. If rcd->ctxt > chip-supported,
- * we will someday need extra handling here, e.g. overflow
- * through ctxt 0.
- */
- ret = qib_create_rcvhdrq(dd, rcd);
- if (!ret)
- ret = qib_setup_eagerbufs(rcd);
- if (ret)
- goto bail_pio;
-
- rcd->tidcursor = 0; /* start at beginning after open */
-
- /* initialize poll variables... */
- rcd->urgent = 0;
- rcd->urgent_poll = 0;
-
- /*
- * Now enable the ctxt for receive.
- * For chips that are set to DMA the tail register to memory
- * when it changes (and when the update bit transitions from
- * 0 to 1), we turn receive off and then back on.
- * This will (very briefly) affect any other open ctxts, but the
- * duration is very short, and therefore isn't an issue. We
- * explicitly set the in-memory tail copy to 0 beforehand, so we
- * don't have to wait to be sure the DMA update has happened
- * (chip resets head/tail to 0 on transition to enable).
- */
- if (rcd->rcvhdrtail_kvaddr)
- qib_clear_rcvhdrtail(rcd);
-
- dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
- rcd->ctxt);
-
- /* Notify any waiting slaves */
- if (rcd->subctxt_cnt) {
- clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
- wake_up(&rcd->wait);
- }
- return 0;
-
-bail_pio:
- qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
- TXCHK_CHG_TYPE_KERN, rcd);
-bail:
- return ret;
-}
-
-/**
- * unlock_expected_tids - unlock any expected TID entries context still had
- * in use
- * @rcd: ctxt
- *
- * We don't actually update the chip here, because we do a bulk update
- * below, using f_clear_tids.
- */
-static void unlock_expected_tids(struct qib_ctxtdata *rcd)
-{
- struct qib_devdata *dd = rcd->dd;
- int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
- int i, maxtid = ctxt_tidbase + dd->rcvtidcnt;
-
- for (i = ctxt_tidbase; i < maxtid; i++) {
- struct page *p = dd->pageshadow[i];
- dma_addr_t phys;
-
- if (!p)
- continue;
-
- phys = dd->physshadow[i];
- dd->physshadow[i] = dd->tidinvalid;
- dd->pageshadow[i] = NULL;
- dma_unmap_page(&dd->pcidev->dev, phys, PAGE_SIZE,
- DMA_FROM_DEVICE);
- qib_release_user_pages(&p, 1);
- }
-}
-
-static int qib_close(struct inode *in, struct file *fp)
-{
- struct qib_filedata *fd;
- struct qib_ctxtdata *rcd;
- struct qib_devdata *dd;
- unsigned long flags;
- unsigned ctxt;
-
- mutex_lock(&qib_mutex);
-
- fd = fp->private_data;
- fp->private_data = NULL;
- rcd = fd->rcd;
- if (!rcd) {
- mutex_unlock(&qib_mutex);
- goto bail;
- }
-
- dd = rcd->dd;
-
- /* ensure all pio buffer writes in progress are flushed */
- qib_flush_wc();
-
- /* drain user sdma queue */
- if (fd->pq) {
- qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
- qib_user_sdma_queue_destroy(fd->pq);
- }
-
- if (fd->rec_cpu_num != -1)
- __clear_bit(fd->rec_cpu_num, qib_cpulist);
-
- if (--rcd->cnt) {
- /*
- * XXX If the master closes the context before the slave(s),
- * revoke the mmap for the eager receive queue so
- * the slave(s) don't wait for receive data forever.
- */
- rcd->active_slaves &= ~(1 << fd->subctxt);
- rcd->subpid[fd->subctxt] = 0;
- mutex_unlock(&qib_mutex);
- goto bail;
- }
-
- /* early; no interrupt users after this */
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- ctxt = rcd->ctxt;
- dd->rcd[ctxt] = NULL;
- rcd->pid = 0;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
-
- if (rcd->rcvwait_to || rcd->piowait_to ||
- rcd->rcvnowait || rcd->pionowait) {
- rcd->rcvwait_to = 0;
- rcd->piowait_to = 0;
- rcd->rcvnowait = 0;
- rcd->pionowait = 0;
- }
- if (rcd->flag)
- rcd->flag = 0;
-
- if (dd->kregbase) {
- /* atomically clear receive enable ctxt and intr avail. */
- dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |
- QIB_RCVCTRL_INTRAVAIL_DIS, ctxt);
-
- /* clean up the pkeys for this ctxt user */
- qib_clean_part_key(rcd, dd);
- qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
- qib_chg_pioavailkernel(dd, rcd->pio_base,
- rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL);
-
- dd->f_clear_tids(dd, rcd);
-
- if (dd->pageshadow)
- unlock_expected_tids(rcd);
- qib_stats.sps_ctxts--;
- dd->freectxts++;
- }
-
- mutex_unlock(&qib_mutex);
- qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */
-
-bail:
- kfree(fd);
- return 0;
-}
-
-static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
-{
- struct qib_ctxt_info info;
- int ret;
- size_t sz;
- struct qib_ctxtdata *rcd = ctxt_fp(fp);
- struct qib_filedata *fd;
-
- fd = fp->private_data;
-
- info.num_active = qib_count_active_units();
- info.unit = rcd->dd->unit;
- info.port = rcd->ppd->port;
- info.ctxt = rcd->ctxt;
- info.subctxt = subctxt_fp(fp);
- /* Number of user ctxts available for this device. */
- info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
- info.num_subctxts = rcd->subctxt_cnt;
- info.rec_cpu = fd->rec_cpu_num;
- sz = sizeof(info);
-
- if (copy_to_user(uinfo, &info, sz)) {
- ret = -EFAULT;
- goto bail;
- }
- ret = 0;
-
-bail:
- return ret;
-}
-
-static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
- u32 __user *inflightp)
-{
- const u32 val = qib_user_sdma_inflight_counter(pq);
-
- if (put_user(val, inflightp))
- return -EFAULT;
-
- return 0;
-}
-
-static int qib_sdma_get_complete(struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq,
- u32 __user *completep)
-{
- u32 val;
- int err;
-
- if (!pq)
- return -EINVAL;
-
- err = qib_user_sdma_make_progress(ppd, pq);
- if (err < 0)
- return err;
-
- val = qib_user_sdma_complete_counter(pq);
- if (put_user(val, completep))
- return -EFAULT;
-
- return 0;
-}
-
-static int disarm_req_delay(struct qib_ctxtdata *rcd)
-{
- int ret = 0;
-
- if (!usable(rcd->ppd)) {
- int i;
- /*
-		 * if the link is down, or otherwise not usable, delay
-		 * the caller up to 30 seconds (300 polls at 100 ms), so
-		 * we don't thrash in trying to get the chip back to
-		 * ACTIVE, and set a flag so the caller retries.
- */
- if (rcd->user_event_mask) {
- /*
- * subctxt_cnt is 0 if not shared, so do base
- * separately, first, then remaining subctxt, if any
- */
- set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
- &rcd->user_event_mask[0]);
- for (i = 1; i < rcd->subctxt_cnt; i++)
- set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
- &rcd->user_event_mask[i]);
- }
- for (i = 0; !usable(rcd->ppd) && i < 300; i++)
- msleep(100);
- ret = -ENETDOWN;
- }
- return ret;
-}
-
-/*
- * Find all user contexts in use, and set the specified bit in their
- * event mask.
- * See also find_ctxt() for a similar use, that is specific to send buffers.
- */
-int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
-{
- struct qib_ctxtdata *rcd;
- unsigned ctxt;
- int ret = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->dd->uctxt_lock, flags);
- for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
- ctxt++) {
- rcd = ppd->dd->rcd[ctxt];
- if (!rcd)
- continue;
- if (rcd->user_event_mask) {
- int i;
- /*
- * subctxt_cnt is 0 if not shared, so do base
- * separately, first, then remaining subctxt, if any
- */
- set_bit(evtbit, &rcd->user_event_mask[0]);
- for (i = 1; i < rcd->subctxt_cnt; i++)
- set_bit(evtbit, &rcd->user_event_mask[i]);
- }
- ret = 1;
- break;
- }
- spin_unlock_irqrestore(&ppd->dd->uctxt_lock, flags);
-
- return ret;
-}
-
-/*
- * clear the event notifier events for this context.
- * For the DISARM_BUFS case, we also take action (this obsoletes
- * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards
- * compatibility).
- * Other bits don't currently require actions, just atomically clear.
- * User process then performs actions appropriate to bit having been
- * set, if desired, and checks again in future.
- */
-static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
- unsigned long events)
-{
- int ret = 0, i;
-
- for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) {
- if (!test_bit(i, &events))
- continue;
- if (i == _QIB_EVENT_DISARM_BUFS_BIT) {
- (void)qib_disarm_piobufs_ifneeded(rcd);
- ret = disarm_req_delay(rcd);
- } else
- clear_bit(i, &rcd->user_event_mask[subctxt]);
- }
- return ret;
-}
-
-static ssize_t qib_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off)
-{
- const struct qib_cmd __user *ucmd;
- struct qib_ctxtdata *rcd;
- const void __user *src;
- size_t consumed, copy = 0;
- struct qib_cmd cmd;
- ssize_t ret = 0;
- void *dest;
-
- if (!ib_safe_file_access(fp)) {
- pr_err_once("qib_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
- task_tgid_vnr(current), current->comm);
- return -EACCES;
- }
-
- if (count < sizeof(cmd.type)) {
- ret = -EINVAL;
- goto bail;
- }
-
- ucmd = (const struct qib_cmd __user *) data;
-
- if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
- ret = -EFAULT;
- goto bail;
- }
-
- consumed = sizeof(cmd.type);
-
- switch (cmd.type) {
- case QIB_CMD_ASSIGN_CTXT:
- case QIB_CMD_USER_INIT:
- copy = sizeof(cmd.cmd.user_info);
- dest = &cmd.cmd.user_info;
- src = &ucmd->cmd.user_info;
- break;
-
- case QIB_CMD_RECV_CTRL:
- copy = sizeof(cmd.cmd.recv_ctrl);
- dest = &cmd.cmd.recv_ctrl;
- src = &ucmd->cmd.recv_ctrl;
- break;
-
- case QIB_CMD_CTXT_INFO:
- copy = sizeof(cmd.cmd.ctxt_info);
- dest = &cmd.cmd.ctxt_info;
- src = &ucmd->cmd.ctxt_info;
- break;
-
- case QIB_CMD_TID_UPDATE:
- case QIB_CMD_TID_FREE:
- copy = sizeof(cmd.cmd.tid_info);
- dest = &cmd.cmd.tid_info;
- src = &ucmd->cmd.tid_info;
- break;
-
- case QIB_CMD_SET_PART_KEY:
- copy = sizeof(cmd.cmd.part_key);
- dest = &cmd.cmd.part_key;
- src = &ucmd->cmd.part_key;
- break;
-
- case QIB_CMD_DISARM_BUFS:
- case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
- copy = 0;
- src = NULL;
- dest = NULL;
- break;
-
- case QIB_CMD_POLL_TYPE:
- copy = sizeof(cmd.cmd.poll_type);
- dest = &cmd.cmd.poll_type;
- src = &ucmd->cmd.poll_type;
- break;
-
- case QIB_CMD_ARMLAUNCH_CTRL:
- copy = sizeof(cmd.cmd.armlaunch_ctrl);
- dest = &cmd.cmd.armlaunch_ctrl;
- src = &ucmd->cmd.armlaunch_ctrl;
- break;
-
- case QIB_CMD_SDMA_INFLIGHT:
- copy = sizeof(cmd.cmd.sdma_inflight);
- dest = &cmd.cmd.sdma_inflight;
- src = &ucmd->cmd.sdma_inflight;
- break;
-
- case QIB_CMD_SDMA_COMPLETE:
- copy = sizeof(cmd.cmd.sdma_complete);
- dest = &cmd.cmd.sdma_complete;
- src = &ucmd->cmd.sdma_complete;
- break;
-
- case QIB_CMD_ACK_EVENT:
- copy = sizeof(cmd.cmd.event_mask);
- dest = &cmd.cmd.event_mask;
- src = &ucmd->cmd.event_mask;
- break;
-
- default:
- ret = -EINVAL;
- goto bail;
- }
-
- if (copy) {
- if ((count - consumed) < copy) {
- ret = -EINVAL;
- goto bail;
- }
- if (copy_from_user(dest, src, copy)) {
- ret = -EFAULT;
- goto bail;
- }
- consumed += copy;
- }
-
- rcd = ctxt_fp(fp);
- if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) {
- ret = -EINVAL;
- goto bail;
- }
-
- switch (cmd.type) {
- case QIB_CMD_ASSIGN_CTXT:
- if (rcd) {
- ret = -EINVAL;
- goto bail;
- }
-
- ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
- if (ret)
- goto bail;
- break;
-
- case QIB_CMD_USER_INIT:
- ret = qib_do_user_init(fp, &cmd.cmd.user_info);
- if (ret)
- goto bail;
- ret = qib_get_base_info(fp, u64_to_user_ptr(
- cmd.cmd.user_info.spu_base_info),
- cmd.cmd.user_info.spu_base_info_size);
- break;
-
- case QIB_CMD_RECV_CTRL:
- ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl);
- break;
-
- case QIB_CMD_CTXT_INFO:
- ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *)
- (unsigned long) cmd.cmd.ctxt_info);
- break;
-
- case QIB_CMD_TID_UPDATE:
- ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info);
- break;
-
- case QIB_CMD_TID_FREE:
- ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info);
- break;
-
- case QIB_CMD_SET_PART_KEY:
- ret = qib_set_part_key(rcd, cmd.cmd.part_key);
- break;
-
- case QIB_CMD_DISARM_BUFS:
- (void)qib_disarm_piobufs_ifneeded(rcd);
- ret = disarm_req_delay(rcd);
- break;
-
- case QIB_CMD_PIOAVAILUPD:
- qib_force_pio_avail_update(rcd->dd);
- break;
-
- case QIB_CMD_POLL_TYPE:
- rcd->poll_type = cmd.cmd.poll_type;
- break;
-
- case QIB_CMD_ARMLAUNCH_CTRL:
- rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);
- break;
-
- case QIB_CMD_SDMA_INFLIGHT:
- ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp),
- (u32 __user *) (unsigned long)
- cmd.cmd.sdma_inflight);
- break;
-
- case QIB_CMD_SDMA_COMPLETE:
- ret = qib_sdma_get_complete(rcd->ppd,
- user_sdma_queue_fp(fp),
- (u32 __user *) (unsigned long)
- cmd.cmd.sdma_complete);
- break;
-
- case QIB_CMD_ACK_EVENT:
- ret = qib_user_event_ack(rcd, subctxt_fp(fp),
- cmd.cmd.event_mask);
- break;
- }
-
- if (ret >= 0)
- ret = consumed;
-
-bail:
- return ret;
-}
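-
-/*
- * Illustrative user-space call sequence (a sketch; the exact struct
- * layout comes from the qib user ABI header): commands are issued by
- * write()ing a struct qib_cmd, e.g. to acknowledge events:
- *
- *	struct qib_cmd cmd = { .type = QIB_CMD_ACK_EVENT };
- *	cmd.cmd.event_mask = events;
- *	write(fd, &cmd, sizeof(cmd));
- *
- * qib_write() copies cmd.type first and then only the union member
- * that type selects, which is why the count checks above compare
- * against sizeof(cmd.type) plus the per-command payload size.
- */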
-
-static ssize_t qib_write_iter(struct kiocb *iocb, struct iov_iter *from)
-{
- struct qib_filedata *fp = iocb->ki_filp->private_data;
- struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
- struct qib_user_sdma_queue *pq = fp->pq;
-
- if (!user_backed_iter(from) || !from->nr_segs || !pq)
- return -EINVAL;
-
- return qib_user_sdma_writev(rcd, pq, iter_iov(from), from->nr_segs);
-}
-
-static const struct class qib_class = {
- .name = "ipath",
-};
-static dev_t qib_dev;
-
-int qib_cdev_init(int minor, const char *name,
- const struct file_operations *fops,
- struct cdev **cdevp, struct device **devp)
-{
- const dev_t dev = MKDEV(MAJOR(qib_dev), minor);
- struct cdev *cdev;
- struct device *device = NULL;
- int ret;
-
- cdev = cdev_alloc();
- if (!cdev) {
- pr_err("Could not allocate cdev for minor %d, %s\n",
- minor, name);
- ret = -ENOMEM;
- goto done;
- }
-
- cdev->owner = THIS_MODULE;
- cdev->ops = fops;
- kobject_set_name(&cdev->kobj, name);
-
- ret = cdev_add(cdev, dev, 1);
- if (ret < 0) {
- pr_err("Could not add cdev for minor %d, %s (err %d)\n",
- minor, name, -ret);
- goto err_cdev;
- }
-
- device = device_create(&qib_class, NULL, dev, NULL, "%s", name);
- if (!IS_ERR(device))
- goto done;
- ret = PTR_ERR(device);
- device = NULL;
- pr_err("Could not create device for minor %d, %s (err %d)\n",
- minor, name, -ret);
-err_cdev:
- cdev_del(cdev);
- cdev = NULL;
-done:
- *cdevp = cdev;
- *devp = device;
- return ret;
-}
-
-void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp)
-{
- struct device *device = *devp;
-
- if (device) {
- device_unregister(device);
- *devp = NULL;
- }
-
- if (*cdevp) {
- cdev_del(*cdevp);
- *cdevp = NULL;
- }
-}
-
-static struct cdev *wildcard_cdev;
-static struct device *wildcard_device;
-
-int __init qib_dev_init(void)
-{
- int ret;
-
- ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
- if (ret < 0) {
- pr_err("Could not allocate chrdev region (err %d)\n", -ret);
- goto done;
- }
-
- ret = class_register(&qib_class);
- if (ret) {
- pr_err("Could not create device class (err %d)\n", -ret);
- unregister_chrdev_region(qib_dev, QIB_NMINORS);
- }
-
-done:
- return ret;
-}
-
-void qib_dev_cleanup(void)
-{
- if (class_is_registered(&qib_class))
- class_unregister(&qib_class);
-
- unregister_chrdev_region(qib_dev, QIB_NMINORS);
-}
-
-static atomic_t user_count = ATOMIC_INIT(0);
-
-static void qib_user_remove(struct qib_devdata *dd)
-{
- if (atomic_dec_return(&user_count) == 0)
- qib_cdev_cleanup(&wildcard_cdev, &wildcard_device);
-
- qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
-}
-
-static int qib_user_add(struct qib_devdata *dd)
-{
- char name[10];
- int ret;
-
- if (atomic_inc_return(&user_count) == 1) {
- ret = qib_cdev_init(0, "ipath", &qib_file_ops,
- &wildcard_cdev, &wildcard_device);
- if (ret)
- goto done;
- }
-
- snprintf(name, sizeof(name), "ipath%d", dd->unit);
- ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
- &dd->user_cdev, &dd->user_device);
- if (ret)
- qib_user_remove(dd);
-done:
- return ret;
-}
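-
-/*
- * Resulting /dev layout (illustrative): minor 0 is the "wildcard" node
- * /dev/ipath, shared across all units; each unit N then gets its own
- * /dev/ipathN at minor N + 1. Both use qib_file_ops, and the nodes are
- * created by udev from the "ipath" class devices registered above.
- */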
-
-/*
- * Create per-unit files in /dev
- */
-int qib_device_create(struct qib_devdata *dd)
-{
- int r, ret;
-
- r = qib_user_add(dd);
- ret = qib_diag_add(dd);
- if (r && !ret)
- ret = r;
- return ret;
-}
-
-/*
- * Remove per-unit files in /dev
- * Returns void; the core kernel reports no errors for this teardown.
- */
-void qib_device_remove(struct qib_devdata *dd)
-{
- qib_user_remove(dd);
- qib_diag_remove(dd);
-}
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
deleted file mode 100644
index 2098de762bf5..000000000000
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ /dev/null
@@ -1,549 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/fs.h>
-#include <linux/fs_context.h>
-#include <linux/mount.h>
-#include <linux/pagemap.h>
-#include <linux/init.h>
-#include <linux/namei.h>
-
-#include "qib.h"
-
-#define QIBFS_MAGIC 0x726a77
-
-static struct super_block *qib_super;
-
-#define private2dd(file) (file_inode(file)->i_private)
-
-static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
- umode_t mode, const struct file_operations *fops,
- void *data)
-{
- int error;
- struct inode *inode = new_inode(dir->i_sb);
-
- if (!inode) {
- dput(dentry);
- error = -EPERM;
- goto bail;
- }
-
- inode->i_ino = get_next_ino();
- inode->i_mode = mode;
- inode->i_uid = GLOBAL_ROOT_UID;
- inode->i_gid = GLOBAL_ROOT_GID;
- inode->i_blocks = 0;
- simple_inode_init_ts(inode);
-
- inode->i_private = data;
- if (S_ISDIR(mode)) {
- inode->i_op = &simple_dir_inode_operations;
- inc_nlink(inode);
- inc_nlink(dir);
- }
-
- inode->i_fop = fops;
-
- d_instantiate(dentry, inode);
- error = 0;
-
-bail:
- return error;
-}
-
-static int create_file(const char *name, umode_t mode,
- struct dentry *parent, struct dentry **dentry,
- const struct file_operations *fops, void *data)
-{
- int error;
-
- inode_lock(d_inode(parent));
- *dentry = lookup_noperm(&QSTR(name), parent);
- if (!IS_ERR(*dentry))
- error = qibfs_mknod(d_inode(parent), *dentry,
- mode, fops, data);
- else
- error = PTR_ERR(*dentry);
- inode_unlock(d_inode(parent));
-
- return error;
-}
-
-static ssize_t driver_stats_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- qib_stats.sps_ints = qib_sps_ints();
- return simple_read_from_buffer(buf, count, ppos, &qib_stats,
- sizeof(qib_stats));
-}
-
-/*
- * driver stats field names, one line per stat, single string. Used by
- * programs like ipathstats to print the stats in a way which works for
- * different versions of drivers, without changing program source.
- * If qlogic_ib_stats changes, this needs to change too. Names must be
- * 12 chars or less (w/o newline) for proper display by the ipathstats utility.
- */
-static const char qib_statnames[] =
- "KernIntr\n"
- "ErrorIntr\n"
- "Tx_Errs\n"
- "Rcv_Errs\n"
- "H/W_Errs\n"
- "NoPIOBufs\n"
- "CtxtsOpen\n"
- "RcvLen_Errs\n"
- "EgrBufFull\n"
- "EgrHdrFull\n"
- ;
-
-static ssize_t driver_names_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- return simple_read_from_buffer(buf, count, ppos, qib_statnames,
- sizeof(qib_statnames) - 1); /* no null */
-}
-
-static const struct file_operations driver_ops[] = {
- { .read = driver_stats_read, .llseek = generic_file_llseek, },
- { .read = driver_names_read, .llseek = generic_file_llseek, },
-};
-
-/* read the per-device counters */
-static ssize_t dev_counters_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- u64 *counters;
- size_t avail;
- struct qib_devdata *dd = private2dd(file);
-
- avail = dd->f_read_cntrs(dd, *ppos, NULL, &counters);
- return simple_read_from_buffer(buf, count, ppos, counters, avail);
-}
-
-/* read the per-device counters */
-static ssize_t dev_names_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- char *names;
- size_t avail;
- struct qib_devdata *dd = private2dd(file);
-
- avail = dd->f_read_cntrs(dd, *ppos, &names, NULL);
- return simple_read_from_buffer(buf, count, ppos, names, avail);
-}
-
-static const struct file_operations cntr_ops[] = {
- { .read = dev_counters_read, .llseek = generic_file_llseek, },
- { .read = dev_names_read, .llseek = generic_file_llseek, },
-};
-
-/*
- * Could use file_inode(file)->i_ino to figure out which file,
- * instead of a separate routine for each, but for now, this works...
- */
-
-/* read the per-port names (same for each port) */
-static ssize_t portnames_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- char *names;
- size_t avail;
- struct qib_devdata *dd = private2dd(file);
-
- avail = dd->f_read_portcntrs(dd, *ppos, 0, &names, NULL);
- return simple_read_from_buffer(buf, count, ppos, names, avail);
-}
-
-/* read the per-port counters for port 1 (pidx 0) */
-static ssize_t portcntrs_1_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- u64 *counters;
- size_t avail;
- struct qib_devdata *dd = private2dd(file);
-
- avail = dd->f_read_portcntrs(dd, *ppos, 0, NULL, &counters);
- return simple_read_from_buffer(buf, count, ppos, counters, avail);
-}
-
-/* read the per-port counters for port 2 (pidx 1) */
-static ssize_t portcntrs_2_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- u64 *counters;
- size_t avail;
- struct qib_devdata *dd = private2dd(file);
-
- avail = dd->f_read_portcntrs(dd, *ppos, 1, NULL, &counters);
- return simple_read_from_buffer(buf, count, ppos, counters, avail);
-}
-
-static const struct file_operations portcntr_ops[] = {
- { .read = portnames_read, .llseek = generic_file_llseek, },
- { .read = portcntrs_1_read, .llseek = generic_file_llseek, },
- { .read = portcntrs_2_read, .llseek = generic_file_llseek, },
-};
-
-/*
- * read the per-port QSFP data for port 1 (pidx 0)
- */
-static ssize_t qsfp_1_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct qib_devdata *dd = private2dd(file);
- char *tmp;
- int ret;
-
- tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- ret = qib_qsfp_dump(dd->pport, tmp, PAGE_SIZE);
- if (ret > 0)
- ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
- kfree(tmp);
- return ret;
-}
-
-/*
- * read the per-port QSFP data for port 2 (pidx 1)
- */
-static ssize_t qsfp_2_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct qib_devdata *dd = private2dd(file);
- char *tmp;
- int ret;
-
- if (dd->num_pports < 2)
- return -ENODEV;
-
- tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- ret = qib_qsfp_dump(dd->pport + 1, tmp, PAGE_SIZE);
- if (ret > 0)
- ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
- kfree(tmp);
- return ret;
-}
-
-static const struct file_operations qsfp_ops[] = {
- { .read = qsfp_1_read, .llseek = generic_file_llseek, },
- { .read = qsfp_2_read, .llseek = generic_file_llseek, },
-};
-
-static ssize_t flash_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct qib_devdata *dd;
- ssize_t ret;
- loff_t pos;
- char *tmp;
-
- pos = *ppos;
-
- if (pos < 0) {
- ret = -EINVAL;
- goto bail;
- }
-
- if (pos >= sizeof(struct qib_flash)) {
- ret = 0;
- goto bail;
- }
-
- if (count > sizeof(struct qib_flash) - pos)
- count = sizeof(struct qib_flash) - pos;
-
- tmp = kmalloc(count, GFP_KERNEL);
- if (!tmp) {
- ret = -ENOMEM;
- goto bail;
- }
-
- dd = private2dd(file);
- if (qib_eeprom_read(dd, pos, tmp, count)) {
- qib_dev_err(dd, "failed to read from flash\n");
- ret = -ENXIO;
- goto bail_tmp;
- }
-
- if (copy_to_user(buf, tmp, count)) {
- ret = -EFAULT;
- goto bail_tmp;
- }
-
- *ppos = pos + count;
- ret = count;
-
-bail_tmp:
- kfree(tmp);
-
-bail:
- return ret;
-}
-
-static ssize_t flash_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct qib_devdata *dd;
- ssize_t ret;
- loff_t pos;
- char *tmp;
-
- pos = *ppos;
-
- if (pos != 0 || count != sizeof(struct qib_flash))
- return -EINVAL;
-
- tmp = memdup_user(buf, count);
- if (IS_ERR(tmp))
- return PTR_ERR(tmp);
-
- dd = private2dd(file);
- if (qib_eeprom_write(dd, pos, tmp, count)) {
- ret = -ENXIO;
- qib_dev_err(dd, "failed to write to flash\n");
- goto bail_tmp;
- }
-
- *ppos = pos + count;
- ret = count;
-
-bail_tmp:
- kfree(tmp);
- return ret;
-}
-
-static const struct file_operations flash_ops = {
- .read = flash_read,
- .write = flash_write,
- .llseek = default_llseek,
-};
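-
-/*
- * Usage sketch (illustrative): the flash file supports partial reads at
- * any offset within sizeof(struct qib_flash), but a write must replace
- * the entire image at offset 0:
- *
- *	pread(fd, buf, 128, 0);                        - partial read, OK
- *	pwrite(fd, img, sizeof(struct qib_flash), 0);  - required form
- */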
-
-static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
-{
- struct dentry *dir, *tmp;
- char unit[10];
- int ret, i;
-
- /* create the per-unit directory */
- snprintf(unit, sizeof(unit), "%u", dd->unit);
- ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
- &simple_dir_operations, dd);
- if (ret) {
- pr_err("create_file(%s) failed: %d\n", unit, ret);
- goto bail;
- }
-
- /* create the files in the new directory */
- ret = create_file("counters", S_IFREG|S_IRUGO, dir, &tmp,
- &cntr_ops[0], dd);
- if (ret) {
- pr_err("create_file(%s/counters) failed: %d\n",
- unit, ret);
- goto bail;
- }
- ret = create_file("counter_names", S_IFREG|S_IRUGO, dir, &tmp,
- &cntr_ops[1], dd);
- if (ret) {
- pr_err("create_file(%s/counter_names) failed: %d\n",
- unit, ret);
- goto bail;
- }
- ret = create_file("portcounter_names", S_IFREG|S_IRUGO, dir, &tmp,
- &portcntr_ops[0], dd);
- if (ret) {
- pr_err("create_file(%s/%s) failed: %d\n",
- unit, "portcounter_names", ret);
- goto bail;
- }
- for (i = 1; i <= dd->num_pports; i++) {
- char fname[24];
-
- sprintf(fname, "port%dcounters", i);
- /* create the files in the new directory */
- ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
- &portcntr_ops[i], dd);
- if (ret) {
- pr_err("create_file(%s/%s) failed: %d\n",
- unit, fname, ret);
- goto bail;
- }
- if (!(dd->flags & QIB_HAS_QSFP))
- continue;
- sprintf(fname, "qsfp%d", i);
- ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
- &qsfp_ops[i - 1], dd);
- if (ret) {
- pr_err("create_file(%s/%s) failed: %d\n",
- unit, fname, ret);
- goto bail;
- }
- }
-
- ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp,
- &flash_ops, dd);
- if (ret)
- pr_err("create_file(%s/flash) failed: %d\n",
- unit, ret);
-bail:
- return ret;
-}
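-
-/*
- * Resulting per-unit tree (illustrative; unit 0 of a dual-port board
- * with QSFP):
- *
- *	0/counters          0/port1counters    0/qsfp1
- *	0/counter_names     0/port2counters    0/qsfp2
- *	0/portcounter_names 0/flash
- *
- * The top-level driver_stats and driver_stats_names files are created
- * at mount time by qibfs_fill_super() below.
- */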
-
-static int remove_device_files(struct super_block *sb,
- struct qib_devdata *dd)
-{
- struct dentry *dir;
- char unit[10];
-
- snprintf(unit, sizeof(unit), "%u", dd->unit);
- dir = lookup_noperm_unlocked(&QSTR(unit), sb->s_root);
-
- if (IS_ERR(dir)) {
- pr_err("Lookup of %s failed\n", unit);
- return PTR_ERR(dir);
- }
- simple_recursive_removal(dir, NULL);
- dput(dir);
- return 0;
-}
-
-/*
- * This fills everything in when the fs is mounted, to handle umount/mount
- * after device init. The direct add_cntr_files() call handles adding
- * them from the init code, when the fs is already mounted.
- */
-static int qibfs_fill_super(struct super_block *sb, struct fs_context *fc)
-{
- struct qib_devdata *dd;
- unsigned long index;
- int ret;
-
- static const struct tree_descr files[] = {
- [2] = {"driver_stats", &driver_ops[0], S_IRUGO},
- [3] = {"driver_stats_names", &driver_ops[1], S_IRUGO},
- {""},
- };
-
- ret = simple_fill_super(sb, QIBFS_MAGIC, files);
- if (ret) {
- pr_err("simple_fill_super failed: %d\n", ret);
- goto bail;
- }
-
- xa_for_each(&qib_dev_table, index, dd) {
- ret = add_cntr_files(sb, dd);
- if (ret)
- goto bail;
- }
-
-bail:
- return ret;
-}
-
-static int qibfs_get_tree(struct fs_context *fc)
-{
- int ret = get_tree_single(fc, qibfs_fill_super);
- if (ret == 0)
- qib_super = fc->root->d_sb;
- return ret;
-}
-
-static const struct fs_context_operations qibfs_context_ops = {
- .get_tree = qibfs_get_tree,
-};
-
-static int qibfs_init_fs_context(struct fs_context *fc)
-{
- fc->ops = &qibfs_context_ops;
- return 0;
-}
-
-static void qibfs_kill_super(struct super_block *s)
-{
- kill_litter_super(s);
- qib_super = NULL;
-}
-
-int qibfs_add(struct qib_devdata *dd)
-{
- int ret;
-
- /*
- * On first unit initialized, qib_super will not yet exist
- * because nobody has yet tried to mount the filesystem, so
- * we can't consider that to be an error; if an error occurs
- * during the mount, that will get a complaint, so this is OK.
- * add_cntr_files() for all units is done at mount from
- * qibfs_fill_super(), so one way or another, everything works.
- */
- if (qib_super == NULL)
- ret = 0;
- else
- ret = add_cntr_files(qib_super, dd);
- return ret;
-}
-
-int qibfs_remove(struct qib_devdata *dd)
-{
- int ret = 0;
-
- if (qib_super)
- ret = remove_device_files(qib_super, dd);
-
- return ret;
-}
-
-static struct file_system_type qibfs_fs_type = {
- .owner = THIS_MODULE,
- .name = "ipathfs",
- .init_fs_context = qibfs_init_fs_context,
- .kill_sb = qibfs_kill_super,
-};
-MODULE_ALIAS_FS("ipathfs");
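-
-/*
- * Usage (illustrative): the filesystem is mounted explicitly, e.g.
- *
- *	mount -t ipathfs none /mnt/ipathfs
- *
- * (the mount point is arbitrary), after which the counter files
- * described above become visible.
- */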
-
-int __init qib_init_qibfs(void)
-{
- return register_filesystem(&qibfs_fs_type);
-}
-
-int __exit qib_exit_qibfs(void)
-{
- return unregister_filesystem(&qibfs_fs_type);
-}
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
deleted file mode 100644
index 2640d283eee6..000000000000
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ /dev/null
@@ -1,3533 +0,0 @@
-/*
- * Copyright (c) 2013 - 2017 Intel Corporation. All rights reserved.
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/*
- * This file contains all of the code that is specific to the
- * QLogic_IB 6120 PCIe chip.
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <rdma/ib_verbs.h>
-
-#include "qib.h"
-#include "qib_6120_regs.h"
-
-static void qib_6120_setup_setextled(struct qib_pportdata *, u32);
-static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op);
-static u8 qib_6120_phys_portstate(u64);
-static u32 qib_6120_iblink_state(u64);
-
-/*
- * This file contains all the chip-specific register information and
- * access functions for the QLogic_IB 6120 PCI-Express chip.
- */
-
-/* KREG_IDX uses machine-generated #defines */
-#define KREG_IDX(regname) (QIB_6120_##regname##_OFFS / sizeof(u64))
-
-/* Use defines to tie machine-generated names to lower-case names */
-#define kr_extctrl KREG_IDX(EXTCtrl)
-#define kr_extstatus KREG_IDX(EXTStatus)
-#define kr_gpio_clear KREG_IDX(GPIOClear)
-#define kr_gpio_mask KREG_IDX(GPIOMask)
-#define kr_gpio_out KREG_IDX(GPIOOut)
-#define kr_gpio_status KREG_IDX(GPIOStatus)
-#define kr_rcvctrl KREG_IDX(RcvCtrl)
-#define kr_sendctrl KREG_IDX(SendCtrl)
-#define kr_partitionkey KREG_IDX(RcvPartitionKey)
-#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
-#define kr_ibcstatus KREG_IDX(IBCStatus)
-#define kr_ibcctrl KREG_IDX(IBCCtrl)
-#define kr_sendbuffererror KREG_IDX(SendBufErr0)
-#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
-#define kr_counterregbase KREG_IDX(CntrRegBase)
-#define kr_palign KREG_IDX(PageAlign)
-#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
-#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
-#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
-#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
-#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
-#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
-#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
-#define kr_scratch KREG_IDX(Scratch)
-#define kr_sendpioavailaddr KREG_IDX(SendPIOAvailAddr)
-#define kr_sendpiobufbase KREG_IDX(SendPIOBufBase)
-#define kr_sendpiobufcnt KREG_IDX(SendPIOBufCnt)
-#define kr_sendpiosize KREG_IDX(SendPIOSize)
-#define kr_sendregbase KREG_IDX(SendRegBase)
-#define kr_userregbase KREG_IDX(UserRegBase)
-#define kr_control KREG_IDX(Control)
-#define kr_intclear KREG_IDX(IntClear)
-#define kr_intmask KREG_IDX(IntMask)
-#define kr_intstatus KREG_IDX(IntStatus)
-#define kr_errclear KREG_IDX(ErrClear)
-#define kr_errmask KREG_IDX(ErrMask)
-#define kr_errstatus KREG_IDX(ErrStatus)
-#define kr_hwerrclear KREG_IDX(HwErrClear)
-#define kr_hwerrmask KREG_IDX(HwErrMask)
-#define kr_hwerrstatus KREG_IDX(HwErrStatus)
-#define kr_revision KREG_IDX(Revision)
-#define kr_portcnt KREG_IDX(PortCnt)
-#define kr_serdes_cfg0 KREG_IDX(SerdesCfg0)
-#define kr_serdes_cfg1 (kr_serdes_cfg0 + 1)
-#define kr_serdes_stat KREG_IDX(SerdesStat)
-#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
-
-/* These must only be written via qib_write_kreg_ctxt() */
-#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
-#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
-
-#define CREG_IDX(regname) ((QIB_6120_##regname##_OFFS - \
- QIB_6120_LBIntCnt_OFFS) / sizeof(u64))
-
-#define cr_badformat CREG_IDX(RxBadFormatCnt)
-#define cr_erricrc CREG_IDX(RxICRCErrCnt)
-#define cr_errlink CREG_IDX(RxLinkProblemCnt)
-#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
-#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
-#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlErrCnt)
-#define cr_err_rlen CREG_IDX(RxLenErrCnt)
-#define cr_errslen CREG_IDX(TxLenErrCnt)
-#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
-#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
-#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
-#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
-#define cr_lbint CREG_IDX(LBIntCnt)
-#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
-#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
-#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
-#define cr_pktrcv CREG_IDX(RxDataPktCnt)
-#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
-#define cr_pktsend CREG_IDX(TxDataPktCnt)
-#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
-#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
-#define cr_rcvebp CREG_IDX(RxEBPCnt)
-#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
-#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
-#define cr_sendstall CREG_IDX(TxFlowStallCnt)
-#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
-#define cr_wordrcv CREG_IDX(RxDwordCnt)
-#define cr_wordsend CREG_IDX(TxDwordCnt)
-#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
-#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
-#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
-#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
-#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
-
-#define SYM_RMASK(regname, fldname) ((u64) \
- QIB_6120_##regname##_##fldname##_RMASK)
-#define SYM_MASK(regname, fldname) ((u64) \
- QIB_6120_##regname##_##fldname##_RMASK << \
- QIB_6120_##regname##_##fldname##_LSB)
-#define SYM_LSB(regname, fldname) (QIB_6120_##regname##_##fldname##_LSB)
-
-#define SYM_FIELD(value, regname, fldname) ((u64) \
- (((value) >> SYM_LSB(regname, fldname)) & \
- SYM_RMASK(regname, fldname)))
-#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
-#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
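-
-/*
- * Illustrative use of the SYM_* helpers above (a sketch, not part of
- * the original driver): extracting the IBC link state field from an
- * IBCStatus read, as the driver's qib_6120_iblink_state() does.
- * SYM_FIELD() expands to
- * ((ibcs >> QIB_6120_IBCStatus_LinkState_LSB) &
- *  QIB_6120_IBCStatus_LinkState_RMASK).
- */
-static inline u32 example_iblink_state(u64 ibcs)
-{
-	return (u32) SYM_FIELD(ibcs, IBCStatus, LinkState);
-}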
-
-/* link training states, from IBC */
-#define IB_6120_LT_STATE_DISABLED 0x00
-#define IB_6120_LT_STATE_LINKUP 0x01
-#define IB_6120_LT_STATE_POLLACTIVE 0x02
-#define IB_6120_LT_STATE_POLLQUIET 0x03
-#define IB_6120_LT_STATE_SLEEPDELAY 0x04
-#define IB_6120_LT_STATE_SLEEPQUIET 0x05
-#define IB_6120_LT_STATE_CFGDEBOUNCE 0x08
-#define IB_6120_LT_STATE_CFGRCVFCFG 0x09
-#define IB_6120_LT_STATE_CFGWAITRMT 0x0a
-#define IB_6120_LT_STATE_CFGIDLE 0x0b
-#define IB_6120_LT_STATE_RECOVERRETRAIN 0x0c
-#define IB_6120_LT_STATE_RECOVERWAITRMT 0x0e
-#define IB_6120_LT_STATE_RECOVERIDLE 0x0f
-
-/* link state machine states from IBC */
-#define IB_6120_L_STATE_DOWN 0x0
-#define IB_6120_L_STATE_INIT 0x1
-#define IB_6120_L_STATE_ARM 0x2
-#define IB_6120_L_STATE_ACTIVE 0x3
-#define IB_6120_L_STATE_ACT_DEFER 0x4
-
-static const u8 qib_6120_physportstate[0x20] = {
- [IB_6120_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
- [IB_6120_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
- [IB_6120_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
- [IB_6120_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
- [IB_6120_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
- [IB_6120_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
- [IB_6120_LT_STATE_CFGDEBOUNCE] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_6120_LT_STATE_CFGRCVFCFG] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_6120_LT_STATE_CFGWAITRMT] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_6120_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_6120_LT_STATE_RECOVERRETRAIN] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [IB_6120_LT_STATE_RECOVERWAITRMT] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [IB_6120_LT_STATE_RECOVERIDLE] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
-};
-
-
-struct qib_chip_specific {
- u64 __iomem *cregbase;
- u64 *cntrs;
- u64 *portcntrs;
- void *dummy_hdrq; /* used after ctxt close */
- dma_addr_t dummy_hdrq_phys;
- spinlock_t kernel_tid_lock; /* no back to back kernel TID writes */
- spinlock_t user_tid_lock; /* no back to back user TID writes */
- spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
- spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
- u64 hwerrmask;
- u64 errormask;
- u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
- u64 gpio_mask; /* shadow the gpio mask register */
- u64 extctrl; /* shadow the gpio output enable, etc... */
- /*
- * these 5 fields are used to establish deltas for IB symbol
- * errors and linkrecovery errors. They can be reported on
- * some chips during link negotiation prior to INIT, and with
- * DDR when faking DDR negotiations with non-IBTA switches.
- * The chip counters are adjusted at driver unload if there is
- * a non-zero delta.
- */
- u64 ibdeltainprog;
- u64 ibsymdelta;
- u64 ibsymsnap;
- u64 iblnkerrdelta;
- u64 iblnkerrsnap;
- u64 ibcctrl; /* shadow for kr_ibcctrl */
- u32 lastlinkrecov; /* link recovery issue */
- u32 cntrnamelen;
- u32 portcntrnamelen;
- u32 ncntrs;
- u32 nportcntrs;
- /* used with gpio interrupts to implement IB counters */
- u32 rxfc_unsupvl_errs;
- u32 overrun_thresh_errs;
- /*
- * these count only cases where _successive_ LocalLinkIntegrity
- * errors were seen in the receive headers of IB standard packets
- */
- u32 lli_errs;
- u32 lli_counter;
- u64 lli_thresh;
- u64 sword; /* total dwords sent (sample result) */
- u64 rword; /* total dwords received (sample result) */
- u64 spkts; /* total packets sent (sample result) */
- u64 rpkts; /* total packets received (sample result) */
- u64 xmit_wait; /* # of ticks no data sent (sample result) */
- struct timer_list pma_timer;
- struct qib_pportdata *ppd;
- char emsgbuf[128];
- char bitsmsgbuf[64];
- u8 pma_sample_status;
-};
-
-/* ibcctrl bits */
-#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
-/* cycle through TS1/TS2 till OK */
-#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
-/* wait for TS1, then go on */
-#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
-#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
-
-#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
-#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
-#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
-#define QLOGIC_IB_IBCC_LINKCMD_SHIFT 18
-
-/*
- * We could have a single register get/put routine that takes a group type,
- * but this is somewhat clearer and cleaner. It also gives us some error
- * checking. 64-bit register reads should always work, but are inefficient
- * on Opteron (the northbridge always generates 2 separate HT 32-bit reads),
- * so we use kreg32 wherever possible. User register and counter register
- * reads are always 32-bit reads, so there is only one form of those routines.
- */
-
-/**
- * qib_read_ureg32 - read 32-bit virtualized per-context register
- * @dd: device
- * @regno: register number
- * @ctxt: context number
- *
- * Return the contents of a register that is virtualized to be per context.
- * Returns 0 if the chip is not present (not distinguishable from valid
- * contents at runtime; we may add a separate error variable at some point).
- */
-static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
- enum qib_ureg regno, int ctxt)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return 0;
-
- if (dd->userbase)
- return readl(regno + (u64 __iomem *)
- ((char __iomem *)dd->userbase +
- dd->ureg_align * ctxt));
- else
- return readl(regno + (u64 __iomem *)
- (dd->uregbase +
- (char __iomem *)dd->kregbase +
- dd->ureg_align * ctxt));
-}
-
-/**
- * qib_write_ureg - write 32-bit virtualized per-context register
- * @dd: device
- * @regno: register number
- * @value: value
- * @ctxt: context
- *
- * Write the contents of a register that is virtualized to be per context.
- */
-static inline void qib_write_ureg(const struct qib_devdata *dd,
- enum qib_ureg regno, u64 value, int ctxt)
-{
- u64 __iomem *ubase;
-
- if (dd->userbase)
- ubase = (u64 __iomem *)
- ((char __iomem *) dd->userbase +
- dd->ureg_align * ctxt);
- else
- ubase = (u64 __iomem *)
- (dd->uregbase +
- (char __iomem *) dd->kregbase +
- dd->ureg_align * ctxt);
-
- if (dd->kregbase && (dd->flags & QIB_PRESENT))
- writeq(value, &ubase[regno]);
-}
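-
-/*
- * Per-context user register addressing (a sketch of the layout the two
- * helpers above compute): contexts are separated by a fixed stride, so
- * the byte address of user register "regno" for context c is
- *
- *	(userbase ? userbase : kregbase + uregbase)
- *		+ ureg_align * c + regno * sizeof(u64)
- */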
-
-static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
- const u16 regno)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return -1;
- return readl((u32 __iomem *)&dd->kregbase[regno]);
-}
-
-static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
- const u16 regno)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return -1;
-
- return readq(&dd->kregbase[regno]);
-}
-
-static inline void qib_write_kreg(const struct qib_devdata *dd,
- const u16 regno, u64 value)
-{
- if (dd->kregbase && (dd->flags & QIB_PRESENT))
- writeq(value, &dd->kregbase[regno]);
-}
-
-/**
- * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
- * @dd: the qlogic_ib device
- * @regno: the register number to write
- * @ctxt: the context containing the register
- * @value: the value to write
- */
-static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
- const u16 regno, unsigned ctxt,
- u64 value)
-{
- qib_write_kreg(dd, regno + ctxt, value);
-}
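-
-/*
- * Example (a sketch): per-context kernel registers are laid out
- * consecutively, so setting context c's receive header queue address is
- *
- *	qib_write_kreg_ctxt(dd, kr_rcvhdraddr, c, rcd->rcvhdrq_phys);
- *
- * which resolves to a write of kregbase[kr_rcvhdraddr + c].
- */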
-
-static inline void write_6120_creg(const struct qib_devdata *dd,
- u16 regno, u64 value)
-{
- if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
- writeq(value, &dd->cspec->cregbase[regno]);
-}
-
-static inline u64 read_6120_creg(const struct qib_devdata *dd, u16 regno)
-{
- if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
- return 0;
- return readq(&dd->cspec->cregbase[regno]);
-}
-
-static inline u32 read_6120_creg32(const struct qib_devdata *dd, u16 regno)
-{
- if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
- return 0;
- return readl(&dd->cspec->cregbase[regno]);
-}
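-
-/*
- * Example (a sketch): counters are read through the cregbase mapping
- * using the CREG_IDX-derived indices defined earlier, e.g.
- *
- *	u64 tx_pkts = read_6120_creg32(dd, cr_pktsend);
- *
- * for the TxDataPktCnt counter.
- */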
-
-/* kr_control bits */
-#define QLOGIC_IB_C_RESET 1U
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define QLOGIC_IB_I_RCVURG_MASK ((1U << 5) - 1)
-#define QLOGIC_IB_I_RCVURG_SHIFT 0
-#define QLOGIC_IB_I_RCVAVAIL_MASK ((1U << 5) - 1)
-#define QLOGIC_IB_I_RCVAVAIL_SHIFT 12
-
-#define QLOGIC_IB_C_FREEZEMODE 0x00000002
-#define QLOGIC_IB_C_LINKENABLE 0x00000004
-#define QLOGIC_IB_I_ERROR 0x0000000080000000ULL
-#define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL
-#define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL
-#define QLOGIC_IB_I_GPIO 0x0000000010000000ULL
-#define QLOGIC_IB_I_BITSEXTANT \
- ((QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
- (QLOGIC_IB_I_RCVAVAIL_MASK << \
- QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
- QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
- QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO)
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL
-#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
-#define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
-#define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
-#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
-#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
-#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
-#define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
-#define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
-#define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
-#define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
-#define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL
-
-
-/* kr_extstatus bits */
-#define QLOGIC_IB_EXTS_FREQSEL 0x2
-#define QLOGIC_IB_EXTS_SERDESSEL 0x4
-#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000
-#define QLOGIC_IB_EXTS_MEMBIST_FOUND 0x0000000000008000
-
-/* kr_xgxsconfig bits */
-#define QLOGIC_IB_XGXS_RESET 0x5ULL
-
-#define _QIB_GPIO_SDA_NUM 1
-#define _QIB_GPIO_SCL_NUM 0
-
-/* Bits in GPIO for the added IB link interrupts */
-#define GPIO_RXUVL_BIT 3
-#define GPIO_OVRUN_BIT 4
-#define GPIO_LLI_BIT 5
-#define GPIO_ERRINTR_MASK 0x38
-
-
-#define QLOGIC_IB_RT_BUFSIZE_MASK 0xe0000000ULL
-#define QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid) \
- ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) >> 29) + 11 - 1)
-#define QLOGIC_IB_RT_BUFSIZE(tid) (1 << QLOGIC_IB_RT_BUFSIZE_SHIFTVAL(tid))
-#define QLOGIC_IB_RT_IS_VALID(tid) \
- (((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) && \
- ((((tid) & QLOGIC_IB_RT_BUFSIZE_MASK) != QLOGIC_IB_RT_BUFSIZE_MASK)))
-#define QLOGIC_IB_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */
-#define QLOGIC_IB_RT_ADDR_SHIFT 10
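-
-/*
- * Worked example (illustrative): a TID word with size field 3 in bits
- * 31:29 gives QLOGIC_IB_RT_BUFSIZE_SHIFTVAL = 3 + 10 = 13, i.e. an 8 KB
- * buffer; size-field values 0 and 7 are rejected by
- * QLOGIC_IB_RT_IS_VALID. The low 29 bits carry the buffer's physical
- * address, pre-shifted by QLOGIC_IB_RT_ADDR_SHIFT (see
- * qib_6120_put_tid_2 below for the authoritative encoding).
- */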
-
-#define QLOGIC_IB_R_INTRAVAIL_SHIFT 16
-#define QLOGIC_IB_R_TAILUPD_SHIFT 31
-#define IBA6120_R_PKEY_DIS_SHIFT 30
-
-#define PBC_6120_VL15_SEND_CTRL (1ULL << 31) /* pbc; VL15; link_buf only */
-
-#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
-#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)
-
-#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
- ((1ULL << (SYM_LSB(regname, fldname) + (bit)))))
-
-#define TXEMEMPARITYERR_PIOBUF \
- SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
-#define TXEMEMPARITYERR_PIOPBC \
- SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
-#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
- SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)
-
-#define RXEMEMPARITYERR_RCVBUF \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
-#define RXEMEMPARITYERR_LOOKUPQ \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
-#define RXEMEMPARITYERR_EXPTID \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
-#define RXEMEMPARITYERR_EAGERTID \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
-#define RXEMEMPARITYERR_FLAGBUF \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
-#define RXEMEMPARITYERR_DATAINFO \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
-#define RXEMEMPARITYERR_HDRINFO \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)
-
-/* 6120 specific hardware errors... */
-static const struct qib_hwerror_msgs qib_6120_hwerror_msgs[] = {
- /* generic hardware errors */
- QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
- QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),
-
- QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
- "TXE PIOBUF Memory Parity"),
- QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
- "TXE PIOPBC Memory Parity"),
- QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
- "TXE PIOLAUNCHFIFO Memory Parity"),
-
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
- "RXE RCVBUF Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
- "RXE LOOKUPQ Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
- "RXE EAGERTID Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
- "RXE EXPTID Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
- "RXE FLAGBUF Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
- "RXE DATAINFO Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
- "RXE HDRINFO Memory Parity"),
-
- /* chip-specific hardware errors */
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
- "PCIe Poisoned TLP"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
- "PCIe completion timeout"),
- /*
-	 * In practice, it's unlikely that we'll see PCIe PLL, bus parity,
-	 * or memory parity error failures, because most likely we
- * won't be able to talk to the core of the chip. Nonetheless, we
- * might see them, if they are in parts of the PCIe core that aren't
- * essential.
- */
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
- "PCIePLL1"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
- "PCIePLL0"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
- "PCIe XTLH core parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
- "PCIe ADM TX core parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
- "PCIe ADM RX core parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
- "SerDes PLL"),
-};
-
-#define TXE_PIO_PARITY (TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC)
-#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
- QLOGIC_IB_HWE_COREPLL_RFSLIP)
-
-/* masks for sanity-checking interrupt and error bits */
-#define IB_HWE_BITSEXTANT \
- (HWE_MASK(RXEMemParityErr) | \
- HWE_MASK(TXEMemParityErr) | \
- (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
- QLOGIC_IB_HWE_PCIE1PLLFAILED | \
- QLOGIC_IB_HWE_PCIE0PLLFAILED | \
- QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
- QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
- QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
- QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
- QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
- HWE_MASK(PowerOnBISTFailed) | \
- QLOGIC_IB_HWE_COREPLL_FBSLIP | \
- QLOGIC_IB_HWE_COREPLL_RFSLIP | \
- QLOGIC_IB_HWE_SERDESPLLFAILED | \
- HWE_MASK(IBCBusToSPCParityErr) | \
- HWE_MASK(IBCBusFromSPCParityErr))
-
-#define IB_E_BITSEXTANT \
- (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \
- ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \
- ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \
- ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
- ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \
- ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \
- ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \
- ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \
- ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \
- ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendMaxPktLenErr) | \
- ERR_MASK(SendUnderRunErr) | ERR_MASK(SendPktLenErr) | \
- ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendDroppedDataPktErr) | \
- ERR_MASK(SendPioArmLaunchErr) | \
- ERR_MASK(SendUnexpectedPktNumErr) | \
- ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(IBStatusChanged) | \
- ERR_MASK(InvalidAddrErr) | ERR_MASK(ResetNegated) | \
- ERR_MASK(HardwareErr))
-
-#define QLOGIC_IB_E_PKTERRS ( \
- ERR_MASK(SendPktLenErr) | \
- ERR_MASK(SendDroppedDataPktErr) | \
- ERR_MASK(RcvVCRCErr) | \
- ERR_MASK(RcvICRCErr) | \
- ERR_MASK(RcvShortPktLenErr) | \
- ERR_MASK(RcvEBPErr))
-
-/* These are all rcv-related errors which we want to count for stats */
-#define E_SUM_PKTERRS \
- (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \
- ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \
- ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \
- ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
- ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \
- ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))
-
-/* These are all send-related errors which we want to count for stats */
-#define E_SUM_ERRS \
- (ERR_MASK(SendPioArmLaunchErr) | \
- ERR_MASK(SendUnexpectedPktNumErr) | \
- ERR_MASK(SendDroppedDataPktErr) | \
- ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \
- ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
- ERR_MASK(InvalidAddrErr))
-
-/*
- * This is similar to E_SUM_ERRS, but these are the send errors that are
- * safe to ignore while freezing and cancelling buffers. Armlaunch is not
- * ignorable because more armlaunch errors could occur while we are still
- * cleaning up, and those need to be cancelled as they happen.
- */
-#define E_SPKT_ERRS_IGNORE \
- (ERR_MASK(SendDroppedDataPktErr) | \
- ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \
- ERR_MASK(SendPktLenErr))
-
-/*
- * these are errors that can occur when the link changes state while
- * a packet is being sent or received. This doesn't cover things
- * like EBP or VCRC that can result from the sender's link changing
- * state mid-packet, in which case we receive a "known bad" packet.
- */
-#define E_SUM_LINK_PKTERRS \
- (ERR_MASK(SendDroppedDataPktErr) | \
- ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
- ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
- ERR_MASK(RcvUnexpectedCharErr))
-
-static void qib_6120_put_tid_2(struct qib_devdata *, u64 __iomem *,
- u32, unsigned long);
-
-/*
- * On platforms using this chip, and not having ordered WC stores, we
- * can get TXE parity errors due to speculative reads to the PIO buffers,
- * and this, due to a chip issue, can result in (many) false parity error
- * reports. So it's a debug print on those, and an info print on systems
- * where the speculative reads don't occur.
- */
-static void qib_6120_txe_recover(struct qib_devdata *dd)
-{
- if (!qib_unordered_wc())
- qib_devinfo(dd->pcidev,
- "Recovering from TXE PIO parity error\n");
-}
-
-/* enable/disable chip from delivering interrupts */
-static void qib_6120_set_intr_state(struct qib_devdata *dd, u32 enable)
-{
- if (enable) {
- if (dd->flags & QIB_BADINTR)
- return;
- qib_write_kreg(dd, kr_intmask, ~0ULL);
- /* force re-interrupt of any pending interrupts. */
- qib_write_kreg(dd, kr_intclear, 0ULL);
- } else
- qib_write_kreg(dd, kr_intmask, 0ULL);
-}
-
-/*
- * Try to cleanup as much as possible for anything that might have gone
- * wrong while in freeze mode, such as pio buffers being written by user
- * processes (causing armlaunch), send errors due to going into freeze mode,
- * etc., and try to avoid causing extra interrupts while doing so.
- * Forcibly update the in-memory pioavail register copies after cleanup
- * because the chip won't do it while in freeze mode (the register values
- * themselves are kept correct).
- * Make sure that we don't lose any important interrupts by using the chip
- * feature that says that writing 0 to a bit in *clear that is set in
- * *status will cause an interrupt to be generated again (if allowed by
- * the *mask value).
- * This is in chip-specific code because of all of the register accesses,
- * even though the details are similar on most chips
- */
-static void qib_6120_clear_freeze(struct qib_devdata *dd)
-{
- /* disable error interrupts, to avoid confusion */
- qib_write_kreg(dd, kr_errmask, 0ULL);
-
- /* also disable interrupts; errormask is sometimes overwritten */
- qib_6120_set_intr_state(dd, 0);
-
- qib_cancel_sends(dd->pport);
-
- /* clear the freeze, and be sure chip saw it */
- qib_write_kreg(dd, kr_control, dd->control);
- qib_read_kreg32(dd, kr_scratch);
-
- /* force in-memory update now we are out of freeze */
- qib_force_pio_avail_update(dd);
-
- /*
- * force new interrupt if any hwerr, error or interrupt bits are
- * still set, and clear "safe" send packet errors related to freeze
- * and cancelling sends. Re-enable error interrupts before possible
- * force of re-interrupt on pending interrupts.
- */
- qib_write_kreg(dd, kr_hwerrclear, 0ULL);
- qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
- qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
- qib_6120_set_intr_state(dd, 1);
-}
-
-/**
- * qib_handle_6120_hwerrors - display hardware errors.
- * @dd: the qlogic_ib device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
- * Most hardware errors are catastrophic, but for right now we'll print
- * them and continue. Reuse the same message buffer as
- * handle_6120_errors() to avoid excessive stack usage.
- */
-static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
- size_t msgl)
-{
- u64 hwerrs;
- u32 bits, ctrl;
- int isfatal = 0;
- char *bitsmsg;
-
- hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
- if (!hwerrs)
- return;
- if (hwerrs == ~0ULL) {
- qib_dev_err(dd,
- "Read of hardware error status failed (all bits set); ignoring\n");
- return;
- }
- qib_stats.sps_hwerrs++;
-
- /* Always clear the error status register, except MEMBISTFAIL,
- * regardless of whether we continue or stop using the chip.
- * We want that set so we know it failed, even across driver reload.
- * We'll still ignore it in the hwerrmask. We do this partly for
- * diagnostics, but also for support */
- qib_write_kreg(dd, kr_hwerrclear,
- hwerrs & ~HWE_MASK(PowerOnBISTFailed));
-
- hwerrs &= dd->cspec->hwerrmask;
-
- /*
- * Make sure we get this much out, unless told to be quiet,
- * or it's occurred within the last 5 seconds.
- */
- if (hwerrs & ~(TXE_PIO_PARITY | RXEMEMPARITYERR_EAGERTID))
- qib_devinfo(dd->pcidev,
- "Hardware error: hwerr=0x%llx (cleared)\n",
- (unsigned long long) hwerrs);
-
- if (hwerrs & ~IB_HWE_BITSEXTANT)
- qib_dev_err(dd,
- "hwerror interrupt with unknown errors %llx set\n",
- (unsigned long long)(hwerrs & ~IB_HWE_BITSEXTANT));
-
- ctrl = qib_read_kreg32(dd, kr_control);
- if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
- /*
- * Parity errors in send memory are recoverable,
-		 * just cancel the send (if indicated in sendbuffererror),
- * count the occurrence, unfreeze (if no other handled
- * hardware error bits are set), and continue. They can
- * occur if a processor speculative read is done to the PIO
- * buffer while we are sending a packet, for example.
- */
- if (hwerrs & TXE_PIO_PARITY) {
- qib_6120_txe_recover(dd);
- hwerrs &= ~TXE_PIO_PARITY;
- }
-
- if (!hwerrs)
- qib_6120_clear_freeze(dd);
- else
- isfatal = 1;
- }
-
- *msg = '\0';
-
- if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
- isfatal = 1;
- strlcat(msg,
- "[Memory BIST test failed, InfiniPath hardware unusable]",
- msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
- }
-
- qib_format_hwerrors(hwerrs, qib_6120_hwerror_msgs,
- ARRAY_SIZE(qib_6120_hwerror_msgs), msg, msgl);
-
- bitsmsg = dd->cspec->bitsmsgbuf;
- if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
- bits = (u32) ((hwerrs >>
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
- snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
- "[PCIe Mem Parity Errs %x] ", bits);
- strlcat(msg, bitsmsg, msgl);
- }
-
- if (hwerrs & _QIB_PLL_FAIL) {
- isfatal = 1;
- snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
- "[PLL failed (%llx), InfiniPath hardware unusable]",
- (unsigned long long) hwerrs & _QIB_PLL_FAIL);
- strlcat(msg, bitsmsg, msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
- }
-
- if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
- /*
- * If it occurs, it is left masked since the external
- * interface is unused
- */
- dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
- }
-
- if (hwerrs)
- /*
- * if any set that we aren't ignoring; only
- * make the complaint once, in case it's stuck
- * or recurring, and we get here multiple
- * times.
- */
- qib_dev_err(dd, "%s hardware error\n", msg);
- else
- *msg = 0; /* recovered from all of them */
-
- if (isfatal && !dd->diag_client) {
- qib_dev_err(dd,
- "Fatal Hardware Error, no longer usable, SN %.16s\n",
- dd->serial);
- /*
- * for /sys status file and user programs to print; if no
- * trailing brace is copied, we'll know it was truncated.
- */
- if (dd->freezemsg)
- snprintf(dd->freezemsg, dd->freezelen,
- "{%s}", msg);
- qib_disable_after_error(dd);
- }
-}
-
-/*
- * Decode the error status into strings, deciding whether to always
- * print it or not depending on "normal packet errors" vs everything
- * else. Return 1 if "real" errors, otherwise 0 if only packet
- * errors, so caller can decide what to print with the string.
- */
-static int qib_decode_6120_err(struct qib_devdata *dd, char *buf, size_t blen,
- u64 err)
-{
- int iserr = 1;
-
- *buf = '\0';
- if (err & QLOGIC_IB_E_PKTERRS) {
- if (!(err & ~QLOGIC_IB_E_PKTERRS))
- iserr = 0;
- if ((err & ERR_MASK(RcvICRCErr)) &&
- !(err&(ERR_MASK(RcvVCRCErr)|ERR_MASK(RcvEBPErr))))
- strlcat(buf, "CRC ", blen);
- if (!iserr)
- goto done;
- }
- if (err & ERR_MASK(RcvHdrLenErr))
- strlcat(buf, "rhdrlen ", blen);
- if (err & ERR_MASK(RcvBadTidErr))
- strlcat(buf, "rbadtid ", blen);
- if (err & ERR_MASK(RcvBadVersionErr))
- strlcat(buf, "rbadversion ", blen);
- if (err & ERR_MASK(RcvHdrErr))
- strlcat(buf, "rhdr ", blen);
- if (err & ERR_MASK(RcvLongPktLenErr))
- strlcat(buf, "rlongpktlen ", blen);
- if (err & ERR_MASK(RcvMaxPktLenErr))
- strlcat(buf, "rmaxpktlen ", blen);
- if (err & ERR_MASK(RcvMinPktLenErr))
- strlcat(buf, "rminpktlen ", blen);
- if (err & ERR_MASK(SendMinPktLenErr))
- strlcat(buf, "sminpktlen ", blen);
- if (err & ERR_MASK(RcvFormatErr))
- strlcat(buf, "rformaterr ", blen);
- if (err & ERR_MASK(RcvUnsupportedVLErr))
- strlcat(buf, "runsupvl ", blen);
- if (err & ERR_MASK(RcvUnexpectedCharErr))
- strlcat(buf, "runexpchar ", blen);
- if (err & ERR_MASK(RcvIBFlowErr))
- strlcat(buf, "ribflow ", blen);
- if (err & ERR_MASK(SendUnderRunErr))
- strlcat(buf, "sunderrun ", blen);
- if (err & ERR_MASK(SendPioArmLaunchErr))
- strlcat(buf, "spioarmlaunch ", blen);
- if (err & ERR_MASK(SendUnexpectedPktNumErr))
- strlcat(buf, "sunexperrpktnum ", blen);
- if (err & ERR_MASK(SendDroppedSmpPktErr))
- strlcat(buf, "sdroppedsmppkt ", blen);
- if (err & ERR_MASK(SendMaxPktLenErr))
- strlcat(buf, "smaxpktlen ", blen);
- if (err & ERR_MASK(SendUnsupportedVLErr))
- strlcat(buf, "sunsupVL ", blen);
- if (err & ERR_MASK(InvalidAddrErr))
- strlcat(buf, "invalidaddr ", blen);
- if (err & ERR_MASK(RcvEgrFullErr))
- strlcat(buf, "rcvegrfull ", blen);
- if (err & ERR_MASK(RcvHdrFullErr))
- strlcat(buf, "rcvhdrfull ", blen);
- if (err & ERR_MASK(IBStatusChanged))
- strlcat(buf, "ibcstatuschg ", blen);
- if (err & ERR_MASK(RcvIBLostLinkErr))
- strlcat(buf, "riblostlink ", blen);
- if (err & ERR_MASK(HardwareErr))
- strlcat(buf, "hardware ", blen);
- if (err & ERR_MASK(ResetNegated))
- strlcat(buf, "reset ", blen);
-done:
- return iserr;
-}
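
/*
 * A stand-alone sketch of the same decode done table-driven instead
 * of as an if-chain; the masks and names here are illustrative
 * placeholders, not the chip's real ERR_MASK bit layout.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct err_name {
	uint64_t mask;
	const char *name;
};

static const struct err_name err_names[] = {
	{ 1ULL << 0, "rhdrlen " },
	{ 1ULL << 1, "rbadtid " },
	{ 1ULL << 2, "rbadversion " },
};

static void decode_errs(uint64_t err, char *buf, size_t blen)
{
	size_t i;

	buf[0] = '\0';
	for (i = 0; i < sizeof(err_names) / sizeof(err_names[0]); i++)
		if (err & err_names[i].mask)
			strncat(buf, err_names[i].name,
				blen - strlen(buf) - 1);
}

int main(void)
{
	char buf[80];

	decode_errs((1ULL << 0) | (1ULL << 2), buf, sizeof(buf));
	printf("%s\n", buf);	/* prints "rhdrlen rbadversion " */
	return 0;
}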
-
-/*
- * Called when we might have an error that is specific to a particular
- * PIO buffer, and may need to cancel that buffer, so it can be re-used.
- */
-static void qib_disarm_6120_senderrbufs(struct qib_pportdata *ppd)
-{
- unsigned long sbuf[2];
- struct qib_devdata *dd = ppd->dd;
-
- /*
- * It's possible that sendbuffererror could have bits set; might
- * have already done this as a result of hardware error handling.
- */
- sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
- sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
-
- if (sbuf[0] || sbuf[1])
- qib_disarm_piobufs_set(dd, sbuf,
- dd->piobcnt2k + dd->piobcnt4k);
-}
-
-static int chk_6120_linkrecovery(struct qib_devdata *dd, u64 ibcs)
-{
- int ret = 1;
- u32 ibstate = qib_6120_iblink_state(ibcs);
- u32 linkrecov = read_6120_creg32(dd, cr_iblinkerrrecov);
-
- if (linkrecov != dd->cspec->lastlinkrecov) {
- /* and no more until active again */
- dd->cspec->lastlinkrecov = 0;
- qib_set_linkstate(dd->pport, QIB_IB_LINKDOWN);
- ret = 0;
- }
- if (ibstate == IB_PORT_ACTIVE)
- dd->cspec->lastlinkrecov =
- read_6120_creg32(dd, cr_iblinkerrrecov);
- return ret;
-}
-
-static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
-{
- char *msg;
- u64 ignore_this_time = 0;
- u64 iserr = 0;
- struct qib_pportdata *ppd = dd->pport;
- u64 mask;
-
- /* don't report errors that are masked */
- errs &= dd->cspec->errormask;
- msg = dd->cspec->emsgbuf;
-
- /* do these first, they are most important */
- if (errs & ERR_MASK(HardwareErr))
- qib_handle_6120_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
-
- if (errs & ~IB_E_BITSEXTANT)
- qib_dev_err(dd,
- "error interrupt with unknown errors %llx set\n",
- (unsigned long long) (errs & ~IB_E_BITSEXTANT));
-
- if (errs & E_SUM_ERRS) {
- qib_disarm_6120_senderrbufs(ppd);
- if ((errs & E_SUM_LINK_PKTERRS) &&
- !(ppd->lflags & QIBL_LINKACTIVE)) {
- /*
- * This can happen when trying to bring the link
- * up, but the IB link changes state at the "wrong"
- * time. The IB logic then complains that the packet
- * isn't valid. We don't want to confuse people, so
- * we just don't print them, except at debug
- */
- ignore_this_time = errs & E_SUM_LINK_PKTERRS;
- }
- } else if ((errs & E_SUM_LINK_PKTERRS) &&
- !(ppd->lflags & QIBL_LINKACTIVE)) {
- /*
- * This can happen when SMA is trying to bring the link
- * up, but the IB link changes state at the "wrong" time.
- * The IB logic then complains that the packet isn't
- * valid. We don't want to confuse people, so we just
- * don't print them, except at debug
- */
- ignore_this_time = errs & E_SUM_LINK_PKTERRS;
- }
-
- qib_write_kreg(dd, kr_errclear, errs);
-
- errs &= ~ignore_this_time;
- if (!errs)
- goto done;
-
- /*
- * The ones we mask off are handled specially below
- * or above.
- */
- mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) |
- ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr);
- qib_decode_6120_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
-
- if (errs & E_SUM_PKTERRS)
- qib_stats.sps_rcverrs++;
- if (errs & E_SUM_ERRS)
- qib_stats.sps_txerrs++;
-
- iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS);
-
- if (errs & ERR_MASK(IBStatusChanged)) {
- u64 ibcs = qib_read_kreg64(dd, kr_ibcstatus);
- u32 ibstate = qib_6120_iblink_state(ibcs);
- int handle = 1;
-
- if (ibstate != IB_PORT_INIT && dd->cspec->lastlinkrecov)
- handle = chk_6120_linkrecovery(dd, ibcs);
- /*
- * Since going into a recovery state causes the link state
- * to go down and since recovery is transitory, it is better
- * if we "miss" ever seeing the link training state go into
- * recovery (i.e., ignore this transition for link state
- * special handling purposes) without updating lastibcstat.
- */
- if (handle && qib_6120_phys_portstate(ibcs) ==
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
- handle = 0;
- if (handle)
- qib_handle_e_ibstatuschanged(ppd, ibcs);
- }
-
- if (errs & ERR_MASK(ResetNegated)) {
- qib_dev_err(dd,
- "Got reset, requires re-init (unload and reload driver)\n");
- dd->flags &= ~QIB_INITTED; /* needs re-init */
- /* mark as having had error */
- *dd->devstatusp |= QIB_STATUS_HWERROR;
- *dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
- }
-
- if (*msg && iserr)
- qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
-
- if (ppd->state_wanted & ppd->lflags)
- wake_up_interruptible(&ppd->state_wait);
-
- /*
- * If there were hdrq or egrfull errors, wake up any processes
- * waiting in poll. We used to try to check which contexts had
- * the overflow, but given the cost of that and the chip reads
- * to support it, it's better to just wake everybody up if we
- * get an overflow; waiters can poll again if it's not them.
- */
- if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
- qib_handle_urcv(dd, ~0U);
- if (errs & ERR_MASK(RcvEgrFullErr))
- qib_stats.sps_buffull++;
- else
- qib_stats.sps_hdrfull++;
- }
-done:
- return;
-}
-
-/**
- * qib_6120_init_hwerrors - enable hardware errors
- * @dd: the qlogic_ib device
- *
- * now that we have finished initializing everything that might reasonably
- * cause a hardware error, and cleared those error bits as they occur,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask
- */
-static void qib_6120_init_hwerrors(struct qib_devdata *dd)
-{
- u64 val;
- u64 extsval;
-
- extsval = qib_read_kreg64(dd, kr_extstatus);
-
- if (!(extsval & QLOGIC_IB_EXTS_MEMBIST_ENDTEST))
- qib_dev_err(dd, "MemBIST did not complete!\n");
-
- /* init so all hwerrors interrupt, and enter freeze, adjust below */
- val = ~0ULL;
- if (dd->minrev < 2) {
- /*
- * Avoid problem with internal interface bus parity
- * checking. Fixed in Rev2.
- */
- val &= ~QLOGIC_IB_HWE_PCIEBUSPARITYRADM;
- }
- /* avoid some intel cpu's speculative read freeze mode issue */
- val &= ~TXEMEMPARITYERR_PIOBUF;
-
- dd->cspec->hwerrmask = val;
-
- qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
-
- /* clear all */
- qib_write_kreg(dd, kr_errclear, ~0ULL);
- /* enable errors that are masked, at least this first time. */
- qib_write_kreg(dd, kr_errmask, ~0ULL);
- dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
- /* clear any interrupts up to this point (ints still not enabled) */
- qib_write_kreg(dd, kr_intclear, ~0ULL);
-
- qib_write_kreg(dd, kr_rcvbthqp,
- dd->qpn_mask << (QIB_6120_RcvBTHQP_BTHQP_Mask_LSB - 1) |
- QIB_KD_QP);
-}
-
-/*
- * Disable and enable the armlaunch error. Used for PIO bandwidth testing
- * on chips that are count-based, rather than trigger-based. There is no
- * reference counting, but that's also fine, given the intended use.
- * Only chip-specific because it's all register accesses
- */
-static void qib_set_6120_armlaunch(struct qib_devdata *dd, u32 enable)
-{
- if (enable) {
- qib_write_kreg(dd, kr_errclear,
- ERR_MASK(SendPioArmLaunchErr));
- dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
- } else
- dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
- qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
-}
-
-/*
- * Formerly took parameter <which> in pre-shifted,
- * pre-merged form with LinkCmd and LinkInitCmd
- * together, assuming that zero was a NOP.
- */
-static void qib_set_ib_6120_lstate(struct qib_pportdata *ppd, u16 linkcmd,
- u16 linitcmd)
-{
- u64 mod_wd;
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
-
- if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
- /*
- * If we are told to disable, note that so link-recovery
- * code does not attempt to bring us back up.
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
- /*
- * Any other linkinitcmd will lead to LINKDOWN and then
- * to INIT (if all is well), so clear flag to let
- * link-recovery code attempt to bring us back up.
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- }
-
- mod_wd = (linkcmd << QLOGIC_IB_IBCC_LINKCMD_SHIFT) |
- (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
-
- qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl | mod_wd);
- /* write to chip to prevent back-to-back writes of control reg */
- qib_write_kreg(dd, kr_scratch, 0);
-}
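
/*
 * A minimal sketch of the linkcmd/linitcmd merge above; the shift
 * and command values are illustrative stand-ins for the driver's
 * QLOGIC_IB_IBCC_* defines, not the verified hardware layout.
 */
#include <stdint.h>
#include <stdio.h>

#define LINKINITCMD_SHIFT	16	/* assumed, for illustration */
#define LINKCMD_SHIFT		18	/* assumed, for illustration */

int main(void)
{
	uint64_t linkcmd = 1, linitcmd = 2;
	uint64_t mod_wd = (linkcmd << LINKCMD_SHIFT) |
			  (linitcmd << LINKINITCMD_SHIFT);

	/* mod_wd is OR'd with the ibcctrl shadow before the write */
	printf("mod_wd = 0x%llx\n", (unsigned long long)mod_wd);
	return 0;
}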
-
-/**
- * qib_6120_bringup_serdes - bring up the serdes
- * @ppd: the qlogic_ib device
- */
-static int qib_6120_bringup_serdes(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 val, config1, prev_val, hwstat, ibc;
-
- /* Put IBC in reset, sends disabled */
- dd->control &= ~QLOGIC_IB_C_LINKENABLE;
- qib_write_kreg(dd, kr_control, 0ULL);
-
- dd->cspec->ibdeltainprog = 1;
- dd->cspec->ibsymsnap = read_6120_creg32(dd, cr_ibsymbolerr);
- dd->cspec->iblnkerrsnap = read_6120_creg32(dd, cr_iblinkerrrecov);
-
- /* flowcontrolwatermark is in units of KBytes */
- ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
- /*
- * How often flowctrl is sent. More or less in usecs; balance against
- * watermark value, so that in theory senders always get a flow
- * control update in time to not let the IB link go idle.
- */
- ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
- /* max error tolerance */
- dd->cspec->lli_thresh = 0xf;
- ibc |= (u64) dd->cspec->lli_thresh << SYM_LSB(IBCCtrl, PhyerrThreshold);
- /* use "real" buffer space for */
- ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
- /* IB credit flow control. */
- ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
- /*
- * set initial max size pkt IBC will send, including ICRC; it's the
- * PIO buffer size in dwords, less 1; also see qib_set_mtu()
- */
- ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
- dd->cspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
-
- /* initially come up waiting for TS1, without sending anything. */
- val = dd->cspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
- QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
- qib_write_kreg(dd, kr_ibcctrl, val);
-
- val = qib_read_kreg64(dd, kr_serdes_cfg0);
- config1 = qib_read_kreg64(dd, kr_serdes_cfg1);
-
- /*
- * Force reset on, also set rxdetect enable. Must do before reading
- * serdesstatus at least for simulation, or some of the bits in
- * serdes status will come back as undefined and cause simulation
- * failures
- */
- val |= SYM_MASK(SerdesCfg0, ResetPLL) |
- SYM_MASK(SerdesCfg0, RxDetEnX) |
- (SYM_MASK(SerdesCfg0, L1PwrDnA) |
- SYM_MASK(SerdesCfg0, L1PwrDnB) |
- SYM_MASK(SerdesCfg0, L1PwrDnC) |
- SYM_MASK(SerdesCfg0, L1PwrDnD));
- qib_write_kreg(dd, kr_serdes_cfg0, val);
- /* be sure chip saw it */
- qib_read_kreg64(dd, kr_scratch);
- udelay(5); /* need pll reset set at least for a bit */
- /*
- * after PLL is reset, set the per-lane Resets and TxIdle and
- * clear the PLL reset and rxdetect (to get falling edge).
- * Leave L1PWR bits set (permanently)
- */
- val &= ~(SYM_MASK(SerdesCfg0, RxDetEnX) |
- SYM_MASK(SerdesCfg0, ResetPLL) |
- (SYM_MASK(SerdesCfg0, L1PwrDnA) |
- SYM_MASK(SerdesCfg0, L1PwrDnB) |
- SYM_MASK(SerdesCfg0, L1PwrDnC) |
- SYM_MASK(SerdesCfg0, L1PwrDnD)));
- val |= (SYM_MASK(SerdesCfg0, ResetA) |
- SYM_MASK(SerdesCfg0, ResetB) |
- SYM_MASK(SerdesCfg0, ResetC) |
- SYM_MASK(SerdesCfg0, ResetD)) |
- SYM_MASK(SerdesCfg0, TxIdeEnX);
- qib_write_kreg(dd, kr_serdes_cfg0, val);
- /* be sure chip saw it */
- (void) qib_read_kreg64(dd, kr_scratch);
- /* need PLL reset clear for at least 11 usec before lane
- * resets cleared; give it a few more to be sure */
- udelay(15);
- val &= ~((SYM_MASK(SerdesCfg0, ResetA) |
- SYM_MASK(SerdesCfg0, ResetB) |
- SYM_MASK(SerdesCfg0, ResetC) |
- SYM_MASK(SerdesCfg0, ResetD)) |
- SYM_MASK(SerdesCfg0, TxIdeEnX));
-
- qib_write_kreg(dd, kr_serdes_cfg0, val);
- /* be sure chip saw it */
- (void) qib_read_kreg64(dd, kr_scratch);
-
- val = qib_read_kreg64(dd, kr_xgxs_cfg);
- prev_val = val;
- if (val & QLOGIC_IB_XGXS_RESET)
- val &= ~QLOGIC_IB_XGXS_RESET;
- if (SYM_FIELD(val, XGXSCfg, polarity_inv) != ppd->rx_pol_inv) {
- /* need to compensate for Tx inversion in partner */
- val &= ~SYM_MASK(XGXSCfg, polarity_inv);
- val |= (u64)ppd->rx_pol_inv << SYM_LSB(XGXSCfg, polarity_inv);
- }
- if (val != prev_val)
- qib_write_kreg(dd, kr_xgxs_cfg, val);
-
- val = qib_read_kreg64(dd, kr_serdes_cfg0);
-
- /* clear current and de-emphasis bits */
- config1 &= ~0x0ffffffff00ULL;
- /* set current to 20ma */
- config1 |= 0x00000000000ULL;
- /* set de-emphasis to -5.68dB */
- config1 |= 0x0cccc000000ULL;
- qib_write_kreg(dd, kr_serdes_cfg1, config1);
-
- /* base and port guid same for single port */
- ppd->guid = dd->base_guid;
-
- /*
- * the process of setting and un-resetting the serdes normally
- * causes a serdes PLL error, so check for that and clear it
- * here. Also clear the hwerr bit in errstatus, but not others.
- */
- hwstat = qib_read_kreg64(dd, kr_hwerrstatus);
- if (hwstat) {
- /* should just have PLL, clear all set, in any case */
- qib_write_kreg(dd, kr_hwerrclear, hwstat);
- qib_write_kreg(dd, kr_errclear, ERR_MASK(HardwareErr));
- }
-
- dd->control |= QLOGIC_IB_C_LINKENABLE;
- dd->control &= ~QLOGIC_IB_C_FREEZEMODE;
- qib_write_kreg(dd, kr_control, dd->control);
-
- return 0;
-}
-
-/**
- * qib_6120_quiet_serdes - set serdes to txidle
- * @ppd: physical port of the qlogic_ib device
- * Called when driver is being unloaded
- */
-static void qib_6120_quiet_serdes(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 val;
-
- qib_set_ib_6120_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
-
- /* disable IBC */
- dd->control &= ~QLOGIC_IB_C_LINKENABLE;
- qib_write_kreg(dd, kr_control,
- dd->control | QLOGIC_IB_C_FREEZEMODE);
-
- if (dd->cspec->ibsymdelta || dd->cspec->iblnkerrdelta ||
- dd->cspec->ibdeltainprog) {
- u64 diagc;
-
- /* enable counter writes */
- diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
- qib_write_kreg(dd, kr_hwdiagctrl,
- diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
-
- if (dd->cspec->ibsymdelta || dd->cspec->ibdeltainprog) {
- val = read_6120_creg32(dd, cr_ibsymbolerr);
- if (dd->cspec->ibdeltainprog)
- val -= val - dd->cspec->ibsymsnap;
- val -= dd->cspec->ibsymdelta;
- write_6120_creg(dd, cr_ibsymbolerr, val);
- }
- if (dd->cspec->iblnkerrdelta || dd->cspec->ibdeltainprog) {
- val = read_6120_creg32(dd, cr_iblinkerrrecov);
- if (dd->cspec->ibdeltainprog)
- val -= val - dd->cspec->iblnkerrsnap;
- val -= dd->cspec->iblnkerrdelta;
- write_6120_creg(dd, cr_iblinkerrrecov, val);
- }
-
- /* and disable counter writes */
- qib_write_kreg(dd, kr_hwdiagctrl, diagc);
- }
-
- val = qib_read_kreg64(dd, kr_serdes_cfg0);
- val |= SYM_MASK(SerdesCfg0, TxIdeEnX);
- qib_write_kreg(dd, kr_serdes_cfg0, val);
-}
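
/*
 * Sketch of the restore math above: "val -= val - snap" is simply
 * "val = snap", i.e. roll the counter back to the value snapshotted
 * when the delta tracking began, then subtract the accumulated delta.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t snap = 100, delta = 7;
	uint32_t val = 150;	/* current chip counter reading */

	val -= val - snap;	/* val is now 100 (the snapshot) */
	val -= delta;		/* val is now 93 */
	printf("restored counter = %u\n", val);
	return 0;
}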
-
-/**
- * qib_6120_setup_setextled - set the state of the two external LEDs
- * @ppd: the qlogic_ib device
- * @on: whether the link is up or not
- *
- * The exact combo of LEDs if on is true is determined by looking
- * at the ibcstatus.
- * These LEDs indicate the physical and logical state of IB link.
- * For this chip (at least with recommended board pinouts), LED1
- * is Yellow (logical state) and LED2 is Green (physical state),
- *
- * Note: We try to match the Mellanox HCA LED behavior as best
- * we can. Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate. That's
- * visible overhead, so not something we will do.
- *
- */
-static void qib_6120_setup_setextled(struct qib_pportdata *ppd, u32 on)
-{
- u64 extctl, val, lst, ltst;
- unsigned long flags;
- struct qib_devdata *dd = ppd->dd;
-
- /*
- * The diags use the LED to indicate diag info, so we leave
- * the external LED alone when the diags are running.
- */
- if (dd->diag_client)
- return;
-
- /* Allow override of LED display, e.g. for locating the system in a rack */
- if (ppd->led_override) {
- ltst = (ppd->led_override & QIB_LED_PHYS) ?
- IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED;
- lst = (ppd->led_override & QIB_LED_LOG) ?
- IB_PORT_ACTIVE : IB_PORT_DOWN;
- } else if (on) {
- val = qib_read_kreg64(dd, kr_ibcstatus);
- ltst = qib_6120_phys_portstate(val);
- lst = qib_6120_iblink_state(val);
- } else {
- ltst = 0;
- lst = 0;
- }
-
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
- SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
-
- if (ltst == IB_PHYSPORTSTATE_LINKUP)
- extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
- if (lst == IB_PORT_ACTIVE)
- extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
- dd->cspec->extctrl = extctl;
- qib_write_kreg(dd, kr_extctrl, extctl);
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
-}
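
/*
 * Stand-alone sketch of the LED selection above, mirroring the two
 * extctl writes: yellow follows the physical training state, green
 * follows logical ACTIVE. The enum values are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

enum { PHYS_LINKUP = 5, PORT_ACTIVE = 4 };	/* assumed values */

static void led_bits(int ltst, int lst, bool *yellow, bool *green)
{
	*yellow = (ltst == PHYS_LINKUP);
	*green = (lst == PORT_ACTIVE);
}

int main(void)
{
	bool y, g;

	led_bits(PHYS_LINKUP, PORT_ACTIVE, &y, &g);
	printf("yellow=%d green=%d\n", y, g);
	return 0;
}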
-
-/**
- * qib_6120_setup_cleanup - clean up any per-chip chip-specific stuff
- * @dd: the qlogic_ib device
- *
- * This is called during driver unload.
-*/
-static void qib_6120_setup_cleanup(struct qib_devdata *dd)
-{
- qib_free_irq(dd);
- kfree(dd->cspec->cntrs);
- kfree(dd->cspec->portcntrs);
- if (dd->cspec->dummy_hdrq) {
- dma_free_coherent(&dd->pcidev->dev,
- ALIGN(dd->rcvhdrcnt *
- dd->rcvhdrentsize *
- sizeof(u32), PAGE_SIZE),
- dd->cspec->dummy_hdrq,
- dd->cspec->dummy_hdrq_phys);
- dd->cspec->dummy_hdrq = NULL;
- }
-}
-
-static void qib_wantpiobuf_6120_intr(struct qib_devdata *dd, u32 needint)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- if (needint)
- dd->sendctrl |= SYM_MASK(SendCtrl, PIOIntBufAvail);
- else
- dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOIntBufAvail);
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-}
-
-/*
- * handle errors and unusual events first, separate function
- * to improve cache hits for fast path interrupt handling
- */
-static noinline void unlikely_6120_intr(struct qib_devdata *dd, u64 istat)
-{
- if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
- qib_dev_err(dd, "interrupt with unknown interrupts %Lx set\n",
- istat & ~QLOGIC_IB_I_BITSEXTANT);
-
- if (istat & QLOGIC_IB_I_ERROR) {
- u64 estat = 0;
-
- qib_stats.sps_errints++;
- estat = qib_read_kreg64(dd, kr_errstatus);
- if (!estat)
- qib_devinfo(dd->pcidev,
- "error interrupt (%Lx), but no error bits set!\n",
- istat);
- handle_6120_errors(dd, estat);
- }
-
- if (istat & QLOGIC_IB_I_GPIO) {
- u32 gpiostatus;
- u32 to_clear = 0;
-
- /*
- * GPIO_3..5 on IBA6120 Rev2 chips indicate
- * errors that we need to count.
- */
- gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
- /* First the error-counter case. */
- if (gpiostatus & GPIO_ERRINTR_MASK) {
- /* want to clear the bits we see asserted. */
- to_clear |= (gpiostatus & GPIO_ERRINTR_MASK);
-
- /*
- * Count appropriately, clear bits out of our copy,
- * as they have been "handled".
- */
- if (gpiostatus & (1 << GPIO_RXUVL_BIT))
- dd->cspec->rxfc_unsupvl_errs++;
- if (gpiostatus & (1 << GPIO_OVRUN_BIT))
- dd->cspec->overrun_thresh_errs++;
- if (gpiostatus & (1 << GPIO_LLI_BIT))
- dd->cspec->lli_errs++;
- gpiostatus &= ~GPIO_ERRINTR_MASK;
- }
- if (gpiostatus) {
- /*
- * Some unexpected bits remain. If they could have
- * caused the interrupt, complain and clear.
- * To avoid repetition of this condition, also clear
- * the mask. It is almost certainly due to error.
- */
- const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
-
- /*
- * Also check that the chip reflects our shadow,
- * and report issues. If they caused the interrupt,
- * we will suppress by refreshing from the shadow.
- */
- if (mask & gpiostatus) {
- to_clear |= (gpiostatus & mask);
- dd->cspec->gpio_mask &= ~(gpiostatus & mask);
- qib_write_kreg(dd, kr_gpio_mask,
- dd->cspec->gpio_mask);
- }
- }
- if (to_clear)
- qib_write_kreg(dd, kr_gpio_clear, (u64) to_clear);
- }
-}
-
-static irqreturn_t qib_6120intr(int irq, void *data)
-{
- struct qib_devdata *dd = data;
- irqreturn_t ret;
- u32 istat, ctxtrbits, rmask, crcs = 0;
- unsigned i;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- ret = IRQ_HANDLED;
- goto bail;
- }
-
- istat = qib_read_kreg32(dd, kr_intstatus);
-
- if (unlikely(!istat)) {
- ret = IRQ_NONE; /* not our interrupt, or already handled */
- goto bail;
- }
- if (unlikely(istat == -1)) {
- qib_bad_intrstatus(dd);
- /* don't know if it was our interrupt or not */
- ret = IRQ_NONE;
- goto bail;
- }
-
- this_cpu_inc(*dd->int_counter);
-
- if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
- QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
- unlikely_6120_intr(dd, istat);
-
- /*
- * Clear the interrupt bits we found set, relatively early, so we
- * "know" know the chip will have seen this by the time we process
- * the queue, and will re-interrupt if necessary. The processor
- * itself won't take the interrupt again until we return.
- */
- qib_write_kreg(dd, kr_intclear, istat);
-
- /*
- * Handle kernel receive queues before checking for pio buffers
- * available since receives can overflow; piobuf waiters can afford
- * a few extra cycles, since they were waiting anyway.
- */
- ctxtrbits = istat &
- ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
- (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
- if (ctxtrbits) {
- rmask = (1U << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
- (1U << QLOGIC_IB_I_RCVURG_SHIFT);
- for (i = 0; i < dd->first_user_ctxt; i++) {
- if (ctxtrbits & rmask) {
- ctxtrbits &= ~rmask;
- crcs += qib_kreceive(dd->rcd[i],
- &dd->cspec->lli_counter,
- NULL);
- }
- rmask <<= 1;
- }
- if (crcs) {
- u32 cntr = dd->cspec->lli_counter;
-
- cntr += crcs;
- if (cntr) {
- if (cntr > dd->cspec->lli_thresh) {
- dd->cspec->lli_counter = 0;
- dd->cspec->lli_errs++;
- } else
- dd->cspec->lli_counter += cntr;
- }
- }
-
- if (ctxtrbits) {
- ctxtrbits =
- (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
- (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
- qib_handle_urcv(dd, ctxtrbits);
- }
- }
-
- if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
- qib_ib_piobufavail(dd);
-
- ret = IRQ_HANDLED;
-bail:
- return ret;
-}
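
/*
 * Sketch of the per-context dispatch in the handler above: one
 * RCVURG and one RCVAVAIL bit per context, walked with a two-bit
 * mask shifted once per context. The shifts assume the urgent bits
 * start at 0 and the avail bits at 12, on a 5-context chip; treat
 * them as illustrative rather than the verified register layout.
 */
#include <stdint.h>
#include <stdio.h>

#define RCVURG_SHIFT	0
#define RCVAVAIL_SHIFT	12
#define NCTXTS		5

int main(void)
{
	uint32_t istat = (1u << (RCVAVAIL_SHIFT + 3)) | (1u << RCVURG_SHIFT);
	uint32_t rmask = (1u << RCVAVAIL_SHIFT) | (1u << RCVURG_SHIFT);
	unsigned i;

	for (i = 0; i < NCTXTS; i++) {
		if (istat & rmask)
			printf("service context %u\n", i);	/* 0 and 3 */
		rmask <<= 1;
	}
	return 0;
}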
-
-/*
- * Set up our chip-specific interrupt handler
- * The interrupt type has already been setup, so
- * we just need to do the registration and error checking.
- */
-static void qib_setup_6120_interrupt(struct qib_devdata *dd)
-{
- int ret;
-
- /*
- * If the chip supports added error indication via GPIO pins,
- * enable interrupts on those bits so the interrupt routine
- * can count the events. Also set flag so interrupt routine
- * can know they are expected.
- */
- if (SYM_FIELD(dd->revision, Revision_R,
- ChipRevMinor) > 1) {
- /* Rev2+ reports extra errors via internal GPIO pins */
- dd->cspec->gpio_mask |= GPIO_ERRINTR_MASK;
- qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
- }
-
- ret = pci_request_irq(dd->pcidev, 0, qib_6120intr, NULL, dd,
- QIB_DRV_NAME);
- if (ret)
- qib_dev_err(dd,
- "Couldn't setup interrupt (irq=%d): %d\n",
- pci_irq_vector(dd->pcidev, 0), ret);
-}
-
-/**
- * pe_boardname - fill in the board name
- * @dd: the qlogic_ib device
- *
- * info is based on the board revision register
- */
-static void pe_boardname(struct qib_devdata *dd)
-{
- u32 boardid;
-
- boardid = SYM_FIELD(dd->revision, Revision,
- BoardID);
-
- switch (boardid) {
- case 2:
- dd->boardname = "InfiniPath_QLE7140";
- break;
- default:
- qib_dev_err(dd, "Unknown 6120 board with ID %u\n", boardid);
- dd->boardname = "Unknown_InfiniPath_6120";
- break;
- }
-
- if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2)
- qib_dev_err(dd,
- "Unsupported InfiniPath hardware revision %u.%u!\n",
- dd->majrev, dd->minrev);
-
- snprintf(dd->boardversion, sizeof(dd->boardversion),
- "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
- QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
- (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
- dd->majrev, dd->minrev,
- (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
-}
-
-/*
- * This routine sleeps, so it can only be called from user context, not
- * from interrupt context. If we need interrupt context, we can split
- * it into two routines.
- */
-static int qib_6120_setup_reset(struct qib_devdata *dd)
-{
- u64 val;
- int i;
- int ret;
- u16 cmdval;
- u8 int_line, clinesz;
-
- qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
-
- /* Use ERROR so it shows up in logs, etc. */
- qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
-
- /* no interrupts till re-initted */
- qib_6120_set_intr_state(dd, 0);
-
- dd->cspec->ibdeltainprog = 0;
- dd->cspec->ibsymdelta = 0;
- dd->cspec->iblnkerrdelta = 0;
-
- /*
- * Keep chip from being accessed until we are ready. Use
- * writeq() directly, to allow the write even though QIB_PRESENT
- * isn't set.
- */
- dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
- /* so we check interrupts work again */
- dd->z_int_counter = qib_int_counter(dd);
- val = dd->control | QLOGIC_IB_C_RESET;
- writeq(val, &dd->kregbase[kr_control]);
- mb(); /* prevent compiler re-ordering around actual reset */
-
- for (i = 1; i <= 5; i++) {
- /*
- * Allow MBIST, etc. to complete; longer on each retry.
- * We sometimes get machine checks from bus timeout if no
- * response, so for now, make it *really* long.
- */
- msleep(1000 + (1 + i) * 2000);
-
- qib_pcie_reenable(dd, cmdval, int_line, clinesz);
-
- /*
- * Use readq directly, so we don't need to mark it as PRESENT
- * until we get a successful indication that all is well.
- */
- val = readq(&dd->kregbase[kr_revision]);
- if (val == dd->revision) {
- dd->flags |= QIB_PRESENT; /* it's back */
- ret = qib_reinit_intr(dd);
- goto bail;
- }
- }
- ret = 0; /* failed */
-
-bail:
- if (ret) {
- if (qib_pcie_params(dd, dd->lbus_width, NULL))
- qib_dev_err(dd,
- "Reset failed to setup PCIe or interrupts; continuing anyway\n");
- /* clear the reset error, init error/hwerror mask */
- qib_6120_init_hwerrors(dd);
- /* for Rev2 error interrupts; nop for rev 1 */
- qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
- }
- return ret;
-}
-
-/**
- * qib_6120_put_tid - write a TID in chip
- * @dd: the qlogic_ib device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
- * for expected
- * @pa: physical address of in memory buffer; tidinvalid if freeing
- *
- * This exists as a separate routine to allow for special locking etc.
- * It's used for both the full cleanup on exit, as well as the normal
- * setup and teardown.
- */
-static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
- u32 type, unsigned long pa)
-{
- u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
- unsigned long flags;
- int tidx;
- spinlock_t *tidlockp; /* select appropriate spinlock */
-
- if (!dd->kregbase)
- return;
-
- if (pa != dd->tidinvalid) {
- if (pa & ((1U << 11) - 1)) {
- qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
- pa);
- return;
- }
- pa >>= 11;
- if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
- qib_dev_err(dd,
- "Physical page address 0x%lx larger than supported\n",
- pa);
- return;
- }
-
- if (type == RCVHQ_RCV_TYPE_EAGER)
- pa |= dd->tidtemplate;
- else /* for now, always full 4KB page */
- pa |= 2 << 29;
- }
-
- /*
- * Avoid chip issue by writing the scratch register
- * before and after the TID, and with an io write barrier.
- * We use a spinlock around the writes, so they can't intermix
- * with other TID (eager or expected) writes (the chip problem
- * is triggered by back to back TID writes). Unfortunately, this
- * call can be done from interrupt level for the ctxt 0 eager TIDs,
- * so we have to use irqsave locks.
- */
- /*
- * Assumes tidptr always > egrtidbase
- * if type == RCVHQ_RCV_TYPE_EAGER.
- */
- tidx = tidptr - dd->egrtidbase;
-
- tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->rcvhdrcnt)
- ? &dd->cspec->kernel_tid_lock : &dd->cspec->user_tid_lock;
- spin_lock_irqsave(tidlockp, flags);
- qib_write_kreg(dd, kr_scratch, 0xfeeddeaf);
- writel(pa, tidp32);
- qib_write_kreg(dd, kr_scratch, 0xdeadbeef);
- spin_unlock_irqrestore(tidlockp, flags);
-}
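
/*
 * Stand-alone sketch of the TID packing above: a 2KB-aligned
 * physical address is shifted down 11 bits and tagged with the
 * buffer-size template in bits 29+ (1 = 2KB, 2 = 4KB); expected
 * TIDs always use the full 4KB encoding. pack_tid() is a
 * hypothetical helper for illustration, not a driver function.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_tid(unsigned long pa, int is_eager,
			 unsigned long tidtemplate)
{
	if (pa & ((1UL << 11) - 1))
		return 0;		/* not 2KB aligned: reject */
	pa >>= 11;
	if (is_eager)
		pa |= tidtemplate;	/* e.g. 2UL << 29 for 4KB bufs */
	else
		pa |= 2UL << 29;	/* expected: always full 4KB page */
	return (uint32_t)pa;
}

int main(void)
{
	printf("0x%x\n", pack_tid(0x12345800UL, 1, 2UL << 29));
	return 0;
}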
-
-/**
- * qib_6120_put_tid_2 - write a TID in chip, Revision 2 or higher
- * @dd: the qlogic_ib device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0)
- * for expected
- * @pa: physical address of in memory buffer; tidinvalid if freeing
- *
- * This exists as a separate routine to allow for selection of the
- * appropriate "flavor". The static calls in cleanup just use the
- * revision-agnostic form, as they are not performance critical.
- */
-static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
- u32 type, unsigned long pa)
-{
- u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
-
- if (!dd->kregbase)
- return;
-
- if (pa != dd->tidinvalid) {
- if (pa & ((1U << 11) - 1)) {
- qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
- pa);
- return;
- }
- pa >>= 11;
- if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
- qib_dev_err(dd,
- "Physical page address 0x%lx larger than supported\n",
- pa);
- return;
- }
-
- if (type == RCVHQ_RCV_TYPE_EAGER)
- pa |= dd->tidtemplate;
- else /* for now, always full 4KB page */
- pa |= 2 << 29;
- }
- writel(pa, tidp32);
-}
-
-/**
- * qib_6120_clear_tids - clear all TID entries for a context, expected and eager
- * @dd: the qlogic_ib device
- * @rcd: the context
- *
- * clear all TID entries for a context, expected and eager.
- * Used from qib_close(). On this chip, TIDs are only 32 bits,
- * not 64, but they are still on 64 bit boundaries, so tidbase
- * is declared as u64 * for the pointer math, even though we write 32 bits
- */
-static void qib_6120_clear_tids(struct qib_devdata *dd,
- struct qib_ctxtdata *rcd)
-{
- u64 __iomem *tidbase;
- unsigned long tidinv;
- u32 ctxt;
- int i;
-
- if (!dd->kregbase || !rcd)
- return;
-
- ctxt = rcd->ctxt;
-
- tidinv = dd->tidinvalid;
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->kregbase) +
- dd->rcvtidbase +
- ctxt * dd->rcvtidcnt * sizeof(*tidbase));
-
- for (i = 0; i < dd->rcvtidcnt; i++)
- /* use func pointer because could be one of two funcs */
- dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
- tidinv);
-
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->kregbase) +
- dd->rcvegrbase +
- rcd->rcvegr_tid_base * sizeof(*tidbase));
-
- for (i = 0; i < rcd->rcvegrcnt; i++)
- /* use func pointer because could be one of two funcs */
- dd->f_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
- tidinv);
-}
-
-/**
- * qib_6120_tidtemplate - setup constants for TID updates
- * @dd: the qlogic_ib device
- *
- * We setup stuff that we use a lot, to avoid calculating each time
- */
-static void qib_6120_tidtemplate(struct qib_devdata *dd)
-{
- u32 egrsize = dd->rcvegrbufsize;
-
- /*
- * For now, we always allocate 4KB buffers (at init) so we can
- * receive max size packets. We may want a module parameter to
- * specify 2KB or 4KB and/or make it per-ctxt instead of per-device
- * for those who want to reduce memory footprint. Note that
- * rcvhdrentsize must be large enough to hold the largest
- * IB header (currently 96 bytes) that we expect to handle (plus of
- * course the 2 dwords of RHF).
- */
- if (egrsize == 2048)
- dd->tidtemplate = 1U << 29;
- else if (egrsize == 4096)
- dd->tidtemplate = 2U << 29;
- dd->tidinvalid = 0;
-}
-
-int __attribute__((weak)) qib_unordered_wc(void)
-{
- return 0;
-}
-
-/**
- * qib_6120_get_base_info - set chip-specific flags for user code
- * @rcd: the qlogic_ib ctxt
- * @kinfo: qib_base_info pointer
- *
- * We set the PCIE flag because the lower bandwidth on PCIe vs
- * HyperTransport can affect some user packet algorithms.
- */
-static int qib_6120_get_base_info(struct qib_ctxtdata *rcd,
- struct qib_base_info *kinfo)
-{
- if (qib_unordered_wc())
- kinfo->spi_runtime_flags |= QIB_RUNTIME_FORCE_WC_ORDER;
-
- kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
- QIB_RUNTIME_FORCE_PIOAVAIL | QIB_RUNTIME_PIO_REGSWAPPED;
- return 0;
-}
-
-static struct qib_message_header *
-qib_6120_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
-{
- return (struct qib_message_header *)
- &rhf_addr[sizeof(u64) / sizeof(u32)];
-}
-
-static void qib_6120_config_ctxts(struct qib_devdata *dd)
-{
- dd->ctxtcnt = qib_read_kreg32(dd, kr_portcnt);
- if (qib_n_krcv_queues > 1) {
- dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
- if (dd->first_user_ctxt > dd->ctxtcnt)
- dd->first_user_ctxt = dd->ctxtcnt;
- dd->qpn_mask = dd->first_user_ctxt <= 2 ? 2 : 6;
- } else
- dd->first_user_ctxt = dd->num_pports;
- dd->n_krcv_queues = dd->first_user_ctxt;
-}
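
/*
 * Worked example of the split above, assuming the 5 contexts this
 * chip reports, one port, and qib_n_krcv_queues = 2: contexts 0-1
 * stay in the kernel, 2-4 go to userspace, and qpn_mask follows
 * first_user_ctxt.
 */
#include <stdio.h>

int main(void)
{
	unsigned ctxtcnt = 5, num_pports = 1, n_krcv_queues = 2;
	unsigned first_user_ctxt, qpn_mask;

	first_user_ctxt = n_krcv_queues * num_pports;
	if (first_user_ctxt > ctxtcnt)
		first_user_ctxt = ctxtcnt;
	qpn_mask = first_user_ctxt <= 2 ? 2 : 6;
	printf("first_user_ctxt=%u qpn_mask=%u\n",
	       first_user_ctxt, qpn_mask);	/* 2 and 2 */
	return 0;
}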
-
-static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
- u32 updegr, u32 egrhd, u32 npkts)
-{
- if (updegr)
- qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
-}
-
-static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd)
-{
- u32 head, tail;
-
- head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
- if (rcd->rcvhdrtail_kvaddr)
- tail = qib_get_rcvhdrtail(rcd);
- else
- tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
- return head == tail;
-}
-
-/*
- * Used when we close any ctxt, for DMA already in flight
- * at close. Can't be done until we know hdrq size, so not
- * early in chip init.
- */
-static void alloc_dummy_hdrq(struct qib_devdata *dd)
-{
- dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev,
- dd->rcd[0]->rcvhdrq_size,
- &dd->cspec->dummy_hdrq_phys,
- GFP_ATOMIC);
- if (!dd->cspec->dummy_hdrq) {
- qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n");
- /* fallback to just 0'ing */
- dd->cspec->dummy_hdrq_phys = 0UL;
- }
-}
-
-/*
- * Modify the RCVCTRL register in chip-specific way. This
- * is a function because bit positions and (future) register
- * location is chip-specific, but the needed operations are
- * generic. <op> is a bit-mask because we often want to
- * do multiple modifications.
- */
-static void rcvctrl_6120_mod(struct qib_pportdata *ppd, unsigned int op,
- int ctxt)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 mask, val;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
-
- if (op & QIB_RCVCTRL_TAILUPD_ENB)
- dd->rcvctrl |= (1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
- if (op & QIB_RCVCTRL_TAILUPD_DIS)
- dd->rcvctrl &= ~(1ULL << QLOGIC_IB_R_TAILUPD_SHIFT);
- if (op & QIB_RCVCTRL_PKEY_ENB)
- dd->rcvctrl &= ~(1ULL << IBA6120_R_PKEY_DIS_SHIFT);
- if (op & QIB_RCVCTRL_PKEY_DIS)
- dd->rcvctrl |= (1ULL << IBA6120_R_PKEY_DIS_SHIFT);
- if (ctxt < 0)
- mask = (1ULL << dd->ctxtcnt) - 1;
- else
- mask = (1ULL << ctxt);
- if (op & QIB_RCVCTRL_CTXT_ENB) {
- /* always done for specific ctxt */
- dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
- if (!(dd->flags & QIB_NODMA_RTAIL))
- dd->rcvctrl |= 1ULL << QLOGIC_IB_R_TAILUPD_SHIFT;
- /* Write these registers before the context is enabled. */
- qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
- dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
- qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
- dd->rcd[ctxt]->rcvhdrq_phys);
-
- if (ctxt == 0 && !dd->cspec->dummy_hdrq)
- alloc_dummy_hdrq(dd);
- }
- if (op & QIB_RCVCTRL_CTXT_DIS)
- dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
- if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
- dd->rcvctrl |= (mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
- if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
- dd->rcvctrl &= ~(mask << QLOGIC_IB_R_INTRAVAIL_SHIFT);
- qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
- if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
- /* arm rcv interrupt */
- val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
- dd->rhdrhead_intr_off;
- qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
- }
- if (op & QIB_RCVCTRL_CTXT_ENB) {
- /*
- * Init the context registers also; if we were
- * disabled, tail and head should both be zero
- * already from the enable, but since we don't
- * know, we have to do it explicitly.
- */
- val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
- qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
-
- val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
- dd->rcd[ctxt]->head = val;
- /* If kctxt, interrupt on next receive. */
- if (ctxt < dd->first_user_ctxt)
- val |= dd->rhdrhead_intr_off;
- qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
- }
- if (op & QIB_RCVCTRL_CTXT_DIS) {
- /*
- * Be paranoid, and never write 0's to these, just use an
- * unused page. Of course,
- * rcvhdraddr points to a large chunk of memory, so this
- * could still trash things, but at least it won't trash
- * page 0, and by disabling the ctxt, it should stop "soon",
- * even if a packet or two is already in flight after we
- * disabled the ctxt. Only 6120 has this issue.
- */
- if (ctxt >= 0) {
- qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
- dd->cspec->dummy_hdrq_phys);
- qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
- dd->cspec->dummy_hdrq_phys);
- } else {
- unsigned i;
-
- for (i = 0; i < dd->cfgctxts; i++) {
- qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
- i, dd->cspec->dummy_hdrq_phys);
- qib_write_kreg_ctxt(dd, kr_rcvhdraddr,
- i, dd->cspec->dummy_hdrq_phys);
- }
- }
- }
- spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
-}
-
-/*
- * Modify the SENDCTRL register in chip-specific way. This
- * is a function because there may be multiple such registers with
- * slightly different layouts. Only the operations actually used
- * are implemented so far.
- * Chip requires no back-back sendctrl writes, so write
- * scratch register after writing sendctrl
- */
-static void sendctrl_6120_mod(struct qib_pportdata *ppd, u32 op)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 tmp_dd_sendctrl;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
-
- /* First the ones that are "sticky", saved in shadow */
- if (op & QIB_SENDCTRL_CLEAR)
- dd->sendctrl = 0;
- if (op & QIB_SENDCTRL_SEND_DIS)
- dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOEnable);
- else if (op & QIB_SENDCTRL_SEND_ENB)
- dd->sendctrl |= SYM_MASK(SendCtrl, PIOEnable);
- if (op & QIB_SENDCTRL_AVAIL_DIS)
- dd->sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
- else if (op & QIB_SENDCTRL_AVAIL_ENB)
- dd->sendctrl |= SYM_MASK(SendCtrl, PIOBufAvailUpd);
-
- if (op & QIB_SENDCTRL_DISARM_ALL) {
- u32 i, last;
-
- tmp_dd_sendctrl = dd->sendctrl;
- /*
- * disarm any that are not yet launched, disabling sends
- * and updates until done.
- */
- last = dd->piobcnt2k + dd->piobcnt4k;
- tmp_dd_sendctrl &=
- ~(SYM_MASK(SendCtrl, PIOEnable) |
- SYM_MASK(SendCtrl, PIOBufAvailUpd));
- for (i = 0; i < last; i++) {
- qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl |
- SYM_MASK(SendCtrl, Disarm) | i);
- qib_write_kreg(dd, kr_scratch, 0);
- }
- }
-
- tmp_dd_sendctrl = dd->sendctrl;
-
- if (op & QIB_SENDCTRL_FLUSH)
- tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
- if (op & QIB_SENDCTRL_DISARM)
- tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
- ((op & QIB_6120_SendCtrl_DisarmPIOBuf_RMASK) <<
- SYM_LSB(SendCtrl, DisarmPIOBuf));
- if (op & QIB_SENDCTRL_AVAIL_BLIP)
- tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, PIOBufAvailUpd);
-
- qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
-
- if (op & QIB_SENDCTRL_AVAIL_BLIP) {
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-
- if (op & QIB_SENDCTRL_FLUSH) {
- u32 v;
- /*
- * ensure writes have hit chip, then do a few
- * more reads, to allow DMA of pioavail registers
- * to occur, so in-memory copy is in sync with
- * the chip. Not always safe to sleep.
- */
- v = qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_scratch, v);
- v = qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_scratch, v);
- qib_read_kreg32(dd, kr_scratch);
- }
-}
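
/*
 * Sketch of the write discipline above: the chip tolerates no
 * back-to-back sendctrl writes, so every sendctrl update is chased
 * with a scratch write. MMIO is modeled here with a plain array;
 * the register names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

enum { KR_SENDCTRL, KR_SCRATCH, NREGS };
static uint64_t regs[NREGS];

static void write_kreg(int r, uint64_t v)
{
	regs[r] = v;			/* stands in for an MMIO writeq */
}

static void write_sendctrl(uint64_t v)
{
	write_kreg(KR_SENDCTRL, v);
	write_kreg(KR_SCRATCH, 0);	/* spacer write the chip needs */
}

int main(void)
{
	write_sendctrl(1ULL << 3);
	printf("sendctrl=0x%llx\n", (unsigned long long)regs[KR_SENDCTRL]);
	return 0;
}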
-
-/**
- * qib_portcntr_6120 - read a per-port counter
- * @ppd: the qlogic_ib device
- * @reg: the counter to snapshot
- */
-static u64 qib_portcntr_6120(struct qib_pportdata *ppd, u32 reg)
-{
- u64 ret = 0ULL;
- struct qib_devdata *dd = ppd->dd;
- u16 creg;
- /* 0xffff for unimplemented or synthesized counters */
- static const u16 xlator[] = {
- [QIBPORTCNTR_PKTSEND] = cr_pktsend,
- [QIBPORTCNTR_WORDSEND] = cr_wordsend,
- [QIBPORTCNTR_PSXMITDATA] = 0xffff,
- [QIBPORTCNTR_PSXMITPKTS] = 0xffff,
- [QIBPORTCNTR_PSXMITWAIT] = 0xffff,
- [QIBPORTCNTR_SENDSTALL] = cr_sendstall,
- [QIBPORTCNTR_PKTRCV] = cr_pktrcv,
- [QIBPORTCNTR_PSRCVDATA] = 0xffff,
- [QIBPORTCNTR_PSRCVPKTS] = 0xffff,
- [QIBPORTCNTR_RCVEBP] = cr_rcvebp,
- [QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
- [QIBPORTCNTR_WORDRCV] = cr_wordrcv,
- [QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
- [QIBPORTCNTR_RXLOCALPHYERR] = 0xffff,
- [QIBPORTCNTR_RXVLERR] = 0xffff,
- [QIBPORTCNTR_ERRICRC] = cr_erricrc,
- [QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
- [QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
- [QIBPORTCNTR_BADFORMAT] = cr_badformat,
- [QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
- [QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
- [QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
- [QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
- [QIBPORTCNTR_EXCESSBUFOVFL] = 0xffff,
- [QIBPORTCNTR_ERRLINK] = cr_errlink,
- [QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
- [QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
- [QIBPORTCNTR_LLI] = 0xffff,
- [QIBPORTCNTR_PSINTERVAL] = 0xffff,
- [QIBPORTCNTR_PSSTART] = 0xffff,
- [QIBPORTCNTR_PSSTAT] = 0xffff,
- [QIBPORTCNTR_VL15PKTDROP] = 0xffff,
- [QIBPORTCNTR_ERRPKEY] = cr_errpkey,
- [QIBPORTCNTR_KHDROVFL] = 0xffff,
- };
-
- if (reg >= ARRAY_SIZE(xlator)) {
- qib_devinfo(ppd->dd->pcidev,
- "Unimplemented portcounter %u\n", reg);
- goto done;
- }
- creg = xlator[reg];
-
- /* handle counter requests not implemented as chip counters */
- if (reg == QIBPORTCNTR_LLI)
- ret = dd->cspec->lli_errs;
- else if (reg == QIBPORTCNTR_EXCESSBUFOVFL)
- ret = dd->cspec->overrun_thresh_errs;
- else if (reg == QIBPORTCNTR_KHDROVFL) {
- int i;
-
- /* sum over all kernel contexts */
- for (i = 0; i < dd->first_user_ctxt; i++)
- ret += read_6120_creg32(dd, cr_portovfl + i);
- } else if (reg == QIBPORTCNTR_PSSTAT)
- ret = dd->cspec->pma_sample_status;
- if (creg == 0xffff)
- goto done;
-
- /*
- * only the fast-incrementing counters are 64-bit; use 32-bit reads
- * to avoid two independent reads on Opteron
- */
- if (creg == cr_wordsend || creg == cr_wordrcv ||
- creg == cr_pktsend || creg == cr_pktrcv)
- ret = read_6120_creg(dd, creg);
- else
- ret = read_6120_creg32(dd, creg);
- if (creg == cr_ibsymbolerr) {
- if (dd->cspec->ibdeltainprog)
- ret -= ret - dd->cspec->ibsymsnap;
- ret -= dd->cspec->ibsymdelta;
- } else if (creg == cr_iblinkerrrecov) {
- if (dd->cspec->ibdeltainprog)
- ret -= ret - dd->cspec->iblnkerrsnap;
- ret -= dd->cspec->iblnkerrdelta;
- }
- if (reg == QIBPORTCNTR_RXDROPPKT) /* add special cased count */
- ret += dd->cspec->rxfc_unsupvl_errs;
-
-done:
- return ret;
-}
-
-/*
- * Device counter names (not port-specific), one line per stat,
- * single string. Used by utilities like ipathstats to print the stats
- * in a way which works for different versions of drivers, without changing
- * the utility. Names need to be 12 chars or less (w/o newline), for proper
- * display by utility.
- * Non-error counters are first.
- * Start of "error" conters is indicated by a leading "E " on the first
- * "error" counter, and doesn't count in label length.
- * The EgrOvfl list needs to be last so we truncate them at the configured
- * context count for the device.
- * cntr6120indices contains the corresponding register indices.
- */
-static const char cntr6120names[] =
- "Interrupts\n"
- "HostBusStall\n"
- "E RxTIDFull\n"
- "RxTIDInvalid\n"
- "Ctxt0EgrOvfl\n"
- "Ctxt1EgrOvfl\n"
- "Ctxt2EgrOvfl\n"
- "Ctxt3EgrOvfl\n"
- "Ctxt4EgrOvfl\n";
-
-static const size_t cntr6120indices[] = {
- cr_lbint,
- cr_lbflowstall,
- cr_errtidfull,
- cr_errtidvalid,
- cr_portovfl + 0,
- cr_portovfl + 1,
- cr_portovfl + 2,
- cr_portovfl + 3,
- cr_portovfl + 4,
-};
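
/*
 * Sketch of how a stats utility consumes the two parallel exports:
 * the newline-separated name blob and the value array read at the
 * same indices. The values here are made up for illustration.
 */
#include <stdio.h>
#include <string.h>

static const char names[] = "Interrupts\nHostBusStall\nE RxTIDFull\n";

int main(void)
{
	unsigned long long vals[] = { 42, 0, 7 };
	const char *s = names;
	size_t i = 0;

	while (*s && i < sizeof(vals) / sizeof(vals[0])) {
		const char *nl = strchr(s, '\n');

		printf("%-12.*s %llu\n", (int)(nl - s), s, vals[i++]);
		s = nl + 1;
	}
	return 0;
}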
-
-/*
- * same as cntr6120names and cntr6120indices, but for port-specific counters.
- * portcntr6120indices is somewhat complicated by some registers needing
- * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
- */
-static const char portcntr6120names[] =
- "TxPkt\n"
- "TxFlowPkt\n"
- "TxWords\n"
- "RxPkt\n"
- "RxFlowPkt\n"
- "RxWords\n"
- "TxFlowStall\n"
- "E IBStatusChng\n"
- "IBLinkDown\n"
- "IBLnkRecov\n"
- "IBRxLinkErr\n"
- "IBSymbolErr\n"
- "RxLLIErr\n"
- "RxBadFormat\n"
- "RxBadLen\n"
- "RxBufOvrfl\n"
- "RxEBP\n"
- "RxFlowCtlErr\n"
- "RxICRCerr\n"
- "RxLPCRCerr\n"
- "RxVCRCerr\n"
- "RxInvalLen\n"
- "RxInvalPKey\n"
- "RxPktDropped\n"
- "TxBadLength\n"
- "TxDropped\n"
- "TxInvalLen\n"
- "TxUnderrun\n"
- "TxUnsupVL\n"
- ;
-
-#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
-static const size_t portcntr6120indices[] = {
- QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
- cr_pktsendflow,
- QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
- QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
- cr_pktrcvflowctrl,
- QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
- QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
- cr_ibstatuschange,
- QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
- QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
- QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
- QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
- cr_rcvflowctrl_err,
- QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
- cr_invalidslen,
- cr_senddropped,
- cr_errslen,
- cr_sendunderrun,
- cr_txunsupvl,
-};
-
-/* do all the setup to make the counter reads efficient later */
-static void init_6120_cntrnames(struct qib_devdata *dd)
-{
- int i, j = 0;
- char *s;
-
- for (i = 0, s = (char *)cntr6120names; s && j <= dd->cfgctxts;
- i++) {
- /* we always have at least one counter before the egrovfl */
- if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
- j = 1;
- s = strchr(s + 1, '\n');
- if (s && j)
- j++;
- }
- dd->cspec->ncntrs = i;
- if (!s)
- /* full list; size is without terminating null */
- dd->cspec->cntrnamelen = sizeof(cntr6120names) - 1;
- else
- dd->cspec->cntrnamelen = 1 + s - cntr6120names;
- dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64),
- GFP_KERNEL);
-
- for (i = 0, s = (char *)portcntr6120names; s; i++)
- s = strchr(s + 1, '\n');
- dd->cspec->nportcntrs = i - 1;
- dd->cspec->portcntrnamelen = sizeof(portcntr6120names) - 1;
- dd->cspec->portcntrs = kmalloc_array(dd->cspec->nportcntrs,
- sizeof(u64),
- GFP_KERNEL);
-}
-
-static u32 qib_read_6120cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
- u64 **cntrp)
-{
- u32 ret;
-
- if (namep) {
- ret = dd->cspec->cntrnamelen;
- if (pos >= ret)
- ret = 0; /* final read after getting everything */
- else
- *namep = (char *)cntr6120names;
- } else {
- u64 *cntr = dd->cspec->cntrs;
- int i;
-
- ret = dd->cspec->ncntrs * sizeof(u64);
- if (!cntr || pos >= ret) {
- /* everything read, or couldn't get memory */
- ret = 0;
- goto done;
- }
- *cntrp = cntr;
- for (i = 0; i < dd->cspec->ncntrs; i++)
- *cntr++ = read_6120_creg32(dd, cntr6120indices[i]);
- }
-done:
- return ret;
-}
-
-static u32 qib_read_6120portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
- char **namep, u64 **cntrp)
-{
- u32 ret;
-
- if (namep) {
- ret = dd->cspec->portcntrnamelen;
- if (pos >= ret)
- ret = 0; /* final read after getting everything */
- else
- *namep = (char *)portcntr6120names;
- } else {
- u64 *cntr = dd->cspec->portcntrs;
- struct qib_pportdata *ppd = &dd->pport[port];
- int i;
-
- ret = dd->cspec->nportcntrs * sizeof(u64);
- if (!cntr || pos >= ret) {
- /* everything read, or couldn't get memory */
- ret = 0;
- goto done;
- }
- *cntrp = cntr;
- for (i = 0; i < dd->cspec->nportcntrs; i++) {
- if (portcntr6120indices[i] & _PORT_VIRT_FLAG)
- *cntr++ = qib_portcntr_6120(ppd,
- portcntr6120indices[i] &
- ~_PORT_VIRT_FLAG);
- else
- *cntr++ = read_6120_creg32(dd,
- portcntr6120indices[i]);
- }
- }
-done:
- return ret;
-}
-
-static void qib_chk_6120_errormask(struct qib_devdata *dd)
-{
- static u32 fixed;
- u32 ctrl;
- unsigned long errormask;
- unsigned long hwerrs;
-
- if (!dd->cspec->errormask || !(dd->flags & QIB_INITTED))
- return;
-
- errormask = qib_read_kreg64(dd, kr_errmask);
-
- if (errormask == dd->cspec->errormask)
- return;
- fixed++;
-
- hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
- ctrl = qib_read_kreg32(dd, kr_control);
-
- qib_write_kreg(dd, kr_errmask,
- dd->cspec->errormask);
-
- if ((hwerrs & dd->cspec->hwerrmask) ||
- (ctrl & QLOGIC_IB_C_FREEZEMODE)) {
- qib_write_kreg(dd, kr_hwerrclear, 0ULL);
- qib_write_kreg(dd, kr_errclear, 0ULL);
- /* force re-interrupt of pending events, just in case */
- qib_write_kreg(dd, kr_intclear, 0ULL);
- qib_devinfo(dd->pcidev,
- "errormask fixed(%u) %lx->%lx, ctrl %x hwerr %lx\n",
- fixed, errormask, (unsigned long)dd->cspec->errormask,
- ctrl, hwerrs);
- }
-}
-
-/**
- * qib_get_6120_faststats - get word counters from chip before they overflow
- * @t: contains a pointer to the qlogic_ib device qib_devdata
- *
- * This needs more work; in particular, a decision on whether we
- * really need traffic_wds done the way it is.
- * Called from add_timer.
- */
-static void qib_get_6120_faststats(struct timer_list *t)
-{
- struct qib_devdata *dd = timer_container_of(dd, t, stats_timer);
- struct qib_pportdata *ppd = dd->pport;
- unsigned long flags;
- u64 traffic_wds;
-
- /*
- * don't access the chip while running diags, or memory diags can
- * fail
- */
- if (!(dd->flags & QIB_INITTED) || dd->diag_client)
- /* but re-arm the timer, for the diags case; won't hurt otherwise */
- goto done;
-
- /*
- * We now try to maintain an activity timer, based on traffic
- * exceeding a threshold, so we need to check the word-counts
- * even if they are 64-bit.
- */
- traffic_wds = qib_portcntr_6120(ppd, cr_wordsend) +
- qib_portcntr_6120(ppd, cr_wordrcv);
- spin_lock_irqsave(&dd->eep_st_lock, flags);
- traffic_wds -= dd->traffic_wds;
- dd->traffic_wds += traffic_wds;
- spin_unlock_irqrestore(&dd->eep_st_lock, flags);
-
- qib_chk_6120_errormask(dd);
-done:
- mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
-}
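
/*
 * Sketch of the delta accounting above: read the running chip total,
 * subtract the shadow to get the words moved this tick, then fold
 * the delta back so the shadow again equals the chip total.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long shadow = 1000;	/* dd->traffic_wds */
	unsigned long long chip_total = 1250;	/* wordsend + wordrcv */
	unsigned long long traffic_wds = chip_total;

	traffic_wds -= shadow;	/* 250 words since the last tick */
	shadow += traffic_wds;	/* shadow now equals chip_total */
	printf("delta=%llu shadow=%llu\n", traffic_wds, shadow);
	return 0;
}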
-
-/* no interrupt fallback for these chips */
-static int qib_6120_nointr_fallback(struct qib_devdata *dd)
-{
- return 0;
-}
-
-/*
- * reset the XGXS (between serdes and IBC). Slightly less intrusive
- * than resetting the IBC or external link state, and useful in some
- * cases to cause some retraining. To do this right, we reset IBC
- * as well.
- */
-static void qib_6120_xgxs_reset(struct qib_pportdata *ppd)
-{
- u64 val, prev_val;
- struct qib_devdata *dd = ppd->dd;
-
- prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
- val = prev_val | QLOGIC_IB_XGXS_RESET;
- prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
- qib_write_kreg(dd, kr_control,
- dd->control & ~QLOGIC_IB_C_LINKENABLE);
- qib_write_kreg(dd, kr_xgxs_cfg, val);
- qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
- qib_write_kreg(dd, kr_control, dd->control);
-}
-
-static int qib_6120_get_ib_cfg(struct qib_pportdata *ppd, int which)
-{
- int ret;
-
- switch (which) {
- case QIB_IB_CFG_LWID:
- ret = ppd->link_width_active;
- break;
-
- case QIB_IB_CFG_SPD:
- ret = ppd->link_speed_active;
- break;
-
- case QIB_IB_CFG_LWID_ENB:
- ret = ppd->link_width_enabled;
- break;
-
- case QIB_IB_CFG_SPD_ENB:
- ret = ppd->link_speed_enabled;
- break;
-
- case QIB_IB_CFG_OP_VLS:
- ret = ppd->vls_operational;
- break;
-
- case QIB_IB_CFG_VL_HIGH_CAP:
- ret = 0;
- break;
-
- case QIB_IB_CFG_VL_LOW_CAP:
- ret = 0;
- break;
-
- case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
- ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
- OverrunThreshold);
- break;
-
- case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
- ret = SYM_FIELD(ppd->dd->cspec->ibcctrl, IBCCtrl,
- PhyerrThreshold);
- break;
-
- case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
- /* will only take effect when the link state changes */
- ret = (ppd->dd->cspec->ibcctrl &
- SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
- IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
- break;
-
- case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
- ret = 0; /* no heartbeat on this chip */
- break;
-
- case QIB_IB_CFG_PMA_TICKS:
- ret = 250; /* 1 usec. */
- break;
-
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-/*
- * We assume range checking is already done, if needed.
- */
-static int qib_6120_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
-{
- struct qib_devdata *dd = ppd->dd;
- int ret = 0;
- u64 val64;
- u16 lcmd, licmd;
-
- switch (which) {
- case QIB_IB_CFG_LWID_ENB:
- ppd->link_width_enabled = val;
- break;
-
- case QIB_IB_CFG_SPD_ENB:
- ppd->link_speed_enabled = val;
- break;
-
- case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
- val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
- OverrunThreshold);
- if (val64 != val) {
- dd->cspec->ibcctrl &=
- ~SYM_MASK(IBCCtrl, OverrunThreshold);
- dd->cspec->ibcctrl |= (u64) val <<
- SYM_LSB(IBCCtrl, OverrunThreshold);
- qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
- break;
-
- case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
- val64 = SYM_FIELD(dd->cspec->ibcctrl, IBCCtrl,
- PhyerrThreshold);
- if (val64 != val) {
- dd->cspec->ibcctrl &=
- ~SYM_MASK(IBCCtrl, PhyerrThreshold);
- dd->cspec->ibcctrl |= (u64) val <<
- SYM_LSB(IBCCtrl, PhyerrThreshold);
- qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
- break;
-
- case QIB_IB_CFG_PKEYS: /* update pkeys */
- val64 = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
- ((u64) ppd->pkeys[2] << 32) |
- ((u64) ppd->pkeys[3] << 48);
- qib_write_kreg(dd, kr_partitionkey, val64);
- break;
-
- case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
- /* will only take effect when the link state changes */
- if (val == IB_LINKINITCMD_POLL)
- dd->cspec->ibcctrl &=
- ~SYM_MASK(IBCCtrl, LinkDownDefaultState);
- else /* SLEEP */
- dd->cspec->ibcctrl |=
- SYM_MASK(IBCCtrl, LinkDownDefaultState);
- qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- break;
-
- case QIB_IB_CFG_MTU: /* update the MTU in IBC */
- /*
- * Update our housekeeping variables, and set IBC max
- * size, same as init code; max IBC is max we allow in
- * buffer, less the qword pbc, plus 1 for ICRC, in dwords
- * Set even if it's unchanged, print debug message only
- * on changes.
- */
- val = (ppd->ibmaxlen >> 2) + 1;
- dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
- dd->cspec->ibcctrl |= (u64)val <<
- SYM_LSB(IBCCtrl, MaxPktLen);
- qib_write_kreg(dd, kr_ibcctrl, dd->cspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- break;
-
- case QIB_IB_CFG_LSTATE: /* set the IB link state */
- switch (val & 0xffff0000) {
- case IB_LINKCMD_DOWN:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
- if (!dd->cspec->ibdeltainprog) {
- dd->cspec->ibdeltainprog = 1;
- dd->cspec->ibsymsnap =
- read_6120_creg32(dd, cr_ibsymbolerr);
- dd->cspec->iblnkerrsnap =
- read_6120_creg32(dd, cr_iblinkerrrecov);
- }
- break;
-
- case IB_LINKCMD_ARMED:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
- break;
-
- case IB_LINKCMD_ACTIVE:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
- break;
-
- default:
- ret = -EINVAL;
- qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
- goto bail;
- }
- switch (val & 0xffff) {
- case IB_LINKINITCMD_NOP:
- licmd = 0;
- break;
-
- case IB_LINKINITCMD_POLL:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
- break;
-
- case IB_LINKINITCMD_SLEEP:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
- break;
-
- case IB_LINKINITCMD_DISABLE:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
- break;
-
- default:
- ret = -EINVAL;
- qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
- val & 0xffff);
- goto bail;
- }
- qib_set_ib_6120_lstate(ppd, lcmd, licmd);
- goto bail;
-
- case QIB_IB_CFG_HRTBT:
- ret = -EINVAL;
- break;
-
- default:
- ret = -EINVAL;
- }
-bail:
- return ret;
-}
-
-static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
-{
- int ret = 0;
-
- if (!strncmp(what, "ibc", 3)) {
- ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
- qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
- ppd->dd->unit, ppd->port);
- } else if (!strncmp(what, "off", 3)) {
- ppd->dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
- qib_devinfo(ppd->dd->pcidev,
- "Disabling IB%u:%u IBC loopback (normal)\n",
- ppd->dd->unit, ppd->port);
- } else
- ret = -EINVAL;
- if (!ret) {
- qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->dd->cspec->ibcctrl);
- qib_write_kreg(ppd->dd, kr_scratch, 0);
- }
- return ret;
-}
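-
-/*
- * Editorial note: callers pass "ibc" to enable internal IBC loopback
- * and "off" to restore normal (non-loopback) operation; any other
- * string is rejected with -EINVAL, per the strncmp checks above.
- */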
-
-static void pma_6120_timer(struct timer_list *t)
-{
- struct qib_chip_specific *cs = timer_container_of(cs, t, pma_timer);
- struct qib_pportdata *ppd = cs->ppd;
- struct qib_ibport *ibp = &ppd->ibport_data;
- unsigned long flags;
-
- spin_lock_irqsave(&ibp->rvp.lock, flags);
- if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED) {
- cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
- qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
- &cs->spkts, &cs->rpkts, &cs->xmit_wait);
- mod_timer(&cs->pma_timer,
- jiffies + usecs_to_jiffies(ibp->rvp.pma_sample_interval));
- } else if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
- u64 ta, tb, tc, td, te;
-
- cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
- qib_snapshot_counters(ppd, &ta, &tb, &tc, &td, &te);
-
- cs->sword = ta - cs->sword;
- cs->rword = tb - cs->rword;
- cs->spkts = tc - cs->spkts;
- cs->rpkts = td - cs->rpkts;
- cs->xmit_wait = te - cs->xmit_wait;
- }
- spin_unlock_irqrestore(&ibp->rvp.lock, flags);
-}
-
-/*
- * Note that the caller has the ibp->rvp.lock held.
- */
-static void qib_set_cntr_6120_sample(struct qib_pportdata *ppd, u32 intv,
- u32 start)
-{
- struct qib_chip_specific *cs = ppd->dd->cspec;
-
- if (start && intv) {
- cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
- mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(start));
- } else if (intv) {
- cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
- qib_snapshot_counters(ppd, &cs->sword, &cs->rword,
- &cs->spkts, &cs->rpkts, &cs->xmit_wait);
- mod_timer(&cs->pma_timer, jiffies + usecs_to_jiffies(intv));
- } else {
- cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
- cs->sword = 0;
- cs->rword = 0;
- cs->spkts = 0;
- cs->rpkts = 0;
- cs->xmit_wait = 0;
- }
-}
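-
-/*
- * Editorial summary of the sampling state machine formed with
- * pma_6120_timer() above: STARTED arms the timer for the sample start
- * time, RUNNING snapshots the counters and arms it for the sample
- * interval, and the next timer run turns a second snapshot into
- * deltas, e.g.
- *
- *	cs->sword = sword_now - cs->sword;
- *
- * leaving the results in cs->{sword,rword,spkts,rpkts,xmit_wait}.
- */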
-
-static u32 qib_6120_iblink_state(u64 ibcs)
-{
- u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
-
- switch (state) {
- case IB_6120_L_STATE_INIT:
- state = IB_PORT_INIT;
- break;
- case IB_6120_L_STATE_ARM:
- state = IB_PORT_ARMED;
- break;
- case IB_6120_L_STATE_ACTIVE:
- case IB_6120_L_STATE_ACT_DEFER:
- state = IB_PORT_ACTIVE;
- break;
- default:
- fallthrough;
- case IB_6120_L_STATE_DOWN:
- state = IB_PORT_DOWN;
- break;
- }
- return state;
-}
-
-/* returns the IBTA port state, rather than the IBC link training state */
-static u8 qib_6120_phys_portstate(u64 ibcs)
-{
- u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
- return qib_6120_physportstate[state];
-}
-
-static int qib_6120_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
-
- if (ibup) {
- if (ppd->dd->cspec->ibdeltainprog) {
- ppd->dd->cspec->ibdeltainprog = 0;
- ppd->dd->cspec->ibsymdelta +=
- read_6120_creg32(ppd->dd, cr_ibsymbolerr) -
- ppd->dd->cspec->ibsymsnap;
- ppd->dd->cspec->iblnkerrdelta +=
- read_6120_creg32(ppd->dd, cr_iblinkerrrecov) -
- ppd->dd->cspec->iblnkerrsnap;
- }
- qib_hol_init(ppd);
- } else {
- ppd->dd->cspec->lli_counter = 0;
- if (!ppd->dd->cspec->ibdeltainprog) {
- ppd->dd->cspec->ibdeltainprog = 1;
- ppd->dd->cspec->ibsymsnap =
- read_6120_creg32(ppd->dd, cr_ibsymbolerr);
- ppd->dd->cspec->iblnkerrsnap =
- read_6120_creg32(ppd->dd, cr_iblinkerrrecov);
- }
- qib_hol_down(ppd);
- }
-
- qib_6120_setup_setextled(ppd, ibup);
-
- return 0;
-}
-
-/* Does read/modify/write to appropriate registers to
- * set output and direction bits selected by mask.
- * These are in their canonical positions (e.g. the lsb of
- * dir will end up in D48 of extctrl on existing chips).
- * Returns the contents of the GP inputs.
- */
-static int gpio_6120_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
-{
- u64 read_val, new_out;
- unsigned long flags;
-
- if (mask) {
- /* some bits being written, lock access to GPIO */
- dir &= mask;
- out &= mask;
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
- dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
- new_out = (dd->cspec->gpio_out & ~mask) | out;
-
- qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
- qib_write_kreg(dd, kr_gpio_out, new_out);
- dd->cspec->gpio_out = new_out;
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
- }
- /*
- * It is unlikely that a read at this time would get valid
- * data on a pin whose direction line was set in the same
- * call to this function. We include the read here because
- * that allows us to potentially combine a change on one pin with
- * a read on another, and because the old code did something like
- * this.
- */
- read_val = qib_read_kreg64(dd, kr_extstatus);
- return SYM_FIELD(read_val, EXTStatus, GPIOIn);
-}
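-
-/*
- * Editorial usage sketch (not part of the original driver): drive a
- * hypothetical GPIO pin 1 low as an output while leaving every other
- * pin alone, returning the current GPIO input levels.
- */
-static int example_gpio_drive_pin1_low(struct qib_devdata *dd)
-{
-	u32 pin = 1U << 1;	/* bit mask selecting GPIO pin 1 */
-
-	/* out = 0 (drive low), dir = pin (output), mask = pin only */
-	return gpio_6120_mod(dd, 0, pin, pin);
-}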
-
-/*
- * Read fundamental info we need to use the chip. These are
- * the registers that describe chip capabilities, and are
- * saved in shadow registers.
- */
-static void get_6120_chip_params(struct qib_devdata *dd)
-{
- u64 val;
- u32 piobufs;
- int mtu;
-
- dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
-
- dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
- dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
- dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
- dd->palign = qib_read_kreg32(dd, kr_palign);
- dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
- dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
-
- dd->rcvhdrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
-
- val = qib_read_kreg64(dd, kr_sendpiosize);
- dd->piosize2k = val & ~0U;
- dd->piosize4k = val >> 32;
-
- mtu = ib_mtu_enum_to_int(qib_ibmtu);
- if (mtu == -1)
- mtu = QIB_DEFAULT_MTU;
- dd->pport->ibmtu = (u32)mtu;
-
- val = qib_read_kreg64(dd, kr_sendpiobufcnt);
- dd->piobcnt2k = val & ~0U;
- dd->piobcnt4k = val >> 32;
- dd->last_pio = dd->piobcnt4k + dd->piobcnt2k - 1;
- /* these may be adjusted in init_chip_wc_pat() */
- dd->pio2kbase = (u32 __iomem *)
- (((char __iomem *)dd->kregbase) + dd->pio2k_bufbase);
- if (dd->piobcnt4k) {
- dd->pio4kbase = (u32 __iomem *)
- (((char __iomem *) dd->kregbase) +
- (dd->piobufbase >> 32));
-		/*
-		 * 4K buffers take 2 pages; we use roundup just to be
-		 * paranoid; we calculate it once here, rather than on
-		 * every buf allocate
-		 */
- dd->align4k = ALIGN(dd->piosize4k, dd->palign);
- }
-
- piobufs = dd->piobcnt4k + dd->piobcnt2k;
-
- dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
- (sizeof(u64) * BITS_PER_BYTE / 2);
-}
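-
-/*
- * Editorial worked example: each 64-bit pioavail register carries two
- * status bits per send buffer, i.e. 32 buffers per register, so the
- * ALIGN() expression above is just a round-up division:
- *
- *	pioavregs = DIV_ROUND_UP(piobufs, 32);
- *
- * e.g. piobufs = 136 gives DIV_ROUND_UP(136, 32) = 5 registers.
- */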
-
-/*
- * The chip base addresses in cspec and cpspec have to be set
- * after possible init_chip_wc_pat(), rather than in
- * get_6120_chip_params(), so split out as separate function
- */
-static void set_6120_baseaddrs(struct qib_devdata *dd)
-{
- u32 cregbase;
-
- cregbase = qib_read_kreg32(dd, kr_counterregbase);
- dd->cspec->cregbase = (u64 __iomem *)
- ((char __iomem *) dd->kregbase + cregbase);
-
- dd->egrtidbase = (u64 __iomem *)
- ((char __iomem *) dd->kregbase + dd->rcvegrbase);
-}
-
-/*
- * Write the final few registers that depend on some of the
- * init setup. Done late in init, just before bringing up
- * the serdes.
- */
-static int qib_late_6120_initreg(struct qib_devdata *dd)
-{
- int ret = 0;
- u64 val;
-
- qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
- qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
- qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
- qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
- val = qib_read_kreg64(dd, kr_sendpioavailaddr);
- if (val != dd->pioavailregs_phys) {
- qib_dev_err(dd,
- "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
- (unsigned long) dd->pioavailregs_phys,
- (unsigned long long) val);
- ret = -EINVAL;
- }
- return ret;
-}
-
-static int init_6120_variables(struct qib_devdata *dd)
-{
- int ret = 0;
- struct qib_pportdata *ppd;
- u32 sbufs;
-
- ppd = (struct qib_pportdata *)(dd + 1);
- dd->pport = ppd;
- dd->num_pports = 1;
-
- dd->cspec = (struct qib_chip_specific *)(ppd + dd->num_pports);
- dd->cspec->ppd = ppd;
- ppd->cpspec = NULL; /* not used in this chip */
-
- spin_lock_init(&dd->cspec->kernel_tid_lock);
- spin_lock_init(&dd->cspec->user_tid_lock);
- spin_lock_init(&dd->cspec->rcvmod_lock);
- spin_lock_init(&dd->cspec->gpio_lock);
-
- /* we haven't yet set QIB_PRESENT, so use read directly */
- dd->revision = readq(&dd->kregbase[kr_revision]);
-
- if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
- qib_dev_err(dd,
- "Revision register read failure, giving up initialization\n");
- ret = -ENODEV;
- goto bail;
- }
- dd->flags |= QIB_PRESENT; /* now register routines work */
-
- dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
- ChipRevMajor);
- dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
- ChipRevMinor);
-
- get_6120_chip_params(dd);
- pe_boardname(dd); /* fill in boardname */
-
- /*
- * GPIO bits for TWSI data and clock,
- * used for serial EEPROM.
- */
- dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
- dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
- dd->twsi_eeprom_dev = QIB_TWSI_NO_DEV;
-
- if (qib_unordered_wc())
- dd->flags |= QIB_PIO_FLUSH_WC;
-
- ret = qib_init_pportdata(ppd, dd, 0, 1);
- if (ret)
- goto bail;
- ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
- ppd->link_speed_supported = QIB_IB_SDR;
- ppd->link_width_enabled = IB_WIDTH_4X;
- ppd->link_speed_enabled = ppd->link_speed_supported;
- /* these can't change for this chip, so set once */
- ppd->link_width_active = ppd->link_width_enabled;
- ppd->link_speed_active = ppd->link_speed_enabled;
- ppd->vls_supported = IB_VL_VL0;
- ppd->vls_operational = ppd->vls_supported;
-
- dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
- dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
- dd->rhf_offset = 0;
-
- /* we always allocate at least 2048 bytes for eager buffers */
- ret = ib_mtu_enum_to_int(qib_ibmtu);
- dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
- dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
-
- qib_6120_tidtemplate(dd);
-
- /*
- * We can request a receive interrupt for 1 or
- * more packets from current offset. For now, we set this
- * up for a single packet.
- */
- dd->rhdrhead_intr_off = 1ULL << 32;
-
- /* setup the stats timer; the add_timer is done at end of init */
- timer_setup(&dd->stats_timer, qib_get_6120_faststats, 0);
- timer_setup(&dd->cspec->pma_timer, pma_6120_timer, 0);
-
- dd->ureg_align = qib_read_kreg32(dd, kr_palign);
-
- dd->piosize2kmax_dwords = dd->piosize2k >> 2;
- qib_6120_config_ctxts(dd);
- qib_set_ctxtcnt(dd);
-
- ret = init_chip_wc_pat(dd, 0);
- if (ret)
- goto bail;
- set_6120_baseaddrs(dd); /* set chip access pointers now */
-
- ret = 0;
- if (qib_mini_init)
- goto bail;
-
- qib_num_cfg_vls = 1; /* if any 6120's, only one VL */
-
- ret = qib_create_ctxts(dd);
- init_6120_cntrnames(dd);
-
- /* use all of 4KB buffers for the kernel, otherwise 16 */
- sbufs = dd->piobcnt4k ? dd->piobcnt4k : 16;
-
- dd->lastctxt_piobuf = dd->piobcnt2k + dd->piobcnt4k - sbufs;
- dd->pbufsctxt = dd->lastctxt_piobuf /
- (dd->cfgctxts - dd->first_user_ctxt);
-
- if (ret)
- goto bail;
-bail:
- return ret;
-}
-
-/*
- * For this chip, we want to use the same buffer every time
- * when we are trying to bring the link up (they are always VL15
- * packets). At that link state the packet should always go out immediately
- * (or at least be discarded at the tx interface if the link is down).
- * If it doesn't, and the buffer isn't available, that means some other
- * sender has gotten ahead of us, and is preventing our packet from going
- * out. In that case, we flush all packets, and try again. If that still
- * fails, we fail the request, and hope things work the next time around.
- *
- * We don't need very complicated heuristics on whether the packet had
- * time to go out or not, since even at SDR 1X, it goes out in very short
- * time periods, covered by the chip reads done here and as part of the
- * flush.
- */
-static u32 __iomem *get_6120_link_buf(struct qib_pportdata *ppd, u32 *bnum)
-{
- u32 __iomem *buf;
- u32 lbuf = ppd->dd->piobcnt2k + ppd->dd->piobcnt4k - 1;
-
- /*
- * always blip to get avail list updated, since it's almost
- * always needed, and is fairly cheap.
- */
- sendctrl_6120_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
- qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
- buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
- if (buf)
- goto done;
-
- sendctrl_6120_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
- QIB_SENDCTRL_AVAIL_BLIP);
- ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */
- qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
- buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
-done:
- return buf;
-}
-
-static u32 __iomem *qib_6120_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
- u32 *pbufnum)
-{
- u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
- struct qib_devdata *dd = ppd->dd;
- u32 __iomem *buf;
-
- if (((pbc >> 32) & PBC_6120_VL15_SEND_CTRL) &&
- !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
- buf = get_6120_link_buf(ppd, pbufnum);
- else {
-
- if ((plen + 1) > dd->piosize2kmax_dwords)
- first = dd->piobcnt2k;
- else
- first = 0;
- /* try 4k if all 2k busy, so same last for both sizes */
- last = dd->piobcnt2k + dd->piobcnt4k - 1;
- buf = qib_getsendbuf_range(dd, pbufnum, first, last);
- }
- return buf;
-}
-
-static int init_sdma_6120_regs(struct qib_pportdata *ppd)
-{
- return -ENODEV;
-}
-
-static u16 qib_sdma_6120_gethead(struct qib_pportdata *ppd)
-{
- return 0;
-}
-
-static int qib_sdma_6120_busy(struct qib_pportdata *ppd)
-{
- return 0;
-}
-
-static void qib_sdma_update_6120_tail(struct qib_pportdata *ppd, u16 tail)
-{
-}
-
-static void qib_6120_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
-{
-}
-
-static void qib_sdma_set_6120_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
-{
-}
-
-/*
- * the pbc doesn't need a VL15 indicator, but we need it for link_buf.
- * The chip ignores the bit if set.
- */
-static u32 qib_6120_setpbc_control(struct qib_pportdata *ppd, u32 plen,
- u8 srate, u8 vl)
-{
- return vl == 15 ? PBC_6120_VL15_SEND_CTRL : 0;
-}
-
-static void qib_6120_initvl15_bufs(struct qib_devdata *dd)
-{
-}
-
-static void qib_6120_init_ctxt(struct qib_ctxtdata *rcd)
-{
- rcd->rcvegrcnt = rcd->dd->rcvhdrcnt;
- rcd->rcvegr_tid_base = rcd->ctxt * rcd->rcvegrcnt;
-}
-
-static void qib_6120_txchk_change(struct qib_devdata *dd, u32 start,
- u32 len, u32 avail, struct qib_ctxtdata *rcd)
-{
-}
-
-static void writescratch(struct qib_devdata *dd, u32 val)
-{
- (void) qib_write_kreg(dd, kr_scratch, val);
-}
-
-static int qib_6120_tempsense_rd(struct qib_devdata *dd, int regnum)
-{
- return -ENXIO;
-}
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-static int qib_6120_notify_dca(struct qib_devdata *dd, unsigned long event)
-{
- return 0;
-}
-#endif
-
-/* Dummy function, as 6120 boards never disable EEPROM Write */
-static int qib_6120_eeprom_wen(struct qib_devdata *dd, int wen)
-{
- return 1;
-}
-
-/**
- * qib_init_iba6120_funcs - set up the chip-specific function pointers
- * @pdev: pci_dev of the qlogic_ib device
- * @ent: pci_device_id matching this chip
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- *
- * It also allocates/partially-inits the qib_devdata struct for
- * this device.
- */
-struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct qib_devdata *dd;
- int ret;
-
- dd = qib_alloc_devdata(pdev, sizeof(struct qib_pportdata) +
- sizeof(struct qib_chip_specific));
- if (IS_ERR(dd))
- goto bail;
-
- dd->f_bringup_serdes = qib_6120_bringup_serdes;
- dd->f_cleanup = qib_6120_setup_cleanup;
- dd->f_clear_tids = qib_6120_clear_tids;
- dd->f_free_irq = qib_free_irq;
- dd->f_get_base_info = qib_6120_get_base_info;
- dd->f_get_msgheader = qib_6120_get_msgheader;
- dd->f_getsendbuf = qib_6120_getsendbuf;
- dd->f_gpio_mod = gpio_6120_mod;
- dd->f_eeprom_wen = qib_6120_eeprom_wen;
- dd->f_hdrqempty = qib_6120_hdrqempty;
- dd->f_ib_updown = qib_6120_ib_updown;
- dd->f_init_ctxt = qib_6120_init_ctxt;
- dd->f_initvl15_bufs = qib_6120_initvl15_bufs;
- dd->f_intr_fallback = qib_6120_nointr_fallback;
- dd->f_late_initreg = qib_late_6120_initreg;
- dd->f_setpbc_control = qib_6120_setpbc_control;
- dd->f_portcntr = qib_portcntr_6120;
- dd->f_put_tid = (dd->minrev >= 2) ?
- qib_6120_put_tid_2 :
- qib_6120_put_tid;
- dd->f_quiet_serdes = qib_6120_quiet_serdes;
- dd->f_rcvctrl = rcvctrl_6120_mod;
- dd->f_read_cntrs = qib_read_6120cntrs;
- dd->f_read_portcntrs = qib_read_6120portcntrs;
- dd->f_reset = qib_6120_setup_reset;
- dd->f_init_sdma_regs = init_sdma_6120_regs;
- dd->f_sdma_busy = qib_sdma_6120_busy;
- dd->f_sdma_gethead = qib_sdma_6120_gethead;
- dd->f_sdma_sendctrl = qib_6120_sdma_sendctrl;
- dd->f_sdma_set_desc_cnt = qib_sdma_set_6120_desc_cnt;
- dd->f_sdma_update_tail = qib_sdma_update_6120_tail;
- dd->f_sendctrl = sendctrl_6120_mod;
- dd->f_set_armlaunch = qib_set_6120_armlaunch;
- dd->f_set_cntr_sample = qib_set_cntr_6120_sample;
- dd->f_iblink_state = qib_6120_iblink_state;
- dd->f_ibphys_portstate = qib_6120_phys_portstate;
- dd->f_get_ib_cfg = qib_6120_get_ib_cfg;
- dd->f_set_ib_cfg = qib_6120_set_ib_cfg;
- dd->f_set_ib_loopback = qib_6120_set_loopback;
- dd->f_set_intr_state = qib_6120_set_intr_state;
- dd->f_setextled = qib_6120_setup_setextled;
- dd->f_txchk_change = qib_6120_txchk_change;
- dd->f_update_usrhead = qib_update_6120_usrhead;
- dd->f_wantpiobuf_intr = qib_wantpiobuf_6120_intr;
- dd->f_xgxs_reset = qib_6120_xgxs_reset;
- dd->f_writescratch = writescratch;
- dd->f_tempsense_rd = qib_6120_tempsense_rd;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dd->f_notify_dca = qib_6120_notify_dca;
-#endif
- /*
- * Do remaining pcie setup and save pcie values in dd.
- * Any error printing is already done by the init code.
- * On return, we have the chip mapped and accessible,
- * but chip registers are not set up until start of
- * init_6120_variables.
- */
- ret = qib_pcie_ddinit(dd, pdev, ent);
- if (ret < 0)
- goto bail_free;
-
- /* initialize chip-specific variables */
- ret = init_6120_variables(dd);
- if (ret)
- goto bail_cleanup;
-
- if (qib_mini_init)
- goto bail;
-
- if (qib_pcie_params(dd, 8, NULL))
- qib_dev_err(dd,
- "Failed to setup PCIe or interrupts; continuing anyway\n");
- /* clear diagctrl register, in case diags were running and crashed */
- qib_write_kreg(dd, kr_hwdiagctrl, 0);
-
- if (qib_read_kreg64(dd, kr_hwerrstatus) &
- QLOGIC_IB_HWE_SERDESPLLFAILED)
- qib_write_kreg(dd, kr_hwerrclear,
- QLOGIC_IB_HWE_SERDESPLLFAILED);
-
- /* setup interrupt handler (interrupt type handled above) */
- qib_setup_6120_interrupt(dd);
- /* Note that qpn_mask is set by qib_6120_config_ctxts() first */
- qib_6120_init_hwerrors(dd);
-
- goto bail;
-
-bail_cleanup:
- qib_pcie_ddcleanup(dd);
-bail_free:
- qib_free_devdata(dd);
- dd = ERR_PTR(ret);
-bail:
- return dd;
-}
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
deleted file mode 100644
index 0b347d1129fa..000000000000
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ /dev/null
@@ -1,4596 +0,0 @@
-/*
- * Copyright (c) 2011 - 2017 Intel Corporation. All rights reserved.
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/*
- * This file contains all of the code that is specific to the
- * QLogic_IB 7220 chip (except that specific to the SerDes)
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <rdma/ib_verbs.h>
-
-#include "qib.h"
-#include "qib_7220.h"
-
-static void qib_setup_7220_setextled(struct qib_pportdata *, u32);
-static void qib_7220_handle_hwerrors(struct qib_devdata *, char *, size_t);
-static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op);
-static u32 qib_7220_iblink_state(u64);
-static u8 qib_7220_phys_portstate(u64);
-static void qib_sdma_update_7220_tail(struct qib_pportdata *, u16);
-static void qib_set_ib_7220_lstate(struct qib_pportdata *, u16, u16);
-
-/*
- * This file contains almost all the chip-specific register information and
- * access functions for the QLogic_IB 7220 PCI-Express chip, with the
- * exception of SerDes support, which is in qib_sd7220.c.
- */
-
-/* Below uses machine-generated qib_chipnum_regs.h file */
-#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
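-/*
- * Editorial example: kr_control below expands to
- * QIB_7220_Control_OFFS / sizeof(u64), i.e. an index into the
- * u64-wide kernel register array rather than a byte offset.
- */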
-
-/* Use defines to tie machine-generated names to lower-case names */
-#define kr_control KREG_IDX(Control)
-#define kr_counterregbase KREG_IDX(CntrRegBase)
-#define kr_errclear KREG_IDX(ErrClear)
-#define kr_errmask KREG_IDX(ErrMask)
-#define kr_errstatus KREG_IDX(ErrStatus)
-#define kr_extctrl KREG_IDX(EXTCtrl)
-#define kr_extstatus KREG_IDX(EXTStatus)
-#define kr_gpio_clear KREG_IDX(GPIOClear)
-#define kr_gpio_mask KREG_IDX(GPIOMask)
-#define kr_gpio_out KREG_IDX(GPIOOut)
-#define kr_gpio_status KREG_IDX(GPIOStatus)
-#define kr_hrtbt_guid KREG_IDX(HRTBT_GUID)
-#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
-#define kr_hwerrclear KREG_IDX(HwErrClear)
-#define kr_hwerrmask KREG_IDX(HwErrMask)
-#define kr_hwerrstatus KREG_IDX(HwErrStatus)
-#define kr_ibcctrl KREG_IDX(IBCCtrl)
-#define kr_ibcddrctrl KREG_IDX(IBCDDRCtrl)
-#define kr_ibcddrstatus KREG_IDX(IBCDDRStatus)
-#define kr_ibcstatus KREG_IDX(IBCStatus)
-#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
-#define kr_intclear KREG_IDX(IntClear)
-#define kr_intmask KREG_IDX(IntMask)
-#define kr_intstatus KREG_IDX(IntStatus)
-#define kr_ncmodectrl KREG_IDX(IBNCModeCtrl)
-#define kr_palign KREG_IDX(PageAlign)
-#define kr_partitionkey KREG_IDX(RcvPartitionKey)
-#define kr_portcnt KREG_IDX(PortCnt)
-#define kr_rcvbthqp KREG_IDX(RcvBTHQP)
-#define kr_rcvctrl KREG_IDX(RcvCtrl)
-#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
-#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
-#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
-#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
-#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
-#define kr_rcvpktledcnt KREG_IDX(RcvPktLEDCnt)
-#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
-#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
-#define kr_revision KREG_IDX(Revision)
-#define kr_scratch KREG_IDX(Scratch)
-#define kr_sendbuffererror KREG_IDX(SendBufErr0)
-#define kr_sendctrl KREG_IDX(SendCtrl)
-#define kr_senddmabase KREG_IDX(SendDmaBase)
-#define kr_senddmabufmask0 KREG_IDX(SendDmaBufMask0)
-#define kr_senddmabufmask1 (KREG_IDX(SendDmaBufMask0) + 1)
-#define kr_senddmabufmask2 (KREG_IDX(SendDmaBufMask0) + 2)
-#define kr_senddmahead KREG_IDX(SendDmaHead)
-#define kr_senddmaheadaddr KREG_IDX(SendDmaHeadAddr)
-#define kr_senddmalengen KREG_IDX(SendDmaLenGen)
-#define kr_senddmastatus KREG_IDX(SendDmaStatus)
-#define kr_senddmatail KREG_IDX(SendDmaTail)
-#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
-#define kr_sendpiobufbase KREG_IDX(SendBufBase)
-#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
-#define kr_sendpiosize KREG_IDX(SendBufSize)
-#define kr_sendregbase KREG_IDX(SendRegBase)
-#define kr_userregbase KREG_IDX(UserRegBase)
-#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
-
-/* These must only be written via qib_write_kreg_ctxt() */
-#define kr_rcvhdraddr KREG_IDX(RcvHdrAddr0)
-#define kr_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
-
-
-#define CREG_IDX(regname) ((QIB_7220_##regname##_OFFS - \
- QIB_7220_LBIntCnt_OFFS) / sizeof(u64))
-
-#define cr_badformat CREG_IDX(RxVersionErrCnt)
-#define cr_erricrc CREG_IDX(RxICRCErrCnt)
-#define cr_errlink CREG_IDX(RxLinkMalformCnt)
-#define cr_errlpcrc CREG_IDX(RxLPCRCErrCnt)
-#define cr_errpkey CREG_IDX(RxPKeyMismatchCnt)
-#define cr_rcvflowctrl_err CREG_IDX(RxFlowCtrlViolCnt)
-#define cr_err_rlen CREG_IDX(RxLenErrCnt)
-#define cr_errslen CREG_IDX(TxLenErrCnt)
-#define cr_errtidfull CREG_IDX(RxTIDFullErrCnt)
-#define cr_errtidvalid CREG_IDX(RxTIDValidErrCnt)
-#define cr_errvcrc CREG_IDX(RxVCRCErrCnt)
-#define cr_ibstatuschange CREG_IDX(IBStatusChangeCnt)
-#define cr_lbint CREG_IDX(LBIntCnt)
-#define cr_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
-#define cr_invalidslen CREG_IDX(TxMaxMinLenErrCnt)
-#define cr_lbflowstall CREG_IDX(LBFlowStallCnt)
-#define cr_pktrcv CREG_IDX(RxDataPktCnt)
-#define cr_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
-#define cr_pktsend CREG_IDX(TxDataPktCnt)
-#define cr_pktsendflow CREG_IDX(TxFlowPktCnt)
-#define cr_portovfl CREG_IDX(RxP0HdrEgrOvflCnt)
-#define cr_rcvebp CREG_IDX(RxEBPCnt)
-#define cr_rcvovfl CREG_IDX(RxBufOvflCnt)
-#define cr_senddropped CREG_IDX(TxDroppedPktCnt)
-#define cr_sendstall CREG_IDX(TxFlowStallCnt)
-#define cr_sendunderrun CREG_IDX(TxUnderrunCnt)
-#define cr_wordrcv CREG_IDX(RxDwordCnt)
-#define cr_wordsend CREG_IDX(TxDwordCnt)
-#define cr_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
-#define cr_rxdroppkt CREG_IDX(RxDroppedPktCnt)
-#define cr_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
-#define cr_iblinkdown CREG_IDX(IBLinkDownedCnt)
-#define cr_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
-#define cr_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
-#define cr_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
-#define cr_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
-#define cr_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
-#define cr_rxvlerr CREG_IDX(RxVlErrCnt)
-#define cr_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
-#define cr_psstat CREG_IDX(PSStat)
-#define cr_psstart CREG_IDX(PSStart)
-#define cr_psinterval CREG_IDX(PSInterval)
-#define cr_psrcvdatacount CREG_IDX(PSRcvDataCount)
-#define cr_psrcvpktscount CREG_IDX(PSRcvPktsCount)
-#define cr_psxmitdatacount CREG_IDX(PSXmitDataCount)
-#define cr_psxmitpktscount CREG_IDX(PSXmitPktsCount)
-#define cr_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
-#define cr_txsdmadesc CREG_IDX(TxSDmaDescCnt)
-#define cr_pcieretrydiag CREG_IDX(PcieRetryBufDiagQwordCnt)
-
-#define SYM_RMASK(regname, fldname) ((u64) \
- QIB_7220_##regname##_##fldname##_RMASK)
-#define SYM_MASK(regname, fldname) ((u64) \
- QIB_7220_##regname##_##fldname##_RMASK << \
- QIB_7220_##regname##_##fldname##_LSB)
-#define SYM_LSB(regname, fldname) (QIB_7220_##regname##_##fldname##_LSB)
-#define SYM_FIELD(value, regname, fldname) ((u64) \
- (((value) >> SYM_LSB(regname, fldname)) & \
- SYM_RMASK(regname, fldname)))
-#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
-#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
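-
-/*
- * Editorial example of how the SYM_* helpers compose (a sketch, not
- * taken from the original source): extracting the link state field
- * from a freshly read IBCStatus value.
- *
- *	u64 ibcs = qib_read_kreg64(dd, kr_ibcstatus);
- *	u32 lstate = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
- *
- * SYM_FIELD shifts the value right by the field's LSB and masks it
- * with the field's RMASK, per the definitions above.
- */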
-
-/* ibcctrl bits */
-#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
-/* cycle through TS1/TS2 till OK */
-#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
-/* wait for TS1, then go on */
-#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
-#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
-
-#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
-#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
-#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
-
-#define BLOB_7220_IBCHG 0x81
-
-/*
- * We could have a single register get/put routine that takes a group type,
- * but this is somewhat clearer and cleaner. It also gives us some error
- * checking. 64 bit register reads should always work, but are inefficient
- * on opteron (the northbridge always generates 2 separate HT 32 bit reads),
- * so we use kreg32 wherever possible. User register and counter register
- * reads are always 32 bit reads, so only one form of those routines.
- */
-
-/**
- * qib_read_ureg32 - read 32-bit virtualized per-context register
- * @dd: device
- * @regno: register number
- * @ctxt: context number
- *
- * Return the contents of a register that is virtualized to be per context.
- * Returns 0 if the chip is not mapped or not flagged present (not
- * distinguishable from valid contents at runtime; we may add a
- * separate error variable at some point).
- */
-static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
- enum qib_ureg regno, int ctxt)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return 0;
-
- if (dd->userbase)
- return readl(regno + (u64 __iomem *)
- ((char __iomem *)dd->userbase +
- dd->ureg_align * ctxt));
- else
- return readl(regno + (u64 __iomem *)
- (dd->uregbase +
- (char __iomem *)dd->kregbase +
- dd->ureg_align * ctxt));
-}
-
-/**
- * qib_write_ureg - write 32-bit virtualized per-context register
- * @dd: device
- * @regno: register number
- * @value: value
- * @ctxt: context
- *
- * Write the contents of a register that is virtualized to be per context.
- */
-static inline void qib_write_ureg(const struct qib_devdata *dd,
- enum qib_ureg regno, u64 value, int ctxt)
-{
- u64 __iomem *ubase;
-
- if (dd->userbase)
- ubase = (u64 __iomem *)
- ((char __iomem *) dd->userbase +
- dd->ureg_align * ctxt);
- else
- ubase = (u64 __iomem *)
- (dd->uregbase +
- (char __iomem *) dd->kregbase +
- dd->ureg_align * ctxt);
-
- if (dd->kregbase && (dd->flags & QIB_PRESENT))
- writeq(value, &ubase[regno]);
-}
-
-/**
- * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
- * @dd: the qlogic_ib device
- * @regno: the register number to write
- * @ctxt: the context containing the register
- * @value: the value to write
- */
-static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
- const u16 regno, unsigned ctxt,
- u64 value)
-{
- qib_write_kreg(dd, regno + ctxt, value);
-}
-
-static inline void write_7220_creg(const struct qib_devdata *dd,
- u16 regno, u64 value)
-{
- if (dd->cspec->cregbase && (dd->flags & QIB_PRESENT))
- writeq(value, &dd->cspec->cregbase[regno]);
-}
-
-static inline u64 read_7220_creg(const struct qib_devdata *dd, u16 regno)
-{
- if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
- return 0;
- return readq(&dd->cspec->cregbase[regno]);
-}
-
-static inline u32 read_7220_creg32(const struct qib_devdata *dd, u16 regno)
-{
- if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
- return 0;
- return readl(&dd->cspec->cregbase[regno]);
-}
-
-/* kr_revision bits */
-#define QLOGIC_IB_R_EMULATORREV_MASK ((1ULL << 22) - 1)
-#define QLOGIC_IB_R_EMULATORREV_SHIFT 40
-
-/* kr_control bits */
-#define QLOGIC_IB_C_RESET (1U << 7)
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define QLOGIC_IB_I_RCVURG_MASK ((1ULL << 17) - 1)
-#define QLOGIC_IB_I_RCVURG_SHIFT 32
-#define QLOGIC_IB_I_RCVAVAIL_MASK ((1ULL << 17) - 1)
-#define QLOGIC_IB_I_RCVAVAIL_SHIFT 0
-#define QLOGIC_IB_I_SERDESTRIMDONE (1ULL << 27)
-
-#define QLOGIC_IB_C_FREEZEMODE 0x00000002
-#define QLOGIC_IB_C_LINKENABLE 0x00000004
-
-#define QLOGIC_IB_I_SDMAINT 0x8000000000000000ULL
-#define QLOGIC_IB_I_SDMADISABLED 0x4000000000000000ULL
-#define QLOGIC_IB_I_ERROR 0x0000000080000000ULL
-#define QLOGIC_IB_I_SPIOSENT 0x0000000040000000ULL
-#define QLOGIC_IB_I_SPIOBUFAVAIL 0x0000000020000000ULL
-#define QLOGIC_IB_I_GPIO 0x0000000010000000ULL
-
-/* variables for sanity checking interrupt and errors */
-#define QLOGIC_IB_I_BITSEXTANT \
- (QLOGIC_IB_I_SDMAINT | QLOGIC_IB_I_SDMADISABLED | \
- (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT) | \
- (QLOGIC_IB_I_RCVAVAIL_MASK << \
- QLOGIC_IB_I_RCVAVAIL_SHIFT) | \
- QLOGIC_IB_I_ERROR | QLOGIC_IB_I_SPIOSENT | \
- QLOGIC_IB_I_SPIOBUFAVAIL | QLOGIC_IB_I_GPIO | \
- QLOGIC_IB_I_SERDESTRIMDONE)
-
-#define IB_HWE_BITSEXTANT \
- (HWE_MASK(RXEMemParityErr) | \
- HWE_MASK(TXEMemParityErr) | \
- (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK << \
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) | \
- QLOGIC_IB_HWE_PCIE1PLLFAILED | \
- QLOGIC_IB_HWE_PCIE0PLLFAILED | \
- QLOGIC_IB_HWE_PCIEPOISONEDTLP | \
- QLOGIC_IB_HWE_PCIECPLTIMEOUT | \
- QLOGIC_IB_HWE_PCIEBUSPARITYXTLH | \
- QLOGIC_IB_HWE_PCIEBUSPARITYXADM | \
- QLOGIC_IB_HWE_PCIEBUSPARITYRADM | \
- HWE_MASK(PowerOnBISTFailed) | \
- QLOGIC_IB_HWE_COREPLL_FBSLIP | \
- QLOGIC_IB_HWE_COREPLL_RFSLIP | \
- QLOGIC_IB_HWE_SERDESPLLFAILED | \
- HWE_MASK(IBCBusToSPCParityErr) | \
- HWE_MASK(IBCBusFromSPCParityErr) | \
- QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR | \
- QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR | \
- QLOGIC_IB_HWE_SDMAMEMREADERR | \
- QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED | \
- QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT | \
- QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT | \
- QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT | \
- QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT | \
- QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR | \
- QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR | \
- QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR | \
- QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR)
-
-#define IB_E_BITSEXTANT \
- (ERR_MASK(RcvFormatErr) | ERR_MASK(RcvVCRCErr) | \
- ERR_MASK(RcvICRCErr) | ERR_MASK(RcvMinPktLenErr) | \
- ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvLongPktLenErr) | \
- ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvUnexpectedCharErr) | \
- ERR_MASK(RcvUnsupportedVLErr) | ERR_MASK(RcvEBPErr) | \
- ERR_MASK(RcvIBFlowErr) | ERR_MASK(RcvBadVersionErr) | \
- ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | \
- ERR_MASK(RcvBadTidErr) | ERR_MASK(RcvHdrLenErr) | \
- ERR_MASK(RcvHdrErr) | ERR_MASK(RcvIBLostLinkErr) | \
- ERR_MASK(SendSpecialTriggerErr) | \
- ERR_MASK(SDmaDisabledErr) | ERR_MASK(SendMinPktLenErr) | \
- ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnderRunErr) | \
- ERR_MASK(SendPktLenErr) | ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendDroppedDataPktErr) | \
- ERR_MASK(SendPioArmLaunchErr) | \
- ERR_MASK(SendUnexpectedPktNumErr) | \
- ERR_MASK(SendUnsupportedVLErr) | ERR_MASK(SendBufMisuseErr) | \
- ERR_MASK(SDmaGenMismatchErr) | ERR_MASK(SDmaOutOfBoundErr) | \
- ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
- ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \
- ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \
- ERR_MASK(SDmaUnexpDataErr) | \
- ERR_MASK(IBStatusChanged) | ERR_MASK(InvalidAddrErr) | \
- ERR_MASK(ResetNegated) | ERR_MASK(HardwareErr) | \
- ERR_MASK(SDmaDescAddrMisalignErr) | \
- ERR_MASK(InvalidEEPCmd))
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK 0x00000000000000ffULL
-#define QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT 0
-#define QLOGIC_IB_HWE_PCIEPOISONEDTLP 0x0000000010000000ULL
-#define QLOGIC_IB_HWE_PCIECPLTIMEOUT 0x0000000020000000ULL
-#define QLOGIC_IB_HWE_PCIEBUSPARITYXTLH 0x0000000040000000ULL
-#define QLOGIC_IB_HWE_PCIEBUSPARITYXADM 0x0000000080000000ULL
-#define QLOGIC_IB_HWE_PCIEBUSPARITYRADM 0x0000000100000000ULL
-#define QLOGIC_IB_HWE_COREPLL_FBSLIP 0x0080000000000000ULL
-#define QLOGIC_IB_HWE_COREPLL_RFSLIP 0x0100000000000000ULL
-#define QLOGIC_IB_HWE_PCIE1PLLFAILED 0x0400000000000000ULL
-#define QLOGIC_IB_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
-#define QLOGIC_IB_HWE_SERDESPLLFAILED 0x1000000000000000ULL
-/* specific to this chip */
-#define QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR 0x0000000000000040ULL
-#define QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR 0x0000000000000080ULL
-#define QLOGIC_IB_HWE_SDMAMEMREADERR 0x0000000010000000ULL
-#define QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED 0x2000000000000000ULL
-#define QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT 0x0100000000000000ULL
-#define QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT 0x0200000000000000ULL
-#define QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT 0x0400000000000000ULL
-#define QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT 0x0800000000000000ULL
-#define QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR 0x0000008000000000ULL
-#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
-#define QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR 0x0000001000000000ULL
-#define QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR 0x0000002000000000ULL
-
-#define IBA7220_IBCC_LINKCMD_SHIFT 19
-
-/* kr_ibcddrctrl bits */
-#define IBA7220_IBC_DLIDLMC_MASK 0xFFFFFFFFUL
-#define IBA7220_IBC_DLIDLMC_SHIFT 32
-
-#define IBA7220_IBC_HRTBT_MASK (SYM_RMASK(IBCDDRCtrl, HRTBT_AUTO) | \
- SYM_RMASK(IBCDDRCtrl, HRTBT_ENB))
-#define IBA7220_IBC_HRTBT_SHIFT SYM_LSB(IBCDDRCtrl, HRTBT_ENB)
-
-#define IBA7220_IBC_LANE_REV_SUPPORTED (1<<8)
-#define IBA7220_IBC_LREV_MASK 1
-#define IBA7220_IBC_LREV_SHIFT 8
-#define IBA7220_IBC_RXPOL_MASK 1
-#define IBA7220_IBC_RXPOL_SHIFT 7
-#define IBA7220_IBC_WIDTH_SHIFT 5
-#define IBA7220_IBC_WIDTH_MASK 0x3
-#define IBA7220_IBC_WIDTH_1X_ONLY (0 << IBA7220_IBC_WIDTH_SHIFT)
-#define IBA7220_IBC_WIDTH_4X_ONLY (1 << IBA7220_IBC_WIDTH_SHIFT)
-#define IBA7220_IBC_WIDTH_AUTONEG (2 << IBA7220_IBC_WIDTH_SHIFT)
-#define IBA7220_IBC_SPEED_AUTONEG (1 << 1)
-#define IBA7220_IBC_SPEED_SDR (1 << 2)
-#define IBA7220_IBC_SPEED_DDR (1 << 3)
-#define IBA7220_IBC_SPEED_AUTONEG_MASK (0x7 << 1)
-#define IBA7220_IBC_IBTA_1_2_MASK (1)
-
-/* kr_ibcddrstatus */
-/* link latency shift is 0, don't bother defining */
-#define IBA7220_DDRSTAT_LINKLAT_MASK 0x3ffffff
-
-/* kr_extstatus bits */
-#define QLOGIC_IB_EXTS_FREQSEL 0x2
-#define QLOGIC_IB_EXTS_SERDESSEL 0x4
-#define QLOGIC_IB_EXTS_MEMBIST_ENDTEST 0x0000000000004000
-#define QLOGIC_IB_EXTS_MEMBIST_DISABLED 0x0000000000008000
-
-/* kr_xgxsconfig bits */
-#define QLOGIC_IB_XGXS_RESET 0x5ULL
-#define QLOGIC_IB_XGXS_FC_SAFE (1ULL << 63)
-
-/* kr_rcvpktledcnt */
-#define IBA7220_LEDBLINK_ON_SHIFT 32 /* 4ns period on after packet */
-#define IBA7220_LEDBLINK_OFF_SHIFT 0 /* 4ns period off before next on */
-
-#define _QIB_GPIO_SDA_NUM 1
-#define _QIB_GPIO_SCL_NUM 0
-#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7220 cards. */
-#define QIB_TWSI_TEMP_DEV 0x98
-
-/* HW counter clock is at 4nsec */
-#define QIB_7220_PSXMITWAIT_CHECK_RATE 4000
-
-#define IBA7220_R_INTRAVAIL_SHIFT 17
-#define IBA7220_R_PKEY_DIS_SHIFT 34
-#define IBA7220_R_TAILUPD_SHIFT 35
-#define IBA7220_R_CTXTCFG_SHIFT 36
-
-#define IBA7220_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
-
-/*
- * The size bits give us 2^N, in KB units. 0 marks as invalid,
- * and 7 is reserved. We currently use only 2KB and 4KB.
- */
-#define IBA7220_TID_SZ_SHIFT 37 /* shift to 3bit size selector */
-#define IBA7220_TID_SZ_2K (1UL << IBA7220_TID_SZ_SHIFT) /* 2KB */
-#define IBA7220_TID_SZ_4K (2UL << IBA7220_TID_SZ_SHIFT) /* 4KB */
-#define IBA7220_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
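-/*
- * Editorial sketch of how the TID fields compose into an expected-TID
- * entry ('chippa' is a hypothetical chip physical address; the
- * driver's put_tid routine, not shown here, does the real work):
- *
- *	u64 tidval = IBA7220_TID_SZ_4K | (chippa >> IBA7220_TID_PA_SHIFT);
- *
- * i.e. a 3-bit size selector at bit 37 plus the buffer address with
- * its low 11 bits dropped.
- */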
-#define PBC_7220_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
-#define PBC_7220_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
-
-#define AUTONEG_TRIES 5 /* sequential retries to negotiate DDR */
-
-/* packet rate matching delay multiplier */
-static u8 rate_to_delay[2][2] = {
- /* 1x, 4x */
- { 8, 2 }, /* SDR */
- { 4, 1 } /* DDR */
-};
-
-static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
- [IB_RATE_2_5_GBPS] = 8,
- [IB_RATE_5_GBPS] = 4,
- [IB_RATE_10_GBPS] = 2,
- [IB_RATE_20_GBPS] = 1
-};
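-
-/*
- * Editorial note: both tables above scale delay inversely with link
- * bandwidth, so 2.5 Gb/s (SDR 1X) gets 8x the delay of 20 Gb/s
- * (DDR 4X); rate_to_delay[speed][width] and ib_rate_to_delay encode
- * the same 8/4/2/1 progression.
- */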
-
-#define IBA7220_LINKSPEED_SHIFT SYM_LSB(IBCStatus, LinkSpeedActive)
-#define IBA7220_LINKWIDTH_SHIFT SYM_LSB(IBCStatus, LinkWidthActive)
-
-/* link training states, from IBC */
-#define IB_7220_LT_STATE_DISABLED 0x00
-#define IB_7220_LT_STATE_LINKUP 0x01
-#define IB_7220_LT_STATE_POLLACTIVE 0x02
-#define IB_7220_LT_STATE_POLLQUIET 0x03
-#define IB_7220_LT_STATE_SLEEPDELAY 0x04
-#define IB_7220_LT_STATE_SLEEPQUIET 0x05
-#define IB_7220_LT_STATE_CFGDEBOUNCE 0x08
-#define IB_7220_LT_STATE_CFGRCVFCFG 0x09
-#define IB_7220_LT_STATE_CFGWAITRMT 0x0a
-#define IB_7220_LT_STATE_CFGIDLE 0x0b
-#define IB_7220_LT_STATE_RECOVERRETRAIN 0x0c
-#define IB_7220_LT_STATE_RECOVERWAITRMT 0x0e
-#define IB_7220_LT_STATE_RECOVERIDLE 0x0f
-
-/* link state machine states from IBC */
-#define IB_7220_L_STATE_DOWN 0x0
-#define IB_7220_L_STATE_INIT 0x1
-#define IB_7220_L_STATE_ARM 0x2
-#define IB_7220_L_STATE_ACTIVE 0x3
-#define IB_7220_L_STATE_ACT_DEFER 0x4
-
-static const u8 qib_7220_physportstate[0x20] = {
- [IB_7220_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
- [IB_7220_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
- [IB_7220_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
- [IB_7220_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
- [IB_7220_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
- [IB_7220_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
- [IB_7220_LT_STATE_CFGDEBOUNCE] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7220_LT_STATE_CFGRCVFCFG] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7220_LT_STATE_CFGWAITRMT] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7220_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7220_LT_STATE_RECOVERRETRAIN] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [IB_7220_LT_STATE_RECOVERWAITRMT] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [IB_7220_LT_STATE_RECOVERIDLE] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
-};
-
-int qib_special_trigger;
-module_param_named(special_trigger, qib_special_trigger, int, S_IRUGO);
-MODULE_PARM_DESC(special_trigger, "Enable SpecialTrigger arm/launch");
-
-#define IBCBUSFRSPCPARITYERR HWE_MASK(IBCBusFromSPCParityErr)
-#define IBCBUSTOSPCPARITYERR HWE_MASK(IBCBusToSPCParityErr)
-
-#define SYM_MASK_BIT(regname, fldname, bit) ((u64) \
- (1ULL << (SYM_LSB(regname, fldname) + (bit))))
-
-#define TXEMEMPARITYERR_PIOBUF \
- SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 0)
-#define TXEMEMPARITYERR_PIOPBC \
- SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 1)
-#define TXEMEMPARITYERR_PIOLAUNCHFIFO \
- SYM_MASK_BIT(HwErrMask, TXEMemParityErrMask, 2)
-
-#define RXEMEMPARITYERR_RCVBUF \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 0)
-#define RXEMEMPARITYERR_LOOKUPQ \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 1)
-#define RXEMEMPARITYERR_EXPTID \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 2)
-#define RXEMEMPARITYERR_EAGERTID \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 3)
-#define RXEMEMPARITYERR_FLAGBUF \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 4)
-#define RXEMEMPARITYERR_DATAINFO \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 5)
-#define RXEMEMPARITYERR_HDRINFO \
- SYM_MASK_BIT(HwErrMask, RXEMemParityErrMask, 6)
-
-/* 7220 specific hardware errors... */
-static const struct qib_hwerror_msgs qib_7220_hwerror_msgs[] = {
- /* generic hardware errors */
- QLOGIC_IB_HWE_MSG(IBCBUSFRSPCPARITYERR, "QIB2IB Parity"),
- QLOGIC_IB_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2QIB Parity"),
-
- QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOBUF,
- "TXE PIOBUF Memory Parity"),
- QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOPBC,
- "TXE PIOPBC Memory Parity"),
- QLOGIC_IB_HWE_MSG(TXEMEMPARITYERR_PIOLAUNCHFIFO,
- "TXE PIOLAUNCHFIFO Memory Parity"),
-
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_RCVBUF,
- "RXE RCVBUF Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_LOOKUPQ,
- "RXE LOOKUPQ Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EAGERTID,
- "RXE EAGERTID Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_EXPTID,
- "RXE EXPTID Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_FLAGBUF,
- "RXE FLAGBUF Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_DATAINFO,
- "RXE DATAINFO Memory Parity"),
- QLOGIC_IB_HWE_MSG(RXEMEMPARITYERR_HDRINFO,
- "RXE HDRINFO Memory Parity"),
-
- /* chip-specific hardware errors */
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEPOISONEDTLP,
- "PCIe Poisoned TLP"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLTIMEOUT,
- "PCIe completion timeout"),
- /*
-	 * In practice, it's unlikely that we'll see PCIe PLL, bus parity,
-	 * or memory parity failures, because if those occur we most likely
-	 * won't be able to talk to the core of the chip at all. Nonetheless,
-	 * we might see them, if they are in parts of the PCIe core that
-	 * aren't essential.
- */
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE1PLLFAILED,
- "PCIePLL1"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE0PLLFAILED,
- "PCIePLL0"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXTLH,
- "PCIe XTLH core parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYXADM,
- "PCIe ADM TX core parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIEBUSPARITYRADM,
- "PCIe ADM RX core parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SERDESPLLFAILED,
- "SerDes PLL"),
-	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLDATAQUEUEERR,
-			  "PCIe cpl data queue"),
-	QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIECPLHDRQUEUEERR,
-			  "PCIe cpl header queue"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_SDMAMEMREADERR,
- "Send DMA memory read"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_CLK_UC_PLLNOTLOCKED,
- "uC PLL clock not locked"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ0PCLKNOTDETECT,
- "PCIe serdes Q0 no clock"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ1PCLKNOTDETECT,
- "PCIe serdes Q1 no clock"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ2PCLKNOTDETECT,
- "PCIe serdes Q2 no clock"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIESERDESQ3PCLKNOTDETECT,
- "PCIe serdes Q3 no clock"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_DDSRXEQMEMORYPARITYERR,
- "DDS RXEQ memory parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR,
- "IB uC memory parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT0MEMORYPARITYERR,
- "PCIe uC oct0 memory parity"),
- QLOGIC_IB_HWE_MSG(QLOGIC_IB_HWE_PCIE_UC_OCT1MEMORYPARITYERR,
- "PCIe uC oct1 memory parity"),
-};
-
-#define RXE_PARITY (RXEMEMPARITYERR_EAGERTID|RXEMEMPARITYERR_EXPTID)
-
-#define QLOGIC_IB_E_PKTERRS (\
- ERR_MASK(SendPktLenErr) | \
- ERR_MASK(SendDroppedDataPktErr) | \
- ERR_MASK(RcvVCRCErr) | \
- ERR_MASK(RcvICRCErr) | \
- ERR_MASK(RcvShortPktLenErr) | \
- ERR_MASK(RcvEBPErr))
-
-/* Convenience for decoding Send DMA errors */
-#define QLOGIC_IB_E_SDMAERRS ( \
- ERR_MASK(SDmaGenMismatchErr) | \
- ERR_MASK(SDmaOutOfBoundErr) | \
- ERR_MASK(SDmaTailOutOfBoundErr) | ERR_MASK(SDmaBaseErr) | \
- ERR_MASK(SDma1stDescErr) | ERR_MASK(SDmaRpyTagErr) | \
- ERR_MASK(SDmaDwEnErr) | ERR_MASK(SDmaMissingDwErr) | \
- ERR_MASK(SDmaUnexpDataErr) | \
- ERR_MASK(SDmaDescAddrMisalignErr) | \
- ERR_MASK(SDmaDisabledErr) | \
- ERR_MASK(SendBufMisuseErr))
-
-/* These are all rcv-related errors which we want to count for stats */
-#define E_SUM_PKTERRS \
- (ERR_MASK(RcvHdrLenErr) | ERR_MASK(RcvBadTidErr) | \
- ERR_MASK(RcvBadVersionErr) | ERR_MASK(RcvHdrErr) | \
- ERR_MASK(RcvLongPktLenErr) | ERR_MASK(RcvShortPktLenErr) | \
- ERR_MASK(RcvMaxPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
- ERR_MASK(RcvFormatErr) | ERR_MASK(RcvUnsupportedVLErr) | \
- ERR_MASK(RcvUnexpectedCharErr) | ERR_MASK(RcvEBPErr))
-
-/* These are all send-related errors which we want to count for stats */
-#define E_SUM_ERRS \
- (ERR_MASK(SendPioArmLaunchErr) | ERR_MASK(SendUnexpectedPktNumErr) | \
- ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendUnsupportedVLErr) | \
- ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
- ERR_MASK(InvalidAddrErr))
-
-/*
- * This is similar to E_SUM_ERRS, but we can't ignore armlaunch errors,
- * nor errors unrelated to freezing and cancelling buffers. Armlaunch
- * can't be ignored because more such errors could arrive while we are
- * still cleaning up, and they need to be cancelled as they happen.
- */
-#define E_SPKT_ERRS_IGNORE \
- (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendMaxPktLenErr) | ERR_MASK(SendMinPktLenErr) | \
- ERR_MASK(SendPktLenErr))
-
-/*
- * These are errors that can occur when the link changes state while
- * a packet is being sent or received. This doesn't cover things
- * like EBP or VCRC that can result from the sender seeing the link
- * change state, in which case we receive a "known bad" packet.
- */
-#define E_SUM_LINK_PKTERRS \
- (ERR_MASK(SendDroppedDataPktErr) | ERR_MASK(SendDroppedSmpPktErr) | \
- ERR_MASK(SendMinPktLenErr) | ERR_MASK(SendPktLenErr) | \
- ERR_MASK(RcvShortPktLenErr) | ERR_MASK(RcvMinPktLenErr) | \
- ERR_MASK(RcvUnexpectedCharErr))
-
-static void autoneg_7220_work(struct work_struct *);
-static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *, u64, u32 *);
-
-/*
- * Called when we might have an error that is specific to a particular
- * PIO buffer, and may need to cancel that buffer, so it can be re-used;
- * we don't need to force an update of pioavail here.
- */
-static void qib_disarm_7220_senderrbufs(struct qib_pportdata *ppd)
-{
- unsigned long sbuf[3];
- struct qib_devdata *dd = ppd->dd;
-
- /*
- * It's possible that sendbuffererror could have bits set; might
- * have already done this as a result of hardware error handling.
- */
- /* read these before writing errorclear */
- sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
- sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
- sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);
-
- if (sbuf[0] || sbuf[1] || sbuf[2])
- qib_disarm_piobufs_set(dd, sbuf,
- dd->piobcnt2k + dd->piobcnt4k);
-}
-
-static void qib_7220_txe_recover(struct qib_devdata *dd)
-{
- qib_devinfo(dd->pcidev, "Recovering from TXE PIO parity error\n");
- qib_disarm_7220_senderrbufs(dd->pport);
-}
-
-/*
- * This is called with interrupts disabled and sdma_lock held.
- */
-static void qib_7220_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 set_sendctrl = 0;
- u64 clr_sendctrl = 0;
-
- if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
- set_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);
- else
- clr_sendctrl |= SYM_MASK(SendCtrl, SDmaEnable);
-
- if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
- set_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);
- else
- clr_sendctrl |= SYM_MASK(SendCtrl, SDmaIntEnable);
-
- if (op & QIB_SDMA_SENDCTRL_OP_HALT)
- set_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);
- else
- clr_sendctrl |= SYM_MASK(SendCtrl, SDmaHalt);
-
- spin_lock(&dd->sendctrl_lock);
-
- dd->sendctrl |= set_sendctrl;
- dd->sendctrl &= ~clr_sendctrl;
-
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
-
- spin_unlock(&dd->sendctrl_lock);
-}
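-
-/*
- * Editorial note: building set/clear masks first lets the whole
- * read-modify-write of the shadowed dd->sendctrl happen in one step
- * under sendctrl_lock, and the trailing scratch write flushes the
- * posted register update out to the chip.
- */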
-
-static void qib_decode_7220_sdma_errs(struct qib_pportdata *ppd,
- u64 err, char *buf, size_t blen)
-{
- static const struct {
- u64 err;
- const char *msg;
- } errs[] = {
- { ERR_MASK(SDmaGenMismatchErr),
- "SDmaGenMismatch" },
- { ERR_MASK(SDmaOutOfBoundErr),
- "SDmaOutOfBound" },
- { ERR_MASK(SDmaTailOutOfBoundErr),
- "SDmaTailOutOfBound" },
- { ERR_MASK(SDmaBaseErr),
- "SDmaBase" },
- { ERR_MASK(SDma1stDescErr),
- "SDma1stDesc" },
- { ERR_MASK(SDmaRpyTagErr),
- "SDmaRpyTag" },
- { ERR_MASK(SDmaDwEnErr),
- "SDmaDwEn" },
- { ERR_MASK(SDmaMissingDwErr),
- "SDmaMissingDw" },
- { ERR_MASK(SDmaUnexpDataErr),
- "SDmaUnexpData" },
- { ERR_MASK(SDmaDescAddrMisalignErr),
- "SDmaDescAddrMisalign" },
- { ERR_MASK(SendBufMisuseErr),
- "SendBufMisuse" },
- { ERR_MASK(SDmaDisabledErr),
- "SDmaDisabled" },
- };
- int i;
- size_t bidx = 0;
-
- for (i = 0; i < ARRAY_SIZE(errs); i++) {
- if (err & errs[i].err)
- bidx += scnprintf(buf + bidx, blen - bidx,
- "%s ", errs[i].msg);
- }
-}
-
-/*
- * This is called as part of link down clean up so disarm and flush
- * all send buffers so that SMP packets can be sent.
- */
-static void qib_7220_sdma_hw_clean_up(struct qib_pportdata *ppd)
-{
- /* This will trigger the Abort interrupt */
- sendctrl_7220_mod(ppd, QIB_SENDCTRL_DISARM_ALL | QIB_SENDCTRL_FLUSH |
- QIB_SENDCTRL_AVAIL_BLIP);
- ppd->dd->upd_pio_shadow = 1; /* update our idea of what's busy */
-}
-
-static void qib_sdma_7220_setlengen(struct qib_pportdata *ppd)
-{
- /*
- * Write SendDmaLenGen twice: first with the generation MSB clear,
- * then with it set, to enable generation checking and load the
- * internal generation counter.
- */
- qib_write_kreg(ppd->dd, kr_senddmalengen, ppd->sdma_descq_cnt);
- qib_write_kreg(ppd->dd, kr_senddmalengen,
- ppd->sdma_descq_cnt |
- (1ULL << QIB_7220_SendDmaLenGen_Generation_MSB));
-}
-
-static void qib_7220_sdma_hw_start_up(struct qib_pportdata *ppd)
-{
- qib_sdma_7220_setlengen(ppd);
- qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
- ppd->sdma_head_dma[0] = 0;
-}
-
-#define DISABLES_SDMA ( \
- ERR_MASK(SDmaDisabledErr) | \
- ERR_MASK(SDmaBaseErr) | \
- ERR_MASK(SDmaTailOutOfBoundErr) | \
- ERR_MASK(SDmaOutOfBoundErr) | \
- ERR_MASK(SDma1stDescErr) | \
- ERR_MASK(SDmaRpyTagErr) | \
- ERR_MASK(SDmaGenMismatchErr) | \
- ERR_MASK(SDmaDescAddrMisalignErr) | \
- ERR_MASK(SDmaMissingDwErr) | \
- ERR_MASK(SDmaDwEnErr))
-
-static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs)
-{
- unsigned long flags;
- struct qib_devdata *dd = ppd->dd;
- char *msg;
-
- errs &= QLOGIC_IB_E_SDMAERRS;
-
- msg = dd->cspec->sdmamsgbuf;
- qib_decode_7220_sdma_errs(ppd, errs, msg,
- sizeof(dd->cspec->sdmamsgbuf));
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
- if (errs & ERR_MASK(SendBufMisuseErr)) {
- unsigned long sbuf[3];
-
- sbuf[0] = qib_read_kreg64(dd, kr_sendbuffererror);
- sbuf[1] = qib_read_kreg64(dd, kr_sendbuffererror + 1);
- sbuf[2] = qib_read_kreg64(dd, kr_sendbuffererror + 2);
-
- qib_dev_err(ppd->dd,
- "IB%u:%u SendBufMisuse: %04lx %016lx %016lx\n",
- ppd->dd->unit, ppd->port, sbuf[2], sbuf[1],
- sbuf[0]);
- }
-
- if (errs & ERR_MASK(SDmaUnexpDataErr))
- qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", ppd->dd->unit,
- ppd->port);
-
- switch (ppd->sdma_state.current_state) {
- case qib_sdma_state_s00_hw_down:
- /* not expecting any interrupts */
- break;
-
- case qib_sdma_state_s10_hw_start_up_wait:
- /* handled in intr path */
- break;
-
- case qib_sdma_state_s20_idle:
- /* not expecting any interrupts */
- break;
-
- case qib_sdma_state_s30_sw_clean_up_wait:
- /* not expecting any interrupts */
- break;
-
- case qib_sdma_state_s40_hw_clean_up_wait:
- if (errs & ERR_MASK(SDmaDisabledErr))
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e50_hw_cleaned);
- break;
-
- case qib_sdma_state_s50_hw_halt_wait:
- /* handled in intr path */
- break;
-
- case qib_sdma_state_s99_running:
- if (errs & DISABLES_SDMA)
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e7220_err_halted);
- break;
- }
-
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-}
-
-/*
- * Decode the error status into strings, deciding whether to always
- * print it or not depending on "normal packet errors" vs everything
- * else. Return 1 if "real" errors, otherwise 0 if only packet
- * errors, so the caller can decide what to print with the string.
- */
-static int qib_decode_7220_err(struct qib_devdata *dd, char *buf, size_t blen,
- u64 err)
-{
- int iserr = 1;
-
- *buf = '\0';
- if (err & QLOGIC_IB_E_PKTERRS) {
- if (!(err & ~QLOGIC_IB_E_PKTERRS))
- iserr = 0;
- if ((err & ERR_MASK(RcvICRCErr)) &&
- !(err & (ERR_MASK(RcvVCRCErr) | ERR_MASK(RcvEBPErr))))
- strlcat(buf, "CRC ", blen);
- if (!iserr)
- goto done;
- }
- if (err & ERR_MASK(RcvHdrLenErr))
- strlcat(buf, "rhdrlen ", blen);
- if (err & ERR_MASK(RcvBadTidErr))
- strlcat(buf, "rbadtid ", blen);
- if (err & ERR_MASK(RcvBadVersionErr))
- strlcat(buf, "rbadversion ", blen);
- if (err & ERR_MASK(RcvHdrErr))
- strlcat(buf, "rhdr ", blen);
- if (err & ERR_MASK(SendSpecialTriggerErr))
- strlcat(buf, "sendspecialtrigger ", blen);
- if (err & ERR_MASK(RcvLongPktLenErr))
- strlcat(buf, "rlongpktlen ", blen);
- if (err & ERR_MASK(RcvMaxPktLenErr))
- strlcat(buf, "rmaxpktlen ", blen);
- if (err & ERR_MASK(RcvMinPktLenErr))
- strlcat(buf, "rminpktlen ", blen);
- if (err & ERR_MASK(SendMinPktLenErr))
- strlcat(buf, "sminpktlen ", blen);
- if (err & ERR_MASK(RcvFormatErr))
- strlcat(buf, "rformaterr ", blen);
- if (err & ERR_MASK(RcvUnsupportedVLErr))
- strlcat(buf, "runsupvl ", blen);
- if (err & ERR_MASK(RcvUnexpectedCharErr))
- strlcat(buf, "runexpchar ", blen);
- if (err & ERR_MASK(RcvIBFlowErr))
- strlcat(buf, "ribflow ", blen);
- if (err & ERR_MASK(SendUnderRunErr))
- strlcat(buf, "sunderrun ", blen);
- if (err & ERR_MASK(SendPioArmLaunchErr))
- strlcat(buf, "spioarmlaunch ", blen);
- if (err & ERR_MASK(SendUnexpectedPktNumErr))
- strlcat(buf, "sunexperrpktnum ", blen);
- if (err & ERR_MASK(SendDroppedSmpPktErr))
- strlcat(buf, "sdroppedsmppkt ", blen);
- if (err & ERR_MASK(SendMaxPktLenErr))
- strlcat(buf, "smaxpktlen ", blen);
- if (err & ERR_MASK(SendUnsupportedVLErr))
- strlcat(buf, "sunsupVL ", blen);
- if (err & ERR_MASK(InvalidAddrErr))
- strlcat(buf, "invalidaddr ", blen);
- if (err & ERR_MASK(RcvEgrFullErr))
- strlcat(buf, "rcvegrfull ", blen);
- if (err & ERR_MASK(RcvHdrFullErr))
- strlcat(buf, "rcvhdrfull ", blen);
- if (err & ERR_MASK(IBStatusChanged))
- strlcat(buf, "ibcstatuschg ", blen);
- if (err & ERR_MASK(RcvIBLostLinkErr))
- strlcat(buf, "riblostlink ", blen);
- if (err & ERR_MASK(HardwareErr))
- strlcat(buf, "hardware ", blen);
- if (err & ERR_MASK(ResetNegated))
- strlcat(buf, "reset ", blen);
- if (err & QLOGIC_IB_E_SDMAERRS)
- qib_decode_7220_sdma_errs(dd->pport, err, buf, blen);
- if (err & ERR_MASK(InvalidEEPCmd))
- strlcat(buf, "invalideepromcmd ", blen);
-done:
- return iserr;
-}
-
-static void reenable_7220_chase(struct timer_list *t)
-{
- struct qib_chippport_specific *cpspec = timer_container_of(cpspec, t,
- chase_timer);
- struct qib_pportdata *ppd = &cpspec->pportdata;
-
- ppd->cpspec->chase_timer.expires = 0;
- qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
- QLOGIC_IB_IBCC_LINKINITCMD_POLL);
-}
-
-static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst)
-{
- u8 ibclt;
- unsigned long tnow;
-
- ibclt = (u8)SYM_FIELD(ibcst, IBCStatus, LinkTrainingState);
-
- /*
- * Detect and handle the state chase issue, where we can
- * get stuck if we are unlucky on timing on both sides of
- * the link. If we are, we disable, set a timer, and
- * then re-enable.
- */
- switch (ibclt) {
- case IB_7220_LT_STATE_CFGRCVFCFG:
- case IB_7220_LT_STATE_CFGWAITRMT:
- case IB_7220_LT_STATE_TXREVLANES:
- case IB_7220_LT_STATE_CFGENH:
- tnow = jiffies;
- if (ppd->cpspec->chase_end &&
- time_after(tnow, ppd->cpspec->chase_end)) {
- ppd->cpspec->chase_end = 0;
- qib_set_ib_7220_lstate(ppd,
- QLOGIC_IB_IBCC_LINKCMD_DOWN,
- QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
- ppd->cpspec->chase_timer.expires = jiffies +
- QIB_CHASE_DIS_TIME;
- add_timer(&ppd->cpspec->chase_timer);
- } else if (!ppd->cpspec->chase_end)
- ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
- break;
-
- default:
- ppd->cpspec->chase_end = 0;
- break;
- }
-}
-
-static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
-{
- char *msg;
- u64 ignore_this_time = 0;
- u64 iserr = 0;
- struct qib_pportdata *ppd = dd->pport;
- u64 mask;
-
- /* don't report errors that are masked */
- errs &= dd->cspec->errormask;
- msg = dd->cspec->emsgbuf;
-
- /* do these first, they are most important */
- if (errs & ERR_MASK(HardwareErr))
- qib_7220_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
-
- if (errs & QLOGIC_IB_E_SDMAERRS)
- sdma_7220_errors(ppd, errs);
-
- if (errs & ~IB_E_BITSEXTANT)
- qib_dev_err(dd,
- "error interrupt with unknown errors %llx set\n",
- (unsigned long long) (errs & ~IB_E_BITSEXTANT));
-
- if (errs & E_SUM_ERRS) {
- qib_disarm_7220_senderrbufs(ppd);
- if ((errs & E_SUM_LINK_PKTERRS) &&
- !(ppd->lflags & QIBL_LINKACTIVE)) {
- /*
- * This can happen when trying to bring the link
- * up, but the IB link changes state at the "wrong"
- * time. The IB logic then complains that the packet
- * isn't valid. We don't want to confuse people, so
- * we just don't print them, except at debug
- */
- ignore_this_time = errs & E_SUM_LINK_PKTERRS;
- }
- } else if ((errs & E_SUM_LINK_PKTERRS) &&
- !(ppd->lflags & QIBL_LINKACTIVE)) {
- /*
- * This can happen when SMA is trying to bring the link
- * up, but the IB link changes state at the "wrong" time.
- * The IB logic then complains that the packet isn't
- * valid. We don't want to confuse people, so we just
- * don't print them, except at debug
- */
- ignore_this_time = errs & E_SUM_LINK_PKTERRS;
- }
-
- qib_write_kreg(dd, kr_errclear, errs);
-
- errs &= ~ignore_this_time;
- if (!errs)
- goto done;
-
- /*
- * The ones we mask off are handled specially below
- * or above. Also mask SDMADISABLED by default as it
- * is too chatty.
- */
- mask = ERR_MASK(IBStatusChanged) |
- ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
- ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);
-
- qib_decode_7220_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
-
- if (errs & E_SUM_PKTERRS)
- qib_stats.sps_rcverrs++;
- if (errs & E_SUM_ERRS)
- qib_stats.sps_txerrs++;
- iserr = errs & ~(E_SUM_PKTERRS | QLOGIC_IB_E_PKTERRS |
- ERR_MASK(SDmaDisabledErr));
-
- if (errs & ERR_MASK(IBStatusChanged)) {
- u64 ibcs;
-
- ibcs = qib_read_kreg64(dd, kr_ibcstatus);
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
- handle_7220_chase(ppd, ibcs);
-
- /* Update our picture of width and speed from chip */
- ppd->link_width_active =
- ((ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1) ?
- IB_WIDTH_4X : IB_WIDTH_1X;
- ppd->link_speed_active =
- ((ibcs >> IBA7220_LINKSPEED_SHIFT) & 1) ?
- QIB_IB_DDR : QIB_IB_SDR;
-
- /*
- * Since going into a recovery state causes the link state
- * to go down and since recovery is transitory, it is better
- * if we "miss" ever seeing the link training state go into
- * recovery (i.e., ignore this transition for link state
- * special handling purposes) without updating lastibcstat.
- */
- if (qib_7220_phys_portstate(ibcs) !=
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER)
- qib_handle_e_ibstatuschanged(ppd, ibcs);
- }
-
- if (errs & ERR_MASK(ResetNegated)) {
- qib_dev_err(dd,
- "Got reset, requires re-init (unload and reload driver)\n");
- dd->flags &= ~QIB_INITTED; /* needs re-init */
- /* mark as having had error */
- *dd->devstatusp |= QIB_STATUS_HWERROR;
- *dd->pport->statusp &= ~QIB_STATUS_IB_CONF;
- }
-
- if (*msg && iserr)
- qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
-
- if (ppd->state_wanted & ppd->lflags)
- wake_up_interruptible(&ppd->state_wait);
-
- /*
- * If there were hdrq or egrfull errors, wake up any processes
- * waiting in poll. We used to try to check which contexts had
- * the overflow, but given the cost of that and the chip reads
- * to support it, it's better to just wake everybody up if we
- * get an overflow; waiters can poll again if it's not them.
- */
- if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
- qib_handle_urcv(dd, ~0U);
- if (errs & ERR_MASK(RcvEgrFullErr))
- qib_stats.sps_buffull++;
- else
- qib_stats.sps_hdrfull++;
- }
-done:
- return;
-}
-
-/* enable/disable chip from delivering interrupts */
-static void qib_7220_set_intr_state(struct qib_devdata *dd, u32 enable)
-{
- if (enable) {
- if (dd->flags & QIB_BADINTR)
- return;
- qib_write_kreg(dd, kr_intmask, ~0ULL);
- /* force re-interrupt of any pending interrupts. */
- qib_write_kreg(dd, kr_intclear, 0ULL);
- } else
- qib_write_kreg(dd, kr_intmask, 0ULL);
-}
-
-/*
- * Try to cleanup as much as possible for anything that might have gone
- * wrong while in freeze mode, such as pio buffers being written by user
- * processes (causing armlaunch), send errors due to going into freeze mode,
- * etc., and try to avoid causing extra interrupts while doing so.
- * Forcibly update the in-memory pioavail register copies after cleanup
- * because the chip won't do it while in freeze mode (the register values
- * themselves are kept correct).
- * Make sure that we don't lose any important interrupts by using the chip
- * feature that says that writing 0 to a bit in *clear that is set in
- * *status will cause an interrupt to be generated again (if allowed by
- * the *mask value).
- * This is in chip-specific code because of all of the register accesses,
- * even though the details are similar on most chips.
- */
-static void qib_7220_clear_freeze(struct qib_devdata *dd)
-{
- /* disable error interrupts, to avoid confusion */
- qib_write_kreg(dd, kr_errmask, 0ULL);
-
- /* also disable interrupts; errormask is sometimes overwritten */
- qib_7220_set_intr_state(dd, 0);
-
- qib_cancel_sends(dd->pport);
-
- /* clear the freeze, and be sure chip saw it */
- qib_write_kreg(dd, kr_control, dd->control);
- qib_read_kreg32(dd, kr_scratch);
-
- /* force in-memory update now we are out of freeze */
- qib_force_pio_avail_update(dd);
-
- /*
- * force new interrupt if any hwerr, error or interrupt bits are
- * still set, and clear "safe" send packet errors related to freeze
- * and cancelling sends. Re-enable error interrupts before possible
- * force of re-interrupt on pending interrupts.
- */
- qib_write_kreg(dd, kr_hwerrclear, 0ULL);
- qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
- qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
- qib_7220_set_intr_state(dd, 1);
-}
-
-/**
- * qib_7220_handle_hwerrors - display hardware errors.
- * @dd: the qlogic_ib device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
- * Most hardware errors are catastrophic, but for right now,
- * we'll print them and continue. We reuse the same message buffer
- * as handle_7220_errors() to avoid excessive stack usage.
- */
-static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
- size_t msgl)
-{
- u64 hwerrs;
- u32 bits, ctrl;
- int isfatal = 0;
- char *bitsmsg;
-
- hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
- if (!hwerrs)
- goto bail;
- if (hwerrs == ~0ULL) {
- qib_dev_err(dd,
- "Read of hardware error status failed (all bits set); ignoring\n");
- goto bail;
- }
- qib_stats.sps_hwerrs++;
-
- /*
- * Always clear the error status register, except PowerOnBISTFailed,
- * regardless of whether we continue or stop using the chip.
- * We want that set so we know it failed, even across driver reload.
- * We'll still ignore it in the hwerrmask. We do this partly for
- * diagnostics, but also for support.
- */
- qib_write_kreg(dd, kr_hwerrclear,
- hwerrs & ~HWE_MASK(PowerOnBISTFailed));
-
- hwerrs &= dd->cspec->hwerrmask;
-
- if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC |
- RXE_PARITY))
- qib_devinfo(dd->pcidev,
- "Hardware error: hwerr=0x%llx (cleared)\n",
- (unsigned long long) hwerrs);
-
- if (hwerrs & ~IB_HWE_BITSEXTANT)
- qib_dev_err(dd,
- "hwerror interrupt with unknown errors %llx set\n",
- (unsigned long long) (hwerrs & ~IB_HWE_BITSEXTANT));
-
- if (hwerrs & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR)
- qib_sd7220_clr_ibpar(dd);
-
- ctrl = qib_read_kreg32(dd, kr_control);
- if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
- /*
- * Parity errors in send memory are recoverable by h/w;
- * just do housekeeping, exit freeze mode and continue.
- */
- if (hwerrs & (TXEMEMPARITYERR_PIOBUF |
- TXEMEMPARITYERR_PIOPBC)) {
- qib_7220_txe_recover(dd);
- hwerrs &= ~(TXEMEMPARITYERR_PIOBUF |
- TXEMEMPARITYERR_PIOPBC);
- }
- if (hwerrs)
- isfatal = 1;
- else
- qib_7220_clear_freeze(dd);
- }
-
- *msg = '\0';
-
- if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
- isfatal = 1;
- strlcat(msg,
- "[Memory BIST test failed, InfiniPath hardware unusable]",
- msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
- }
-
- qib_format_hwerrors(hwerrs, qib_7220_hwerror_msgs,
- ARRAY_SIZE(qib_7220_hwerror_msgs), msg, msgl);
-
- bitsmsg = dd->cspec->bitsmsgbuf;
- if (hwerrs & (QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK <<
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT)) {
- bits = (u32) ((hwerrs >>
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
- QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
- snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
- "[PCIe Mem Parity Errs %x] ", bits);
- strlcat(msg, bitsmsg, msgl);
- }
-
-#define _QIB_PLL_FAIL (QLOGIC_IB_HWE_COREPLL_FBSLIP | \
- QLOGIC_IB_HWE_COREPLL_RFSLIP)
-
- if (hwerrs & _QIB_PLL_FAIL) {
- isfatal = 1;
- snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
- "[PLL failed (%llx), InfiniPath hardware unusable]",
- (unsigned long long) hwerrs & _QIB_PLL_FAIL);
- strlcat(msg, bitsmsg, msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->cspec->hwerrmask &= ~(hwerrs & _QIB_PLL_FAIL);
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
- }
-
- if (hwerrs & QLOGIC_IB_HWE_SERDESPLLFAILED) {
- /*
- * If it occurs, it is left masked since the external
- * interface is unused.
- */
- dd->cspec->hwerrmask &= ~QLOGIC_IB_HWE_SERDESPLLFAILED;
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
- }
-
- qib_dev_err(dd, "%s hardware error\n", msg);
-
- if (isfatal && !dd->diag_client) {
- qib_dev_err(dd,
- "Fatal Hardware Error, no longer usable, SN %.16s\n",
- dd->serial);
- /*
- * For /sys status file and user programs to print; if no
- * trailing brace is copied, we'll know it was truncated.
- */
- if (dd->freezemsg)
- snprintf(dd->freezemsg, dd->freezelen,
- "{%s}", msg);
- qib_disable_after_error(dd);
- }
-bail:;
-}
-
-/**
- * qib_7220_init_hwerrors - enable hardware errors
- * @dd: the qlogic_ib device
- *
- * Now that we have finished initializing everything that might reasonably
- * cause a hardware error, and cleared those error bits as they occur,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask.
- */
-static void qib_7220_init_hwerrors(struct qib_devdata *dd)
-{
- u64 val;
- u64 extsval;
-
- extsval = qib_read_kreg64(dd, kr_extstatus);
-
- if (!(extsval & (QLOGIC_IB_EXTS_MEMBIST_ENDTEST |
- QLOGIC_IB_EXTS_MEMBIST_DISABLED)))
- qib_dev_err(dd, "MemBIST did not complete!\n");
- if (extsval & QLOGIC_IB_EXTS_MEMBIST_DISABLED)
- qib_devinfo(dd->pcidev, "MemBIST is disabled.\n");
-
- val = ~0ULL; /* default to all hwerrors become interrupts, */
-
- val &= ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
- dd->cspec->hwerrmask = val;
-
- qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
-
- /* clear all */
- qib_write_kreg(dd, kr_errclear, ~0ULL);
- /* enable errors that are masked, at least this first time. */
- qib_write_kreg(dd, kr_errmask, ~0ULL);
- dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
- /* clear any interrupts up to this point (ints still not enabled) */
- qib_write_kreg(dd, kr_intclear, ~0ULL);
-}
-
-/*
- * Disable and enable the armlaunch error. Used for PIO bandwidth testing
- * on chips that are count-based, rather than trigger-based. There is no
- * reference counting, but that's also fine, given the intended use.
- * Only chip-specific because it's all register accesses
- */
-static void qib_set_7220_armlaunch(struct qib_devdata *dd, u32 enable)
-{
- if (enable) {
- qib_write_kreg(dd, kr_errclear, ERR_MASK(SendPioArmLaunchErr));
- dd->cspec->errormask |= ERR_MASK(SendPioArmLaunchErr);
- } else
- dd->cspec->errormask &= ~ERR_MASK(SendPioArmLaunchErr);
- qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
-}
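-
-/*
- * Usage sketch (assumed from the comment above): bracket a PIO
- * bandwidth test by disabling armlaunch errors, then restoring them:
- *
- *	qib_set_7220_armlaunch(dd, 0);
- *	(run the PIO bandwidth test)
- *	qib_set_7220_armlaunch(dd, 1);
- */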
-
-/*
- * Formerly took parameter <which> in pre-shifted, pre-merged form,
- * with LinkCmd and LinkInitCmd together, and assumed that zero
- * meant NOP.
- */
-static void qib_set_ib_7220_lstate(struct qib_pportdata *ppd, u16 linkcmd,
- u16 linitcmd)
-{
- u64 mod_wd;
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
-
- if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
- /*
- * If we are told to disable, note that so link-recovery
- * code does not attempt to bring us back up.
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
- /*
- * Any other linkinitcmd will lead to LINKDOWN and then
- * to INIT (if all is well), so clear flag to let
- * link-recovery code attempt to bring us back up.
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- }
-
- mod_wd = (linkcmd << IBA7220_IBCC_LINKCMD_SHIFT) |
- (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
-
- qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl | mod_wd);
- /* write to chip to prevent back-to-back writes of ibc reg */
- qib_write_kreg(dd, kr_scratch, 0);
-}
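-
-/*
- * Usage sketch: this is the same call reenable_7220_chase() makes
- * above to take the link down and leave it polling:
- *
- *	qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
- *			       QLOGIC_IB_IBCC_LINKINITCMD_POLL);
- */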
-
-/*
- * All detailed interaction with the SerDes has been moved to qib_sd7220.c
- *
- * The portion of IBA7220-specific bringup_serdes() that actually deals with
- * registers and memory within the SerDes itself is qib_sd7220_init().
- */
-
-/**
- * qib_7220_bringup_serdes - bring up the serdes
- * @ppd: physical port on the qlogic_ib device
- */
-static int qib_7220_bringup_serdes(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 val, prev_val, guid, ibc;
- int ret = 0;
-
- /* Put IBC in reset, sends disabled */
- dd->control &= ~QLOGIC_IB_C_LINKENABLE;
- qib_write_kreg(dd, kr_control, 0ULL);
-
- if (qib_compat_ddr_negotiate) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymsnap = read_7220_creg32(dd, cr_ibsymbolerr);
- ppd->cpspec->iblnkerrsnap =
- read_7220_creg32(dd, cr_iblinkerrrecov);
- }
-
- /* flowcontrolwatermark is in units of KBytes */
- ibc = 0x5ULL << SYM_LSB(IBCCtrl, FlowCtrlWaterMark);
- /*
- * How often flowctrl sent. More or less in usecs; balance against
- * watermark value, so that in theory senders always get a flow
- * control update in time to not let the IB link go idle.
- */
- ibc |= 0x3ULL << SYM_LSB(IBCCtrl, FlowCtrlPeriod);
- /* max error tolerance */
- ibc |= 0xfULL << SYM_LSB(IBCCtrl, PhyerrThreshold);
- /* use "real" buffer space for IB credit flow control */
- ibc |= 4ULL << SYM_LSB(IBCCtrl, CreditScale);
- /* overrun threshold */
- ibc |= 0xfULL << SYM_LSB(IBCCtrl, OverrunThreshold);
- /*
- * set initial max size pkt IBC will send, including ICRC; it's the
- * PIO buffer size in dwords, less the qword pbc, plus 1 for ICRC;
- * also see qib_set_mtu()
- */
- ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) << SYM_LSB(IBCCtrl, MaxPktLen);
- ppd->cpspec->ibcctrl = ibc; /* without linkcmd or linkinitcmd! */
-
- /* initially come up waiting for TS1, without sending anything. */
- val = ppd->cpspec->ibcctrl | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
- QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
- qib_write_kreg(dd, kr_ibcctrl, val);
-
- if (!ppd->cpspec->ibcddrctrl) {
- /* not on re-init after reset */
- ppd->cpspec->ibcddrctrl = qib_read_kreg64(dd, kr_ibcddrctrl);
-
- if (ppd->link_speed_enabled == (QIB_IB_SDR | QIB_IB_DDR))
- ppd->cpspec->ibcddrctrl |=
- IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- else
- ppd->cpspec->ibcddrctrl |=
- ppd->link_speed_enabled == QIB_IB_DDR ?
- IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
- if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
- (IB_WIDTH_1X | IB_WIDTH_4X))
- ppd->cpspec->ibcddrctrl |= IBA7220_IBC_WIDTH_AUTONEG;
- else
- ppd->cpspec->ibcddrctrl |=
- ppd->link_width_enabled == IB_WIDTH_4X ?
- IBA7220_IBC_WIDTH_4X_ONLY :
- IBA7220_IBC_WIDTH_1X_ONLY;
-
- /* always enable these on driver reload, not sticky */
- ppd->cpspec->ibcddrctrl |=
- IBA7220_IBC_RXPOL_MASK << IBA7220_IBC_RXPOL_SHIFT;
- ppd->cpspec->ibcddrctrl |=
- IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
-
- /* enable automatic lane reversal detection for receive */
- ppd->cpspec->ibcddrctrl |= IBA7220_IBC_LANE_REV_SUPPORTED;
- } else
- /* write to chip to prevent back-to-back writes of ibc reg */
- qib_write_kreg(dd, kr_scratch, 0);
-
- qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
- qib_write_kreg(dd, kr_scratch, 0);
-
- qib_write_kreg(dd, kr_ncmodectrl, 0ULL);
- qib_write_kreg(dd, kr_scratch, 0);
-
- ret = qib_sd7220_init(dd);
-
- val = qib_read_kreg64(dd, kr_xgxs_cfg);
- prev_val = val;
- val |= QLOGIC_IB_XGXS_FC_SAFE;
- if (val != prev_val) {
- qib_write_kreg(dd, kr_xgxs_cfg, val);
- qib_read_kreg32(dd, kr_scratch);
- }
- if (val & QLOGIC_IB_XGXS_RESET)
- val &= ~QLOGIC_IB_XGXS_RESET;
- if (val != prev_val)
- qib_write_kreg(dd, kr_xgxs_cfg, val);
-
- /* first time through, set port guid */
- if (!ppd->guid)
- ppd->guid = dd->base_guid;
- guid = be64_to_cpu(ppd->guid);
-
- qib_write_kreg(dd, kr_hrtbt_guid, guid);
- if (!ret) {
- dd->control |= QLOGIC_IB_C_LINKENABLE;
- qib_write_kreg(dd, kr_control, dd->control);
- } else
- /* write to chip to prevent back-to-back writes of ibc reg */
- qib_write_kreg(dd, kr_scratch, 0);
- return ret;
-}
-
-/**
- * qib_7220_quiet_serdes - set serdes to txidle
- * @ppd: physical port of the qlogic_ib device
- * Called when driver is being unloaded
- */
-static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
-{
- u64 val;
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
-
- /* disable IBC */
- dd->control &= ~QLOGIC_IB_C_LINKENABLE;
- qib_write_kreg(dd, kr_control,
- dd->control | QLOGIC_IB_C_FREEZEMODE);
-
- ppd->cpspec->chase_end = 0;
- if (ppd->cpspec->chase_timer.function) /* if initted */
- timer_delete_sync(&ppd->cpspec->chase_timer);
-
- if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
- ppd->cpspec->ibdeltainprog) {
- u64 diagc;
-
- /* enable counter writes */
- diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
- qib_write_kreg(dd, kr_hwdiagctrl,
- diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
-
- if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
- val = read_7220_creg32(dd, cr_ibsymbolerr);
- if (ppd->cpspec->ibdeltainprog)
- val -= val - ppd->cpspec->ibsymsnap;
- val -= ppd->cpspec->ibsymdelta;
- write_7220_creg(dd, cr_ibsymbolerr, val);
- }
- if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
- val = read_7220_creg32(dd, cr_iblinkerrrecov);
- if (ppd->cpspec->ibdeltainprog)
- val -= val - ppd->cpspec->iblnkerrsnap;
- val -= ppd->cpspec->iblnkerrdelta;
- write_7220_creg(dd, cr_iblinkerrrecov, val);
- }
-
- /* and disable counter writes */
- qib_write_kreg(dd, kr_hwdiagctrl, diagc);
- }
- qib_set_ib_7220_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- wake_up(&ppd->cpspec->autoneg_wait);
- cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
-
- shutdown_7220_relock_poll(ppd->dd);
- val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
- val |= QLOGIC_IB_XGXS_RESET;
- qib_write_kreg(ppd->dd, kr_xgxs_cfg, val);
-}
-
-/**
- * qib_setup_7220_setextled - set the state of the two external LEDs
- * @ppd: physical port on the qlogic_ib device
- * @on: whether the link is up or not
- *
- * The exact combo of LEDs if on is true is determined by looking
- * at the ibcstatus.
- *
- * These LEDs indicate the physical and logical state of IB link.
- * For this chip (at least with recommended board pinouts), LED1
- * is Yellow (logical state) and LED2 is Green (physical state),
- *
- * Note: We try to match the Mellanox HCA LED behavior as best
- * we can. Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate. That's
- * visible overhead, so not something we will do.
- *
- */
-static void qib_setup_7220_setextled(struct qib_pportdata *ppd, u32 on)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 extctl, ledblink = 0, val, lst, ltst;
- unsigned long flags;
-
- /*
- * The diags use the LED to indicate diag info, so we leave
- * the external LED alone when the diags are running.
- */
- if (dd->diag_client)
- return;
-
- if (ppd->led_override) {
- ltst = (ppd->led_override & QIB_LED_PHYS) ?
- IB_PHYSPORTSTATE_LINKUP : IB_PHYSPORTSTATE_DISABLED;
- lst = (ppd->led_override & QIB_LED_LOG) ?
- IB_PORT_ACTIVE : IB_PORT_DOWN;
- } else if (on) {
- val = qib_read_kreg64(dd, kr_ibcstatus);
- ltst = qib_7220_phys_portstate(val);
- lst = qib_7220_iblink_state(val);
- } else {
- ltst = 0;
- lst = 0;
- }
-
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- extctl = dd->cspec->extctrl & ~(SYM_MASK(EXTCtrl, LEDPriPortGreenOn) |
- SYM_MASK(EXTCtrl, LEDPriPortYellowOn));
- if (ltst == IB_PHYSPORTSTATE_LINKUP) {
- extctl |= SYM_MASK(EXTCtrl, LEDPriPortGreenOn);
- /*
- * counts are in chip clock (4ns) periods.
- * This is 66.6 ms on,
- * 187.5 ms (3/16 sec) off, with packets rcvd
- */
- ledblink = ((66600 * 1000UL / 4) << IBA7220_LEDBLINK_ON_SHIFT)
- | ((187500 * 1000UL / 4) << IBA7220_LEDBLINK_OFF_SHIFT);
- }
- if (lst == IB_PORT_ACTIVE)
- extctl |= SYM_MASK(EXTCtrl, LEDPriPortYellowOn);
- dd->cspec->extctrl = extctl;
- qib_write_kreg(dd, kr_extctrl, extctl);
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
-
- if (ledblink) /* blink the LED on packet receive */
- qib_write_kreg(dd, kr_rcvpktledcnt, ledblink);
-}
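-
-/*
- * Worked example for the blink encoding above: the ON count is
- * 66600 * 1000 / 4 = 16,650,000 chip-clock periods, which at 4 ns
- * per period is 66.6 ms; likewise 187500 * 1000 / 4 = 46,875,000
- * periods, i.e. 187.5 ms OFF.
- */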
-
-/*
- * qib_setup_7220_cleanup - clean up any chip-specific stuff
- * @dd: the qlogic_ib device
- *
- * This is called during driver unload.
- */
-static void qib_setup_7220_cleanup(struct qib_devdata *dd)
-{
- qib_free_irq(dd);
- kfree(dd->cspec->cntrs);
- kfree(dd->cspec->portcntrs);
-}
-
-/*
- * This is only called for SDmaInt.
- * SDmaDisabled is handled on the error path.
- */
-static void sdma_7220_intr(struct qib_pportdata *ppd, u64 istat)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
- switch (ppd->sdma_state.current_state) {
- case qib_sdma_state_s00_hw_down:
- break;
-
- case qib_sdma_state_s10_hw_start_up_wait:
- __qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
- break;
-
- case qib_sdma_state_s20_idle:
- break;
-
- case qib_sdma_state_s30_sw_clean_up_wait:
- break;
-
- case qib_sdma_state_s40_hw_clean_up_wait:
- break;
-
- case qib_sdma_state_s50_hw_halt_wait:
- __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
- break;
-
- case qib_sdma_state_s99_running:
- /* too chatty to print here */
- __qib_sdma_intr(ppd);
- break;
- }
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-}
-
-static void qib_wantpiobuf_7220_intr(struct qib_devdata *dd, u32 needint)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- if (needint) {
- if (!(dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
- goto done;
- /*
- * blip the availupd off, next write will be on, so
- * we ensure an avail update, regardless of threshold or
- * buffers becoming free, whenever we want an interrupt
- */
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl &
- ~SYM_MASK(SendCtrl, SendBufAvailUpd));
- qib_write_kreg(dd, kr_scratch, 0ULL);
- dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
- } else
- dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
- qib_write_kreg(dd, kr_scratch, 0ULL);
-done:
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-}
-
-/*
- * Handle errors and unusual events first, separate function
- * to improve cache hits for fast path interrupt handling.
- */
-static noinline void unlikely_7220_intr(struct qib_devdata *dd, u64 istat)
-{
- if (unlikely(istat & ~QLOGIC_IB_I_BITSEXTANT))
- qib_dev_err(dd,
- "interrupt with unknown interrupts %Lx set\n",
- istat & ~QLOGIC_IB_I_BITSEXTANT);
-
- if (istat & QLOGIC_IB_I_GPIO) {
- u32 gpiostatus;
-
- /*
- * Boards for this chip currently don't use GPIO interrupts,
- * so clear by writing GPIOstatus to GPIOclear, and complain
- * to alert the developer. To avoid endless repeats, clear
- * the bits in the mask, since there is some kind of
- * programming error or chip problem.
- */
- gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
- /*
- * In theory, writing GPIOstatus to GPIOclear could
- * have a bad side-effect on some diagnostic that wanted
- * to poll for a status-change, but the various shadows
- * make that problematic at best. Diags will just suppress
- * all GPIO interrupts during such tests.
- */
- qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
-
- if (gpiostatus) {
- const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
- u32 gpio_irq = mask & gpiostatus;
-
- /*
- * A bit set in status and (chip) Mask register
- * would cause an interrupt. Since we are not
- * expecting any, report it. Also check that the
- * chip reflects our shadow, report issues,
- * and refresh from the shadow.
- */
- /*
- * Clear any troublemakers, and update chip
- * from shadow
- */
- dd->cspec->gpio_mask &= ~gpio_irq;
- qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
- }
- }
-
- if (istat & QLOGIC_IB_I_ERROR) {
- u64 estat;
-
- qib_stats.sps_errints++;
- estat = qib_read_kreg64(dd, kr_errstatus);
- if (!estat)
- qib_devinfo(dd->pcidev,
- "error interrupt (%Lx), but no error bits set!\n",
- istat);
- else
- handle_7220_errors(dd, estat);
- }
-}
-
-static irqreturn_t qib_7220intr(int irq, void *data)
-{
- struct qib_devdata *dd = data;
- irqreturn_t ret;
- u64 istat;
- u64 ctxtrbits;
- u64 rmask;
- unsigned i;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- ret = IRQ_HANDLED;
- goto bail;
- }
-
- istat = qib_read_kreg64(dd, kr_intstatus);
-
- if (unlikely(!istat)) {
- ret = IRQ_NONE; /* not our interrupt, or already handled */
- goto bail;
- }
- if (unlikely(istat == -1)) {
- qib_bad_intrstatus(dd);
- /* don't know if it was our interrupt or not */
- ret = IRQ_NONE;
- goto bail;
- }
-
- this_cpu_inc(*dd->int_counter);
- if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT |
- QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR)))
- unlikely_7220_intr(dd, istat);
-
- /*
- * Clear the interrupt bits we found set, relatively early, so we
- * know the chip will have seen this by the time we process
- * the queue, and will re-interrupt if necessary. The processor
- * itself won't take the interrupt again until we return.
- */
- qib_write_kreg(dd, kr_intclear, istat);
-
- /*
- * Handle kernel receive queues before checking for pio buffers
- * available since receives can overflow; piobuf waiters can afford
- * a few extra cycles, since they were waiting anyway.
- */
- ctxtrbits = istat &
- ((QLOGIC_IB_I_RCVAVAIL_MASK << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
- (QLOGIC_IB_I_RCVURG_MASK << QLOGIC_IB_I_RCVURG_SHIFT));
- if (ctxtrbits) {
- rmask = (1ULL << QLOGIC_IB_I_RCVAVAIL_SHIFT) |
- (1ULL << QLOGIC_IB_I_RCVURG_SHIFT);
- for (i = 0; i < dd->first_user_ctxt; i++) {
- if (ctxtrbits & rmask) {
- ctxtrbits &= ~rmask;
- qib_kreceive(dd->rcd[i], NULL, NULL);
- }
- rmask <<= 1;
- }
- if (ctxtrbits) {
- ctxtrbits =
- (ctxtrbits >> QLOGIC_IB_I_RCVAVAIL_SHIFT) |
- (ctxtrbits >> QLOGIC_IB_I_RCVURG_SHIFT);
- qib_handle_urcv(dd, ctxtrbits);
- }
- }
-
- /* only call for SDmaInt */
- if (istat & QLOGIC_IB_I_SDMAINT)
- sdma_7220_intr(dd->pport, istat);
-
- if ((istat & QLOGIC_IB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
- qib_ib_piobufavail(dd);
-
- ret = IRQ_HANDLED;
-bail:
- return ret;
-}
-
-/*
- * Set up our chip-specific interrupt handler.
- * The interrupt type has already been setup, so
- * we just need to do the registration and error checking.
- * If we are using MSI interrupts, we may fall back to
- * INTx later, if the interrupt handler doesn't get called
- * within 1/2 second (see verify_interrupt()).
- */
-static void qib_setup_7220_interrupt(struct qib_devdata *dd)
-{
- int ret;
-
- ret = pci_request_irq(dd->pcidev, 0, qib_7220intr, NULL, dd,
- QIB_DRV_NAME);
- if (ret)
- qib_dev_err(dd, "Couldn't setup %s interrupt (irq=%d): %d\n",
- dd->pcidev->msi_enabled ? "MSI" : "INTx",
- pci_irq_vector(dd->pcidev, 0), ret);
-}
-
-/**
- * qib_7220_boardname - fill in the board name
- * @dd: the qlogic_ib device
- *
- * info is based on the board revision register
- */
-static void qib_7220_boardname(struct qib_devdata *dd)
-{
- u32 boardid;
-
- boardid = SYM_FIELD(dd->revision, Revision,
- BoardID);
-
- switch (boardid) {
- case 1:
- dd->boardname = "InfiniPath_QLE7240";
- break;
- case 2:
- dd->boardname = "InfiniPath_QLE7280";
- break;
- default:
- qib_dev_err(dd, "Unknown 7220 board with ID %u\n", boardid);
- dd->boardname = "Unknown_InfiniPath_7220";
- break;
- }
-
- if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2)
- qib_dev_err(dd,
- "Unsupported InfiniPath hardware revision %u.%u!\n",
- dd->majrev, dd->minrev);
-
- snprintf(dd->boardversion, sizeof(dd->boardversion),
- "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
- QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
- (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
- dd->majrev, dd->minrev,
- (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
-}
-
-/*
- * This routine sleeps, so it can only be called from user context, not
- * from interrupt context.
- */
-static int qib_setup_7220_reset(struct qib_devdata *dd)
-{
- u64 val;
- int i;
- int ret;
- u16 cmdval;
- u8 int_line, clinesz;
- unsigned long flags;
-
- qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
-
- /* Use dev_err so it shows up in logs, etc. */
- qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
-
- /* no interrupts till re-initted */
- qib_7220_set_intr_state(dd, 0);
-
- dd->pport->cpspec->ibdeltainprog = 0;
- dd->pport->cpspec->ibsymdelta = 0;
- dd->pport->cpspec->iblnkerrdelta = 0;
-
- /*
- * Keep chip from being accessed until we are ready. Use
- * writeq() directly, to allow the write even though QIB_PRESENT
- * isn't set.
- */
- dd->flags &= ~(QIB_INITTED | QIB_PRESENT);
- /* so we check interrupts work again */
- dd->z_int_counter = qib_int_counter(dd);
- val = dd->control | QLOGIC_IB_C_RESET;
- writeq(val, &dd->kregbase[kr_control]);
- mb(); /* prevent compiler reordering around actual reset */
-
- for (i = 1; i <= 5; i++) {
- /*
- * Allow MBIST, etc. to complete; longer on each retry.
- * We sometimes get machine checks from bus timeout if no
- * response, so for now, make it *really* long.
- */
- msleep(1000 + (1 + i) * 2000);
-
- qib_pcie_reenable(dd, cmdval, int_line, clinesz);
-
- /*
- * Use readq directly, so we don't need to mark it as PRESENT
- * until we get a successful indication that all is well.
- */
- val = readq(&dd->kregbase[kr_revision]);
- if (val == dd->revision) {
- dd->flags |= QIB_PRESENT; /* it's back */
- ret = qib_reinit_intr(dd);
- goto bail;
- }
- }
- ret = 0; /* failed */
-
-bail:
- if (ret) {
- if (qib_pcie_params(dd, dd->lbus_width, NULL))
- qib_dev_err(dd,
- "Reset failed to setup PCIe or interrupts; continuing anyway\n");
-
- /* hold IBC in reset, no sends, etc till later */
- qib_write_kreg(dd, kr_control, 0ULL);
-
- /* clear the reset error, init error/hwerror mask */
- qib_7220_init_hwerrors(dd);
-
- /* do setup similar to speed or link-width changes */
- if (dd->pport->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK)
- dd->cspec->presets_needed = 1;
- spin_lock_irqsave(&dd->pport->lflags_lock, flags);
- dd->pport->lflags |= QIBL_IB_FORCE_NOTIFY;
- dd->pport->lflags &= ~QIBL_IB_AUTONEG_FAILED;
- spin_unlock_irqrestore(&dd->pport->lflags_lock, flags);
- }
-
- return ret;
-}
-
-/**
- * qib_7220_put_tid - write a TID to the chip
- * @dd: the qlogic_ib device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: 0 for eager, 1 for expected
- * @pa: physical address of in memory buffer; tidinvalid if freeing
- */
-static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
- u32 type, unsigned long pa)
-{
- if (pa != dd->tidinvalid) {
- u64 chippa = pa >> IBA7220_TID_PA_SHIFT;
-
- /* paranoia checks */
- if (pa != (chippa << IBA7220_TID_PA_SHIFT)) {
- qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
- pa);
- return;
- }
- if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
- qib_dev_err(dd,
- "Physical page address 0x%lx larger than supported\n",
- pa);
- return;
- }
-
- if (type == RCVHQ_RCV_TYPE_EAGER)
- chippa |= dd->tidtemplate;
- else /* for now, always full 4KB page */
- chippa |= IBA7220_TID_SZ_4K;
- pa = chippa;
- }
- writeq(pa, tidptr);
-}
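-
-/*
- * Worked example (address hypothetical; shift follows from the 2KB
- * alignment check above, i.e. IBA7220_TID_PA_SHIFT == 11): an eager
- * buffer at pa 0x2b4d000 yields chippa 0x2b4d000 >> 11 == 0x569a,
- * which is then OR'd with dd->tidtemplate before the writeq.
- */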
-
-/**
- * qib_7220_clear_tids - clear all TID entries for a ctxt, expected and eager
- * @dd: the qlogic_ib device
- * @rcd: the ctxt
- *
- * clear all TID entries for a ctxt, expected and eager.
- * Used from qib_close(). On this chip, TID values fit in 32 bits,
- * but the entries sit on 64-bit boundaries and are written as
- * 64-bit quantities (see qib_7220_put_tid()), so tidbase is
- * declared as u64 * for the pointer math.
- */
-static void qib_7220_clear_tids(struct qib_devdata *dd,
- struct qib_ctxtdata *rcd)
-{
- u64 __iomem *tidbase;
- unsigned long tidinv;
- u32 ctxt;
- int i;
-
- if (!dd->kregbase || !rcd)
- return;
-
- ctxt = rcd->ctxt;
-
- tidinv = dd->tidinvalid;
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->kregbase) +
- dd->rcvtidbase +
- ctxt * dd->rcvtidcnt * sizeof(*tidbase));
-
- for (i = 0; i < dd->rcvtidcnt; i++)
- qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
- tidinv);
-
- tidbase = (u64 __iomem *)
- ((char __iomem *)(dd->kregbase) +
- dd->rcvegrbase +
- rcd->rcvegr_tid_base * sizeof(*tidbase));
-
- for (i = 0; i < rcd->rcvegrcnt; i++)
- qib_7220_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
- tidinv);
-}
-
-/**
- * qib_7220_tidtemplate - setup constants for TID updates
- * @dd: the qlogic_ib device
- *
- * We set up values that we use a lot, to avoid recalculating each time.
- */
-static void qib_7220_tidtemplate(struct qib_devdata *dd)
-{
- if (dd->rcvegrbufsize == 2048)
- dd->tidtemplate = IBA7220_TID_SZ_2K;
- else if (dd->rcvegrbufsize == 4096)
- dd->tidtemplate = IBA7220_TID_SZ_4K;
- dd->tidinvalid = 0;
-}
-
-/**
- * qib_7220_get_base_info - set chip-specific flags for user code
- * @rcd: the qlogic_ib ctxt
- * @kinfo: qib_base_info pointer
- *
- * We set the PCIE flag because the lower bandwidth on PCIe vs
- * HyperTransport can affect some user packet algorithms.
- */
-static int qib_7220_get_base_info(struct qib_ctxtdata *rcd,
- struct qib_base_info *kinfo)
-{
- kinfo->spi_runtime_flags |= QIB_RUNTIME_PCIE |
- QIB_RUNTIME_NODMA_RTAIL | QIB_RUNTIME_SDMA;
-
- if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
- kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
-
- return 0;
-}
-
-static struct qib_message_header *
-qib_7220_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
-{
- u32 offset = qib_hdrget_offset(rhf_addr);
-
- return (struct qib_message_header *)
- (rhf_addr - dd->rhf_offset + offset);
-}
-
-static void qib_7220_config_ctxts(struct qib_devdata *dd)
-{
- unsigned long flags;
- u32 nchipctxts;
-
- nchipctxts = qib_read_kreg32(dd, kr_portcnt);
- dd->cspec->numctxts = nchipctxts;
- if (qib_n_krcv_queues > 1) {
- dd->qpn_mask = 0x3e;
- dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
- if (dd->first_user_ctxt > nchipctxts)
- dd->first_user_ctxt = nchipctxts;
- } else
- dd->first_user_ctxt = dd->num_pports;
- dd->n_krcv_queues = dd->first_user_ctxt;
-
- if (!qib_cfgctxts) {
- int nctxts = dd->first_user_ctxt + num_online_cpus();
-
- if (nctxts <= 5)
- dd->ctxtcnt = 5;
- else if (nctxts <= 9)
- dd->ctxtcnt = 9;
- else if (nctxts <= nchipctxts)
- dd->ctxtcnt = nchipctxts;
- } else if (qib_cfgctxts <= nchipctxts)
- dd->ctxtcnt = qib_cfgctxts;
- if (!dd->ctxtcnt) /* none of the above, set to max */
- dd->ctxtcnt = nchipctxts;
-
- /*
- * Chip can be configured for 5, 9, or 17 ctxts, and choice
- * affects number of eager TIDs per ctxt (1K, 2K, 4K).
- * Lock to be paranoid about later motion, etc.
- */
- spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
- if (dd->ctxtcnt > 9)
- dd->rcvctrl |= 2ULL << IBA7220_R_CTXTCFG_SHIFT;
- else if (dd->ctxtcnt > 5)
- dd->rcvctrl |= 1ULL << IBA7220_R_CTXTCFG_SHIFT;
- /* else configure for default 5 receive ctxts */
- if (dd->qpn_mask)
- dd->rcvctrl |= 1ULL << QIB_7220_RcvCtrl_RcvQPMapEnable_LSB;
- qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
- spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
-
- /* kr_rcvegrcnt changes based on the number of contexts enabled */
- dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
- dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, IBA7220_KRCVEGRCNT);
-}
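-
-/*
- * Worked example of the auto-sizing above (CPU count hypothetical):
- * with one kernel receive queue per port and, say, 8 online CPUs,
- * nctxts = dd->first_user_ctxt + 8; for a single-port board with
- * first_user_ctxt == 1 that gives 9, so ctxtcnt becomes 9 and the
- * CTXTCFG field in rcvctrl is set to 1 (the 9-context layout).
- */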
-
-static int qib_7220_get_ib_cfg(struct qib_pportdata *ppd, int which)
-{
- int lsb, ret = 0;
- u64 maskr; /* right-justified mask */
-
- switch (which) {
- case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
- ret = ppd->link_width_enabled;
- goto done;
-
- case QIB_IB_CFG_LWID: /* Get currently active Link-width */
- ret = ppd->link_width_active;
- goto done;
-
- case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
- ret = ppd->link_speed_enabled;
- goto done;
-
- case QIB_IB_CFG_SPD: /* Get current Link spd */
- ret = ppd->link_speed_active;
- goto done;
-
- case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
- lsb = IBA7220_IBC_RXPOL_SHIFT;
- maskr = IBA7220_IBC_RXPOL_MASK;
- break;
-
- case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
- lsb = IBA7220_IBC_LREV_SHIFT;
- maskr = IBA7220_IBC_LREV_MASK;
- break;
-
- case QIB_IB_CFG_LINKLATENCY:
- ret = qib_read_kreg64(ppd->dd, kr_ibcddrstatus)
- & IBA7220_DDRSTAT_LINKLAT_MASK;
- goto done;
-
- case QIB_IB_CFG_OP_VLS:
- ret = ppd->vls_operational;
- goto done;
-
- case QIB_IB_CFG_VL_HIGH_CAP:
- ret = 0;
- goto done;
-
- case QIB_IB_CFG_VL_LOW_CAP:
- ret = 0;
- goto done;
-
- case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
- ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
- OverrunThreshold);
- goto done;
-
- case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
- ret = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
- PhyerrThreshold);
- goto done;
-
- case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
- /* will only take effect when the link state changes */
- ret = (ppd->cpspec->ibcctrl &
- SYM_MASK(IBCCtrl, LinkDownDefaultState)) ?
- IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
- goto done;
-
- case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
- lsb = IBA7220_IBC_HRTBT_SHIFT;
- maskr = IBA7220_IBC_HRTBT_MASK;
- break;
-
- case QIB_IB_CFG_PMA_TICKS:
- /*
- * 0x00 = 10x link transfer rate, or 4 nsec for 2.5 Gbs.
- * Since the clock is always 250MHz, the value is 1 or 0.
- */
- ret = (ppd->link_speed_active == QIB_IB_DDR);
- goto done;
-
- default:
- ret = -EINVAL;
- goto done;
- }
- ret = (int)((ppd->cpspec->ibcddrctrl >> lsb) & maskr);
-done:
- return ret;
-}
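-
-/*
- * Usage sketch: callers query one attribute at a time, e.g.
- *
- *	if (qib_7220_get_ib_cfg(ppd, QIB_IB_CFG_SPD) == QIB_IB_DDR)
- *		(link is currently running at DDR)
- */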
-
-static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 maskr; /* right-justified mask */
- int lsb, ret = 0, setforce = 0;
- u16 lcmd, licmd;
- unsigned long flags;
- u32 tmp = 0;
-
- switch (which) {
- case QIB_IB_CFG_LIDLMC:
- /*
- * Set LID and LMC. Combined to avoid a possible hazard;
- * the caller puts LMC in the 16 MSbits, DLID in the 16 LSbits of val.
- */
- lsb = IBA7220_IBC_DLIDLMC_SHIFT;
- maskr = IBA7220_IBC_DLIDLMC_MASK;
- break;
-
- case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
- /*
- * As with speed, only write the actual register if
- * the link is currently down, otherwise takes effect
- * on next link change.
- */
- ppd->link_width_enabled = val;
- if (!(ppd->lflags & QIBL_LINKDOWN))
- goto bail;
- /*
- * We set the QIBL_IB_FORCE_NOTIFY bit so updown
- * will get called, because we want to update
- * link_width_active, and the change may not take
- * effect for some time (if we are in POLL), so this
- * flag will force the updown routine to be called
- * on the next ibstatuschange down interrupt, even
- * if it's not a down->up transition.
- */
- val--; /* convert from IB to chip */
- maskr = IBA7220_IBC_WIDTH_MASK;
- lsb = IBA7220_IBC_WIDTH_SHIFT;
- setforce = 1;
- break;
-
- case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
- /*
- * If we turn off IB1.2, need to preset SerDes defaults,
- * but not right now. Set a flag for the next time
- * we command the link down. As with width, only write the
- * actual register if the link is currently down, otherwise
- * takes effect on next link change. Since setting is being
- * explicitly requested (via MAD or sysfs), clear autoneg
- * failure status if speed autoneg is enabled.
- */
- ppd->link_speed_enabled = val;
- if ((ppd->cpspec->ibcddrctrl & IBA7220_IBC_IBTA_1_2_MASK) &&
- !(val & (val - 1)))
- dd->cspec->presets_needed = 1;
- if (!(ppd->lflags & QIBL_LINKDOWN))
- goto bail;
- /*
- * We set the QIBL_IB_FORCE_NOTIFY bit so updown
- * will get called, because we want to update
- * link_speed_active, and the change may not take
- * effect for some time (if we are in POLL), so this
- * flag will force the updown routine to be called
- * on the next ibstatuschange down interrupt, even
- * if it's not a down->up transition.
- */
- if (val == (QIB_IB_SDR | QIB_IB_DDR)) {
- val = IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- } else
- val = val == QIB_IB_DDR ?
- IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
- maskr = IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- /* IBTA 1.2 mode + speed bits are contiguous */
- lsb = SYM_LSB(IBCDDRCtrl, IB_ENHANCED_MODE);
- setforce = 1;
- break;
-
- case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
- lsb = IBA7220_IBC_RXPOL_SHIFT;
- maskr = IBA7220_IBC_RXPOL_MASK;
- break;
-
- case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
- lsb = IBA7220_IBC_LREV_SHIFT;
- maskr = IBA7220_IBC_LREV_MASK;
- break;
-
- case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
- maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
- OverrunThreshold);
- if (maskr != val) {
- ppd->cpspec->ibcctrl &=
- ~SYM_MASK(IBCCtrl, OverrunThreshold);
- ppd->cpspec->ibcctrl |= (u64) val <<
- SYM_LSB(IBCCtrl, OverrunThreshold);
- qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
- goto bail;
-
- case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
- maskr = SYM_FIELD(ppd->cpspec->ibcctrl, IBCCtrl,
- PhyerrThreshold);
- if (maskr != val) {
- ppd->cpspec->ibcctrl &=
- ~SYM_MASK(IBCCtrl, PhyerrThreshold);
- ppd->cpspec->ibcctrl |= (u64) val <<
- SYM_LSB(IBCCtrl, PhyerrThreshold);
- qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
- goto bail;
-
- case QIB_IB_CFG_PKEYS: /* update pkeys */
- maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
- ((u64) ppd->pkeys[2] << 32) |
- ((u64) ppd->pkeys[3] << 48);
- qib_write_kreg(dd, kr_partitionkey, maskr);
- goto bail;
-
- case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
- /* will only take effect when the link state changes */
- if (val == IB_LINKINITCMD_POLL)
- ppd->cpspec->ibcctrl &=
- ~SYM_MASK(IBCCtrl, LinkDownDefaultState);
- else /* SLEEP */
- ppd->cpspec->ibcctrl |=
- SYM_MASK(IBCCtrl, LinkDownDefaultState);
- qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- goto bail;
-
- case QIB_IB_CFG_MTU: /* update the MTU in IBC */
- /*
- * Update our housekeeping variables, and set IBC max
- * size, same as init code; max IBC is max we allow in
- * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
- * Set even if it's unchanged; print a debug message only
- * on changes.
- */
- val = (ppd->ibmaxlen >> 2) + 1;
- ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, MaxPktLen);
- ppd->cpspec->ibcctrl |= (u64)val << SYM_LSB(IBCCtrl, MaxPktLen);
- qib_write_kreg(dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- goto bail;
-
- case QIB_IB_CFG_LSTATE: /* set the IB link state */
- switch (val & 0xffff0000) {
- case IB_LINKCMD_DOWN:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
- if (!ppd->cpspec->ibdeltainprog &&
- qib_compat_ddr_negotiate) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymsnap =
- read_7220_creg32(dd, cr_ibsymbolerr);
- ppd->cpspec->iblnkerrsnap =
- read_7220_creg32(dd, cr_iblinkerrrecov);
- }
- break;
-
- case IB_LINKCMD_ARMED:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
- break;
-
- case IB_LINKCMD_ACTIVE:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
- break;
-
- default:
- ret = -EINVAL;
- qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
- goto bail;
- }
- switch (val & 0xffff) {
- case IB_LINKINITCMD_NOP:
- licmd = 0;
- break;
-
- case IB_LINKINITCMD_POLL:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
- break;
-
- case IB_LINKINITCMD_SLEEP:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
- break;
-
- case IB_LINKINITCMD_DISABLE:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
- ppd->cpspec->chase_end = 0;
- /*
- * Stop the state chase counter and timer, if running;
- * wait for the pending timer, but don't clear .data (ppd)!
- */
- if (ppd->cpspec->chase_timer.expires) {
- timer_delete_sync(&ppd->cpspec->chase_timer);
- ppd->cpspec->chase_timer.expires = 0;
- }
- break;
-
- default:
- ret = -EINVAL;
- qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
- val & 0xffff);
- goto bail;
- }
- qib_set_ib_7220_lstate(ppd, lcmd, licmd);
-
- maskr = IBA7220_IBC_WIDTH_MASK;
- lsb = IBA7220_IBC_WIDTH_SHIFT;
- tmp = (ppd->cpspec->ibcddrctrl >> lsb) & maskr;
- /*
- * If the width active on the chip does not match the
- * width in the shadow register, write the new active
- * width to the chip. We don't have to worry about speed,
- * as that is taken care of by set_7220_ibspeed_fast,
- * called by ib_updown.
- */
- if (ppd->link_width_enabled-1 != tmp) {
- ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
- ppd->cpspec->ibcddrctrl |=
- (((u64)(ppd->link_width_enabled-1) & maskr) <<
- lsb);
- qib_write_kreg(dd, kr_ibcddrctrl,
- ppd->cpspec->ibcddrctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- }
- goto bail;
-
- case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
- if (val > IBA7220_IBC_HRTBT_MASK) {
- ret = -EINVAL;
- goto bail;
- }
- lsb = IBA7220_IBC_HRTBT_SHIFT;
- maskr = IBA7220_IBC_HRTBT_MASK;
- break;
-
- default:
- ret = -EINVAL;
- goto bail;
- }
- ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
- ppd->cpspec->ibcddrctrl |= (((u64) val & maskr) << lsb);
- qib_write_kreg(dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- if (setforce) {
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- }
-bail:
- return ret;
-}
-
-static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
-{
- int ret = 0;
- u64 val, ddr;
-
- if (!strncmp(what, "ibc", 3)) {
- ppd->cpspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
- val = 0; /* disable heart beat, so link will come up */
- qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
- ppd->dd->unit, ppd->port);
- } else if (!strncmp(what, "off", 3)) {
- ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
- /* enable heart beat again */
- val = IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
- qib_devinfo(ppd->dd->pcidev,
- "Disabling IB%u:%u IBC loopback (normal)\n",
- ppd->dd->unit, ppd->port);
- } else
- ret = -EINVAL;
- if (!ret) {
- qib_write_kreg(ppd->dd, kr_ibcctrl, ppd->cpspec->ibcctrl);
- ddr = ppd->cpspec->ibcddrctrl & ~(IBA7220_IBC_HRTBT_MASK
- << IBA7220_IBC_HRTBT_SHIFT);
- ppd->cpspec->ibcddrctrl = ddr | val;
- qib_write_kreg(ppd->dd, kr_ibcddrctrl,
- ppd->cpspec->ibcddrctrl);
- qib_write_kreg(ppd->dd, kr_scratch, 0);
- }
- return ret;
-}
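-
-/*
- * Usage sketch: the two accepted keywords are "ibc" and "off":
- *
- *	qib_7220_set_loopback(ppd, "ibc");	(enable IBC loopback)
- *	qib_7220_set_loopback(ppd, "off");	(back to normal)
- */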
-
-static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
- u32 updegr, u32 egrhd, u32 npkts)
-{
- if (updegr)
- qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
-}
-
-static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
-{
- u32 head, tail;
-
- head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
- if (rcd->rcvhdrtail_kvaddr)
- tail = qib_get_rcvhdrtail(rcd);
- else
- tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
- return head == tail;
-}
-
-/*
- * Modify the RCVCTRL register in a chip-specific way. This
- * is a function because bit positions and (future) register
- * location are chip-specific, but the needed operations are
- * generic. <op> is a bit-mask because we often want to
- * do multiple modifications.
- */
-static void rcvctrl_7220_mod(struct qib_pportdata *ppd, unsigned int op,
- int ctxt)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 mask, val;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
- if (op & QIB_RCVCTRL_TAILUPD_ENB)
- dd->rcvctrl |= (1ULL << IBA7220_R_TAILUPD_SHIFT);
- if (op & QIB_RCVCTRL_TAILUPD_DIS)
- dd->rcvctrl &= ~(1ULL << IBA7220_R_TAILUPD_SHIFT);
- if (op & QIB_RCVCTRL_PKEY_ENB)
- dd->rcvctrl &= ~(1ULL << IBA7220_R_PKEY_DIS_SHIFT);
- if (op & QIB_RCVCTRL_PKEY_DIS)
- dd->rcvctrl |= (1ULL << IBA7220_R_PKEY_DIS_SHIFT);
- if (ctxt < 0)
- mask = (1ULL << dd->ctxtcnt) - 1;
- else
- mask = (1ULL << ctxt);
- if (op & QIB_RCVCTRL_CTXT_ENB) {
- /* always done for specific ctxt */
- dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, PortEnable));
- if (!(dd->flags & QIB_NODMA_RTAIL))
- dd->rcvctrl |= 1ULL << IBA7220_R_TAILUPD_SHIFT;
- /* Write these registers before the context is enabled. */
- qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt,
- dd->rcd[ctxt]->rcvhdrqtailaddr_phys);
- qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt,
- dd->rcd[ctxt]->rcvhdrq_phys);
- dd->rcd[ctxt]->seq_cnt = 1;
- }
- if (op & QIB_RCVCTRL_CTXT_DIS)
- dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, PortEnable));
- if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
- dd->rcvctrl |= (mask << IBA7220_R_INTRAVAIL_SHIFT);
- if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
- dd->rcvctrl &= ~(mask << IBA7220_R_INTRAVAIL_SHIFT);
- qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
- if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) && dd->rhdrhead_intr_off) {
- /* arm rcv interrupt */
- val = qib_read_ureg32(dd, ur_rcvhdrhead, ctxt) |
- dd->rhdrhead_intr_off;
- qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
- }
- if (op & QIB_RCVCTRL_CTXT_ENB) {
- /*
- * Init the context registers also; if we were
- * disabled, tail and head should both be zero
- * already from the enable, but since we don't
- * know, we have to do it explicitly.
- */
- val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
- qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
-
- val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
- dd->rcd[ctxt]->head = val;
- /* If kctxt, interrupt on next receive. */
- if (ctxt < dd->first_user_ctxt)
- val |= dd->rhdrhead_intr_off;
- qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
- }
- if (op & QIB_RCVCTRL_CTXT_DIS) {
- if (ctxt >= 0) {
- qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr, ctxt, 0);
- qib_write_kreg_ctxt(dd, kr_rcvhdraddr, ctxt, 0);
- } else {
- unsigned i;
-
- for (i = 0; i < dd->cfgctxts; i++) {
- qib_write_kreg_ctxt(dd, kr_rcvhdrtailaddr,
- i, 0);
- qib_write_kreg_ctxt(dd, kr_rcvhdraddr, i, 0);
- }
- }
- }
- spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
-}
-
-/*
- * Modify the SENDCTRL register in a chip-specific way. This
- * is a function because there may be multiple such registers
- * with slightly different layouts. To start, we assume the
- * "canonical" register layout of the first chips.
- * The chip requires no back-to-back sendctrl writes, so write
- * the scratch register after writing sendctrl.
- */
-static void sendctrl_7220_mod(struct qib_pportdata *ppd, u32 op)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 tmp_dd_sendctrl;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
-
- /* First the ones that are "sticky", saved in shadow */
- if (op & QIB_SENDCTRL_CLEAR)
- dd->sendctrl = 0;
- if (op & QIB_SENDCTRL_SEND_DIS)
- dd->sendctrl &= ~SYM_MASK(SendCtrl, SPioEnable);
- else if (op & QIB_SENDCTRL_SEND_ENB) {
- dd->sendctrl |= SYM_MASK(SendCtrl, SPioEnable);
- if (dd->flags & QIB_USE_SPCL_TRIG)
- dd->sendctrl |= SYM_MASK(SendCtrl,
- SSpecialTriggerEn);
- }
- if (op & QIB_SENDCTRL_AVAIL_DIS)
- dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
- else if (op & QIB_SENDCTRL_AVAIL_ENB)
- dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
-
- if (op & QIB_SENDCTRL_DISARM_ALL) {
- u32 i, last;
-
- tmp_dd_sendctrl = dd->sendctrl;
- /*
- * disarm any that are not yet launched, disabling sends
- * and updates until done.
- */
- last = dd->piobcnt2k + dd->piobcnt4k;
- tmp_dd_sendctrl &=
- ~(SYM_MASK(SendCtrl, SPioEnable) |
- SYM_MASK(SendCtrl, SendBufAvailUpd));
- for (i = 0; i < last; i++) {
- qib_write_kreg(dd, kr_sendctrl,
- tmp_dd_sendctrl |
- SYM_MASK(SendCtrl, Disarm) | i);
- qib_write_kreg(dd, kr_scratch, 0);
- }
- }
-
- tmp_dd_sendctrl = dd->sendctrl;
-
- if (op & QIB_SENDCTRL_FLUSH)
- tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Abort);
- if (op & QIB_SENDCTRL_DISARM)
- tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
- ((op & QIB_7220_SendCtrl_DisarmPIOBuf_RMASK) <<
- SYM_LSB(SendCtrl, DisarmPIOBuf));
- if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
- (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
- tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
-
- qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
-
- if (op & QIB_SENDCTRL_AVAIL_BLIP) {
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-
- if (op & QIB_SENDCTRL_FLUSH) {
- u32 v;
- /*
- * ensure writes have hit chip, then do a few
- * more reads, to allow DMA of pioavail registers
- * to occur, so in-memory copy is in sync with
- * the chip. Not always safe to sleep.
- */
- v = qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_scratch, v);
- v = qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_scratch, v);
- qib_read_kreg32(dd, kr_scratch);
- }
-}
-
-/**
- * qib_portcntr_7220 - read a per-port counter
- * @ppd: the qlogic_ib per-port data
- * @reg: the counter to snapshot
- */
-static u64 qib_portcntr_7220(struct qib_pportdata *ppd, u32 reg)
-{
- u64 ret = 0ULL;
- struct qib_devdata *dd = ppd->dd;
- u16 creg;
- /* 0xffff for unimplemented or synthesized counters */
- static const u16 xlator[] = {
- [QIBPORTCNTR_PKTSEND] = cr_pktsend,
- [QIBPORTCNTR_WORDSEND] = cr_wordsend,
- [QIBPORTCNTR_PSXMITDATA] = cr_psxmitdatacount,
- [QIBPORTCNTR_PSXMITPKTS] = cr_psxmitpktscount,
- [QIBPORTCNTR_PSXMITWAIT] = cr_psxmitwaitcount,
- [QIBPORTCNTR_SENDSTALL] = cr_sendstall,
- [QIBPORTCNTR_PKTRCV] = cr_pktrcv,
- [QIBPORTCNTR_PSRCVDATA] = cr_psrcvdatacount,
- [QIBPORTCNTR_PSRCVPKTS] = cr_psrcvpktscount,
- [QIBPORTCNTR_RCVEBP] = cr_rcvebp,
- [QIBPORTCNTR_RCVOVFL] = cr_rcvovfl,
- [QIBPORTCNTR_WORDRCV] = cr_wordrcv,
- [QIBPORTCNTR_RXDROPPKT] = cr_rxdroppkt,
- [QIBPORTCNTR_RXLOCALPHYERR] = cr_rxotherlocalphyerr,
- [QIBPORTCNTR_RXVLERR] = cr_rxvlerr,
- [QIBPORTCNTR_ERRICRC] = cr_erricrc,
- [QIBPORTCNTR_ERRVCRC] = cr_errvcrc,
- [QIBPORTCNTR_ERRLPCRC] = cr_errlpcrc,
- [QIBPORTCNTR_BADFORMAT] = cr_badformat,
- [QIBPORTCNTR_ERR_RLEN] = cr_err_rlen,
- [QIBPORTCNTR_IBSYMBOLERR] = cr_ibsymbolerr,
- [QIBPORTCNTR_INVALIDRLEN] = cr_invalidrlen,
- [QIBPORTCNTR_UNSUPVL] = cr_txunsupvl,
- [QIBPORTCNTR_EXCESSBUFOVFL] = cr_excessbufferovfl,
- [QIBPORTCNTR_ERRLINK] = cr_errlink,
- [QIBPORTCNTR_IBLINKDOWN] = cr_iblinkdown,
- [QIBPORTCNTR_IBLINKERRRECOV] = cr_iblinkerrrecov,
- [QIBPORTCNTR_LLI] = cr_locallinkintegrityerr,
- [QIBPORTCNTR_PSINTERVAL] = cr_psinterval,
- [QIBPORTCNTR_PSSTART] = cr_psstart,
- [QIBPORTCNTR_PSSTAT] = cr_psstat,
- [QIBPORTCNTR_VL15PKTDROP] = cr_vl15droppedpkt,
- [QIBPORTCNTR_ERRPKEY] = cr_errpkey,
- [QIBPORTCNTR_KHDROVFL] = 0xffff,
- };
-
- if (reg >= ARRAY_SIZE(xlator)) {
- qib_devinfo(ppd->dd->pcidev,
- "Unimplemented portcounter %u\n", reg);
- goto done;
- }
- creg = xlator[reg];
-
- if (reg == QIBPORTCNTR_KHDROVFL) {
- int i;
-
- /* sum over all kernel contexts */
- for (i = 0; i < dd->first_user_ctxt; i++)
- ret += read_7220_creg32(dd, cr_portovfl + i);
- }
- if (creg == 0xffff)
- goto done;
-
-	/*
-	 * Only the fast-incrementing counters are 64-bit; use 32-bit reads
-	 * for the rest to avoid two independent reads on Opteron.
-	 */
- if ((creg == cr_wordsend || creg == cr_wordrcv ||
- creg == cr_pktsend || creg == cr_pktrcv))
- ret = read_7220_creg(dd, creg);
- else
- ret = read_7220_creg32(dd, creg);
- if (creg == cr_ibsymbolerr) {
- if (dd->pport->cpspec->ibdeltainprog)
- ret -= ret - ppd->cpspec->ibsymsnap;
- ret -= dd->pport->cpspec->ibsymdelta;
- } else if (creg == cr_iblinkerrrecov) {
- if (dd->pport->cpspec->ibdeltainprog)
- ret -= ret - ppd->cpspec->iblnkerrsnap;
- ret -= dd->pport->cpspec->iblnkerrdelta;
- }
-done:
- return ret;
-}
-
-/*
- * Device counter names (not port-specific), one line per stat,
- * single string. Used by utilities like ipathstats to print the stats
- * in a way that works for different versions of drivers, without changing
- * the utility. Names need to be 12 chars or less (w/o newline), for proper
- * display by the utility.
- * Non-error counters are first.
- * Start of "error" counters is indicated by a leading "E " on the first
- * "error" counter, and doesn't count in label length.
- * The EgrOvfl list needs to be last so we truncate them at the configured
- * context count for the device.
- * cntr7220indices contains the corresponding register indices.
- */
-static const char cntr7220names[] =
- "Interrupts\n"
- "HostBusStall\n"
- "E RxTIDFull\n"
- "RxTIDInvalid\n"
- "Ctxt0EgrOvfl\n"
- "Ctxt1EgrOvfl\n"
- "Ctxt2EgrOvfl\n"
- "Ctxt3EgrOvfl\n"
- "Ctxt4EgrOvfl\n"
- "Ctxt5EgrOvfl\n"
- "Ctxt6EgrOvfl\n"
- "Ctxt7EgrOvfl\n"
- "Ctxt8EgrOvfl\n"
- "Ctxt9EgrOvfl\n"
- "Ctx10EgrOvfl\n"
- "Ctx11EgrOvfl\n"
- "Ctx12EgrOvfl\n"
- "Ctx13EgrOvfl\n"
- "Ctx14EgrOvfl\n"
- "Ctx15EgrOvfl\n"
- "Ctx16EgrOvfl\n";
-
-static const size_t cntr7220indices[] = {
- cr_lbint,
- cr_lbflowstall,
- cr_errtidfull,
- cr_errtidvalid,
- cr_portovfl + 0,
- cr_portovfl + 1,
- cr_portovfl + 2,
- cr_portovfl + 3,
- cr_portovfl + 4,
- cr_portovfl + 5,
- cr_portovfl + 6,
- cr_portovfl + 7,
- cr_portovfl + 8,
- cr_portovfl + 9,
- cr_portovfl + 10,
- cr_portovfl + 11,
- cr_portovfl + 12,
- cr_portovfl + 13,
- cr_portovfl + 14,
- cr_portovfl + 15,
- cr_portovfl + 16,
-};
-
-/*
- * Same as cntr7220names and cntr7220indices, but for port-specific counters.
- * portcntr7220indices is somewhat complicated by some registers needing
- * adjustments of various kinds; those are ORed with _PORT_VIRT_FLAG.
- */
-static const char portcntr7220names[] =
- "TxPkt\n"
- "TxFlowPkt\n"
- "TxWords\n"
- "RxPkt\n"
- "RxFlowPkt\n"
- "RxWords\n"
- "TxFlowStall\n"
- "TxDmaDesc\n" /* 7220 and 7322-only */
- "E RxDlidFltr\n" /* 7220 and 7322-only */
- "IBStatusChng\n"
- "IBLinkDown\n"
- "IBLnkRecov\n"
- "IBRxLinkErr\n"
- "IBSymbolErr\n"
- "RxLLIErr\n"
- "RxBadFormat\n"
- "RxBadLen\n"
- "RxBufOvrfl\n"
- "RxEBP\n"
- "RxFlowCtlErr\n"
- "RxICRCerr\n"
- "RxLPCRCerr\n"
- "RxVCRCerr\n"
- "RxInvalLen\n"
- "RxInvalPKey\n"
- "RxPktDropped\n"
- "TxBadLength\n"
- "TxDropped\n"
- "TxInvalLen\n"
- "TxUnderrun\n"
- "TxUnsupVL\n"
- "RxLclPhyErr\n" /* 7220 and 7322-only */
- "RxVL15Drop\n" /* 7220 and 7322-only */
- "RxVlErr\n" /* 7220 and 7322-only */
- "XcessBufOvfl\n" /* 7220 and 7322-only */
- ;
-
-#define _PORT_VIRT_FLAG 0x8000 /* "virtual", need adjustments */
-static const size_t portcntr7220indices[] = {
- QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
- cr_pktsendflow,
- QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
- QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
- cr_pktrcvflowctrl,
- QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
- QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
- cr_txsdmadesc,
- cr_rxdlidfltr,
- cr_ibstatuschange,
- QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
- QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
- QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
- QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
- cr_rcvflowctrl_err,
- QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
- cr_invalidslen,
- cr_senddropped,
- cr_errslen,
- cr_sendunderrun,
- cr_txunsupvl,
- QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
- QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
- QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
-};
-
-/* Do all the setup needed to make later counter reads efficient */
-static void init_7220_cntrnames(struct qib_devdata *dd)
-{
- int i, j = 0;
- char *s;
-
- for (i = 0, s = (char *)cntr7220names; s && j <= dd->cfgctxts;
- i++) {
- /* we always have at least one counter before the egrovfl */
- if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
- j = 1;
- s = strchr(s + 1, '\n');
- if (s && j)
- j++;
- }
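-	/*
-	 * Since the EgrOvfl names are last in the list, ncntrs includes
-	 * them only up to the configured context count.
-	 */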
- dd->cspec->ncntrs = i;
- if (!s)
- /* full list; size is without terminating null */
- dd->cspec->cntrnamelen = sizeof(cntr7220names) - 1;
- else
- dd->cspec->cntrnamelen = 1 + s - cntr7220names;
- dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64),
- GFP_KERNEL);
-
- for (i = 0, s = (char *)portcntr7220names; s; i++)
- s = strchr(s + 1, '\n');
- dd->cspec->nportcntrs = i - 1;
- dd->cspec->portcntrnamelen = sizeof(portcntr7220names) - 1;
- dd->cspec->portcntrs = kmalloc_array(dd->cspec->nportcntrs,
- sizeof(u64),
- GFP_KERNEL);
-}
-
-static u32 qib_read_7220cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
- u64 **cntrp)
-{
- u32 ret;
-
- if (!dd->cspec->cntrs) {
- ret = 0;
- goto done;
- }
-
- if (namep) {
- *namep = (char *)cntr7220names;
- ret = dd->cspec->cntrnamelen;
- if (pos >= ret)
- ret = 0; /* final read after getting everything */
- } else {
- u64 *cntr = dd->cspec->cntrs;
- int i;
-
- ret = dd->cspec->ncntrs * sizeof(u64);
- if (!cntr || pos >= ret) {
- /* everything read, or couldn't get memory */
- ret = 0;
- goto done;
- }
-
- *cntrp = cntr;
- for (i = 0; i < dd->cspec->ncntrs; i++)
- *cntr++ = read_7220_creg32(dd, cntr7220indices[i]);
- }
-done:
- return ret;
-}
-
-static u32 qib_read_7220portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
- char **namep, u64 **cntrp)
-{
- u32 ret;
-
- if (!dd->cspec->portcntrs) {
- ret = 0;
- goto done;
- }
- if (namep) {
- *namep = (char *)portcntr7220names;
- ret = dd->cspec->portcntrnamelen;
- if (pos >= ret)
- ret = 0; /* final read after getting everything */
- } else {
- u64 *cntr = dd->cspec->portcntrs;
- struct qib_pportdata *ppd = &dd->pport[port];
- int i;
-
- ret = dd->cspec->nportcntrs * sizeof(u64);
- if (!cntr || pos >= ret) {
- /* everything read, or couldn't get memory */
- ret = 0;
- goto done;
- }
- *cntrp = cntr;
- for (i = 0; i < dd->cspec->nportcntrs; i++) {
- if (portcntr7220indices[i] & _PORT_VIRT_FLAG)
- *cntr++ = qib_portcntr_7220(ppd,
- portcntr7220indices[i] &
- ~_PORT_VIRT_FLAG);
- else
- *cntr++ = read_7220_creg32(dd,
- portcntr7220indices[i]);
- }
- }
-done:
- return ret;
-}
-
-/**
- * qib_get_7220_faststats - get word counters from chip before they overflow
- * @t: contains a pointer to the qlogic_ib device qib_devdata
- *
- * This needs more work; in particular, a decision on whether we really
- * need traffic_wds done the way it is.
- * Called from add_timer.
- */
-static void qib_get_7220_faststats(struct timer_list *t)
-{
- struct qib_devdata *dd = timer_container_of(dd, t, stats_timer);
- struct qib_pportdata *ppd = dd->pport;
- unsigned long flags;
- u64 traffic_wds;
-
- /*
- * don't access the chip while running diags, or memory diags can
- * fail
- */
- if (!(dd->flags & QIB_INITTED) || dd->diag_client)
-		/* but re-arm the timer for the diags case; won't hurt otherwise */
- goto done;
-
- /*
- * We now try to maintain an activity timer, based on traffic
- * exceeding a threshold, so we need to check the word-counts
- * even if they are 64-bit.
- */
- traffic_wds = qib_portcntr_7220(ppd, cr_wordsend) +
- qib_portcntr_7220(ppd, cr_wordrcv);
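-	/* compute the delta since the last sample, fold into running total */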
- spin_lock_irqsave(&dd->eep_st_lock, flags);
- traffic_wds -= dd->traffic_wds;
- dd->traffic_wds += traffic_wds;
- spin_unlock_irqrestore(&dd->eep_st_lock, flags);
-done:
- mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
-}
-
-/*
- * If we are using MSI, try to fall back to INTx.
- */
-static int qib_7220_intr_fallback(struct qib_devdata *dd)
-{
- if (!dd->msi_lo)
- return 0;
-
- qib_devinfo(dd->pcidev,
- "MSI interrupt not detected, trying INTx interrupts\n");
-
- qib_free_irq(dd);
- dd->msi_lo = 0;
- if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_INTX) < 0)
- qib_dev_err(dd, "Failed to enable INTx\n");
- qib_setup_7220_interrupt(dd);
- return 1;
-}
-
-/*
- * Reset the XGXS (between serdes and IBC). Slightly less intrusive
- * than resetting the IBC or external link state, and useful in some
- * cases to cause some retraining. To do this right, we reset IBC
- * as well.
- */
-static void qib_7220_xgxs_reset(struct qib_pportdata *ppd)
-{
- u64 val, prev_val;
- struct qib_devdata *dd = ppd->dd;
-
- prev_val = qib_read_kreg64(dd, kr_xgxs_cfg);
- val = prev_val | QLOGIC_IB_XGXS_RESET;
- prev_val &= ~QLOGIC_IB_XGXS_RESET; /* be sure */
- qib_write_kreg(dd, kr_control,
- dd->control & ~QLOGIC_IB_C_LINKENABLE);
- qib_write_kreg(dd, kr_xgxs_cfg, val);
- qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_xgxs_cfg, prev_val);
- qib_write_kreg(dd, kr_control, dd->control);
-}
-
-/*
- * For this chip, we want to use the same buffer every time we are
- * trying to bring the link up (they are always VL15 packets). At
- * that link state the packet should always go out immediately
- * (or at least be discarded at the tx interface if the link is down).
- * If it doesn't, and the buffer isn't available, that means some other
- * sender has gotten ahead of us, and is preventing our packet from going
- * out. In that case, we flush all packets, and try again. If that still
- * fails, we fail the request, and hope things work the next time around.
- *
- * We don't need very complicated heuristics on whether the packet had
- * time to go out or not, since even at SDR 1X, it goes out in very short
- * time periods, covered by the chip reads done here and as part of the
- * flush.
- */
-static u32 __iomem *get_7220_link_buf(struct qib_pportdata *ppd, u32 *bnum)
-{
- u32 __iomem *buf;
- u32 lbuf = ppd->dd->cspec->lastbuf_for_pio;
- int do_cleanup;
- unsigned long flags;
-
- /*
- * always blip to get avail list updated, since it's almost
- * always needed, and is fairly cheap.
- */
- sendctrl_7220_mod(ppd->dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
- qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
- buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
- if (buf)
- goto done;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- if (ppd->sdma_state.current_state == qib_sdma_state_s20_idle &&
- ppd->sdma_state.current_state != qib_sdma_state_s00_hw_down) {
- __qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
- do_cleanup = 0;
- } else {
- do_cleanup = 1;
- qib_7220_sdma_hw_clean_up(ppd);
- }
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-
- if (do_cleanup) {
- qib_read_kreg64(ppd->dd, kr_scratch); /* extra chip flush */
- buf = qib_getsendbuf_range(ppd->dd, bnum, lbuf, lbuf);
- }
-done:
- return buf;
-}
-
-/*
- * This code for non-IBTA-compliant IB speed negotiation is only known to
- * work for the SDR to DDR transition, and only between an HCA and a switch
- * with recent firmware. It is based on observed heuristics, rather than
- * actual knowledge of the non-compliant speed negotiation.
- * It has a number of hard-coded fields, since the hope is to rewrite this
- * when a spec is available on how the negotiation is intended to work.
- */
-static void autoneg_7220_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
- u32 dcnt, u32 *data)
-{
- int i;
- u64 pbc;
- u32 __iomem *piobuf;
- u32 pnum;
- struct qib_devdata *dd = ppd->dd;
-
- i = 0;
- pbc = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
- pbc |= PBC_7220_VL15_SEND;
- while (!(piobuf = get_7220_link_buf(ppd, &pnum))) {
- if (i++ > 5)
- return;
- udelay(2);
- }
- sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_DISARM_BUF(pnum));
- writeq(pbc, piobuf);
- qib_flush_wc();
- qib_pio_copy(piobuf + 2, hdr, 7);
- qib_pio_copy(piobuf + 9, data, dcnt);
- if (dd->flags & QIB_USE_SPCL_TRIG) {
- u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
-
- qib_flush_wc();
- __raw_writel(0xaebecede, piobuf + spcl_off);
- }
- qib_flush_wc();
- qib_sendbuf_done(dd, pnum);
-}
-
-/*
- * _start packet gets sent twice at start, _done gets sent twice at end
- */
-static void autoneg_7220_send(struct qib_pportdata *ppd, int which)
-{
- struct qib_devdata *dd = ppd->dd;
- static u32 swapped;
- u32 dw, i, hcnt, dcnt, *data;
- static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
- static u32 madpayload_start[0x40] = {
- 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
- 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
- };
- static u32 madpayload_done[0x40] = {
- 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
- 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x40000001, 0x1388, 0x15e, /* rest 0's */
- };
-
- dcnt = ARRAY_SIZE(madpayload_start);
- hcnt = ARRAY_SIZE(hdr);
- if (!swapped) {
- /* for maintainability, do it at runtime */
- for (i = 0; i < hcnt; i++) {
- dw = (__force u32) cpu_to_be32(hdr[i]);
- hdr[i] = dw;
- }
- for (i = 0; i < dcnt; i++) {
- dw = (__force u32) cpu_to_be32(madpayload_start[i]);
- madpayload_start[i] = dw;
- dw = (__force u32) cpu_to_be32(madpayload_done[i]);
- madpayload_done[i] = dw;
- }
- swapped = 1;
- }
-
- data = which ? madpayload_done : madpayload_start;
-
- autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
- qib_read_kreg64(dd, kr_scratch);
- udelay(2);
- autoneg_7220_sendpkt(ppd, hdr, dcnt, data);
- qib_read_kreg64(dd, kr_scratch);
- udelay(2);
-}
-
-/*
- * Do the absolute minimum to cause an IB speed change, and make it
- * ready, but don't actually trigger the change. The caller will
- * do that when ready (if the link is in the Polling training state,
- * it will happen immediately; otherwise when the link next goes down).
- *
- * This routine should only be used as part of the DDR autonegotiation
- * code for devices that are not compliant with IB 1.2 (or code that
- * fixes things up for same).
- *
- * When the link has gone down and autoneg is enabled, or autoneg has
- * failed and we give up until next time, we set both speeds, and
- * then we want IBTA enabled as well as "use max enabled speed".
- */
-static void set_7220_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
-{
- ppd->cpspec->ibcddrctrl &= ~(IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK);
-
- if (speed == (QIB_IB_SDR | QIB_IB_DDR))
- ppd->cpspec->ibcddrctrl |= IBA7220_IBC_SPEED_AUTONEG_MASK |
- IBA7220_IBC_IBTA_1_2_MASK;
- else
- ppd->cpspec->ibcddrctrl |= speed == QIB_IB_DDR ?
- IBA7220_IBC_SPEED_DDR : IBA7220_IBC_SPEED_SDR;
-
- qib_write_kreg(ppd->dd, kr_ibcddrctrl, ppd->cpspec->ibcddrctrl);
- qib_write_kreg(ppd->dd, kr_scratch, 0);
-}
-
-/*
- * This routine is only used when we are not talking to another
- * IB 1.2-compliant device that we think can do DDR.
- * (This includes all existing switch chips as of Oct 2007.)
- * 1.2-compliant devices go directly to DDR prior to reaching INIT
- */
-static void try_7220_autoneg(struct qib_pportdata *ppd)
-{
- unsigned long flags;
-
- /*
- * Required for older non-IB1.2 DDR switches. Newer
- * non-IB-compliant switches don't need it, but so far,
-	 * aren't bothered by it either. The value is a "magic constant".
- */
- qib_write_kreg(ppd->dd, kr_ncmodectrl, 0x3b9dc07);
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- autoneg_7220_send(ppd, 0);
- set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
-
- toggle_7220_rclkrls(ppd->dd);
- /* 2 msec is minimum length of a poll cycle */
- queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
- msecs_to_jiffies(2));
-}
-
-/*
- * Handle the empirically determined mechanism for auto-negotiation
- * of DDR speed with switches.
- */
-static void autoneg_7220_work(struct work_struct *work)
-{
- struct qib_pportdata *ppd;
- struct qib_devdata *dd;
- u32 i;
- unsigned long flags;
-
- ppd = &container_of(work, struct qib_chippport_specific,
- autoneg_work.work)->pportdata;
- dd = ppd->dd;
-
- /*
-	 * Busy-wait for this first part; it should take at most a
-	 * few hundred usec, since we scheduled ourselves for 2 msec.
- */
- for (i = 0; i < 25; i++) {
- if (SYM_FIELD(ppd->lastibcstat, IBCStatus, LinkTrainingState)
- == IB_7220_LT_STATE_POLLQUIET) {
- qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
- break;
- }
- udelay(100);
- }
-
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
- goto done; /* we got there early or told to stop */
-
- /* we expect this to timeout */
- if (wait_event_timeout(ppd->cpspec->autoneg_wait,
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
- msecs_to_jiffies(90)))
- goto done;
-
- toggle_7220_rclkrls(dd);
-
- /* we expect this to timeout */
- if (wait_event_timeout(ppd->cpspec->autoneg_wait,
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
- msecs_to_jiffies(1700)))
- goto done;
-
- set_7220_ibspeed_fast(ppd, QIB_IB_SDR);
- toggle_7220_rclkrls(dd);
-
- /*
- * Wait up to 250 msec for link to train and get to INIT at DDR;
- * this should terminate early.
- */
- wait_event_timeout(ppd->cpspec->autoneg_wait,
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
- msecs_to_jiffies(250));
-done:
- if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
- if (dd->cspec->autoneg_tries == AUTONEG_TRIES) {
- ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
- dd->cspec->autoneg_tries = 0;
- }
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
- }
-}
-
-static u32 qib_7220_iblink_state(u64 ibcs)
-{
- u32 state = (u32)SYM_FIELD(ibcs, IBCStatus, LinkState);
-
- switch (state) {
- case IB_7220_L_STATE_INIT:
- state = IB_PORT_INIT;
- break;
- case IB_7220_L_STATE_ARM:
- state = IB_PORT_ARMED;
- break;
- case IB_7220_L_STATE_ACTIVE:
- case IB_7220_L_STATE_ACT_DEFER:
- state = IB_PORT_ACTIVE;
- break;
- default:
- fallthrough;
- case IB_7220_L_STATE_DOWN:
- state = IB_PORT_DOWN;
- break;
- }
- return state;
-}
-
-/* returns the IBTA port state, rather than the IBC link training state */
-static u8 qib_7220_phys_portstate(u64 ibcs)
-{
- u8 state = (u8)SYM_FIELD(ibcs, IBCStatus, LinkTrainingState);
- return qib_7220_physportstate[state];
-}
-
-static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
-{
- int ret = 0, symadj = 0;
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
-
- if (!ibup) {
- /*
- * When the link goes down we don't want AEQ running, so it
- * won't interfere with IBC training, etc., and we need
- * to go back to the static SerDes preset values.
- */
- if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
- QIBL_IB_AUTONEG_INPROG)))
- set_7220_ibspeed_fast(ppd, ppd->link_speed_enabled);
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
- qib_sd7220_presets(dd);
- qib_cancel_sends(ppd); /* initial disarm, etc. */
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- if (__qib_sdma_running(ppd))
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e70_go_idle);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- }
-		/* this might be better done in qib_sd7220_presets() */
- set_7220_relock_poll(dd, ibup);
- } else {
- if (qib_compat_ddr_negotiate &&
- !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
- QIBL_IB_AUTONEG_INPROG)) &&
- ppd->link_speed_active == QIB_IB_SDR &&
- (ppd->link_speed_enabled & (QIB_IB_DDR | QIB_IB_SDR)) ==
- (QIB_IB_DDR | QIB_IB_SDR) &&
- dd->cspec->autoneg_tries < AUTONEG_TRIES) {
- /* we are SDR, and DDR auto-negotiation enabled */
- ++dd->cspec->autoneg_tries;
- if (!ppd->cpspec->ibdeltainprog) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymsnap = read_7220_creg32(dd,
- cr_ibsymbolerr);
- ppd->cpspec->iblnkerrsnap = read_7220_creg32(dd,
- cr_iblinkerrrecov);
- }
- try_7220_autoneg(ppd);
- ret = 1; /* no other IB status change processing */
- } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
- ppd->link_speed_active == QIB_IB_SDR) {
- autoneg_7220_send(ppd, 1);
- set_7220_ibspeed_fast(ppd, QIB_IB_DDR);
- udelay(2);
- toggle_7220_rclkrls(dd);
- ret = 1; /* no other IB status change processing */
- } else {
- if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
- (ppd->link_speed_active & QIB_IB_DDR)) {
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
- QIBL_IB_AUTONEG_FAILED);
- spin_unlock_irqrestore(&ppd->lflags_lock,
- flags);
- dd->cspec->autoneg_tries = 0;
- /* re-enable SDR, for next link down */
- set_7220_ibspeed_fast(ppd,
- ppd->link_speed_enabled);
- wake_up(&ppd->cpspec->autoneg_wait);
- symadj = 1;
- } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
- /*
- * Clear autoneg failure flag, and do setup
- * so we'll try next time link goes down and
- * back to INIT (possibly connected to a
- * different device).
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
- spin_unlock_irqrestore(&ppd->lflags_lock,
- flags);
- ppd->cpspec->ibcddrctrl |=
- IBA7220_IBC_IBTA_1_2_MASK;
- qib_write_kreg(dd, kr_ncmodectrl, 0);
- symadj = 1;
- }
- }
-
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
- symadj = 1;
-
- if (!ret) {
- ppd->delay_mult = rate_to_delay
- [(ibcs >> IBA7220_LINKSPEED_SHIFT) & 1]
- [(ibcs >> IBA7220_LINKWIDTH_SHIFT) & 1];
-
- set_7220_relock_poll(dd, ibup);
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- /*
-			 * Unlike the 7322, the 7220 needs this, due to the
-			 * lack of an interrupt in some cases where SDMA is
-			 * active when the link goes down.
- */
- if (ppd->sdma_state.current_state !=
- qib_sdma_state_s20_idle)
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e00_go_hw_down);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- }
- }
-
- if (symadj) {
- if (ppd->cpspec->ibdeltainprog) {
- ppd->cpspec->ibdeltainprog = 0;
- ppd->cpspec->ibsymdelta += read_7220_creg32(ppd->dd,
- cr_ibsymbolerr) - ppd->cpspec->ibsymsnap;
- ppd->cpspec->iblnkerrdelta += read_7220_creg32(ppd->dd,
- cr_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
- }
- } else if (!ibup && qib_compat_ddr_negotiate &&
- !ppd->cpspec->ibdeltainprog &&
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymsnap = read_7220_creg32(ppd->dd,
- cr_ibsymbolerr);
- ppd->cpspec->iblnkerrsnap = read_7220_creg32(ppd->dd,
- cr_iblinkerrrecov);
- }
-
- if (!ret)
- qib_setup_7220_setextled(ppd, ibup);
- return ret;
-}
-
-/*
- * Does a read/modify/write to the appropriate registers to
- * set output and direction bits selected by mask.
- * These are in their canonical positions (e.g. lsb of
- * dir will end up in D48 of extctrl on existing chips).
- * Returns the contents of the GP Inputs.
- */
-static int gpio_7220_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
-{
- u64 read_val, new_out;
- unsigned long flags;
-
- if (mask) {
- /* some bits being written, lock access to GPIO */
- dir &= mask;
- out &= mask;
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
- dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
- new_out = (dd->cspec->gpio_out & ~mask) | out;
-
- qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
- qib_write_kreg(dd, kr_gpio_out, new_out);
- dd->cspec->gpio_out = new_out;
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
- }
- /*
- * It is unlikely that a read at this time would get valid
- * data on a pin whose direction line was set in the same
- * call to this function. We include the read here because
- * that allows us to potentially combine a change on one pin with
- * a read on another, and because the old code did something like
- * this.
- */
- read_val = qib_read_kreg64(dd, kr_extstatus);
- return SYM_FIELD(read_val, EXTStatus, GPIOIn);
-}
-
-/*
- * Read fundamental info we need to use the chip. These are
- * the registers that describe chip capabilities, and are
- * saved in shadow registers.
- */
-static void get_7220_chip_params(struct qib_devdata *dd)
-{
- u64 val;
- u32 piobufs;
- int mtu;
-
- dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
-
- dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
- dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
- dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
- dd->palign = qib_read_kreg32(dd, kr_palign);
- dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
- dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
-
- val = qib_read_kreg64(dd, kr_sendpiosize);
- dd->piosize2k = val & ~0U;
- dd->piosize4k = val >> 32;
-
- mtu = ib_mtu_enum_to_int(qib_ibmtu);
- if (mtu == -1)
- mtu = QIB_DEFAULT_MTU;
- dd->pport->ibmtu = (u32)mtu;
-
- val = qib_read_kreg64(dd, kr_sendpiobufcnt);
- dd->piobcnt2k = val & ~0U;
- dd->piobcnt4k = val >> 32;
- /* these may be adjusted in init_chip_wc_pat() */
- dd->pio2kbase = (u32 __iomem *)
- ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
- if (dd->piobcnt4k) {
- dd->pio4kbase = (u32 __iomem *)
- ((char __iomem *) dd->kregbase +
- (dd->piobufbase >> 32));
- /*
- * 4K buffers take 2 pages; we use roundup just to be
-		 * paranoid; we calculate it once here, rather than on
-		 * every buffer allocation.
- */
- dd->align4k = ALIGN(dd->piosize4k, dd->palign);
- }
-
- piobufs = dd->piobcnt4k + dd->piobcnt2k;
-
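-	/*
-	 * Each 64-bit pioavail register holds 2 bits per buffer (32
-	 * buffers per register); round piobufs up to whole registers.
-	 */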
- dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
- (sizeof(u64) * BITS_PER_BYTE / 2);
-}
-
-/*
- * The chip base addresses in cspec and cpspec have to be set
- * after a possible init_chip_wc_pat(), rather than in
- * get_7220_chip_params(), so this is split out as a separate function.
- */
-static void set_7220_baseaddrs(struct qib_devdata *dd)
-{
- u32 cregbase;
- /* init after possible re-map in init_chip_wc_pat() */
- cregbase = qib_read_kreg32(dd, kr_counterregbase);
- dd->cspec->cregbase = (u64 __iomem *)
- ((char __iomem *) dd->kregbase + cregbase);
-
- dd->egrtidbase = (u64 __iomem *)
- ((char __iomem *) dd->kregbase + dd->rcvegrbase);
-}
-
-
-#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl, SendIntBufAvail) | \
- SYM_MASK(SendCtrl, SPioEnable) | \
- SYM_MASK(SendCtrl, SSpecialTriggerEn) | \
- SYM_MASK(SendCtrl, SendBufAvailUpd) | \
- SYM_MASK(SendCtrl, AvailUpdThld) | \
- SYM_MASK(SendCtrl, SDmaEnable) | \
- SYM_MASK(SendCtrl, SDmaIntEnable) | \
- SYM_MASK(SendCtrl, SDmaHalt) | \
- SYM_MASK(SendCtrl, SDmaSingleDescriptor))
-
-static int sendctrl_hook(struct qib_devdata *dd,
- const struct diag_observer *op,
- u32 offs, u64 *data, u64 mask, int only_32)
-{
- unsigned long flags;
- unsigned idx = offs / sizeof(u64);
- u64 local_data, all_bits;
-
- if (idx != kr_sendctrl) {
- qib_dev_err(dd, "SendCtrl Hook called with offs %X, %s-bit\n",
- offs, only_32 ? "32" : "64");
- return 0;
- }
-
- all_bits = ~0ULL;
- if (only_32)
- all_bits >>= 32;
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- if ((mask & all_bits) != all_bits) {
- /*
- * At least some mask bits are zero, so we need
-		 * to read. The judgement call is whether to read from the
-		 * reg or the shadow. First cut: read the reg, and complain
- * if any bits which should be shadowed are different
- * from their shadowed value.
- */
- if (only_32)
- local_data = (u64)qib_read_kreg32(dd, idx);
- else
- local_data = qib_read_kreg64(dd, idx);
- qib_dev_err(dd, "Sendctrl -> %X, Shad -> %X\n",
- (u32)local_data, (u32)dd->sendctrl);
- if ((local_data & SENDCTRL_SHADOWED) !=
- (dd->sendctrl & SENDCTRL_SHADOWED))
- qib_dev_err(dd, "Sendctrl read: %X shadow is %X\n",
- (u32)local_data, (u32) dd->sendctrl);
- *data = (local_data & ~mask) | (*data & mask);
- }
- if (mask) {
- /*
- * At least some mask bits are one, so we need
- * to write, but only shadow some bits.
- */
- u64 sval, tval; /* Shadowed, transient */
-
- /*
- * New shadow val is bits we don't want to touch,
- * ORed with bits we do, that are intended for shadow.
- */
- sval = (dd->sendctrl & ~mask);
- sval |= *data & SENDCTRL_SHADOWED & mask;
- dd->sendctrl = sval;
- tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
- qib_dev_err(dd, "Sendctrl <- %X, Shad <- %X\n",
- (u32)tval, (u32)sval);
- qib_write_kreg(dd, kr_sendctrl, tval);
-		qib_write_kreg(dd, kr_scratch, 0ULL);
- }
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-
- return only_32 ? 4 : 8;
-}
-
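-/*
- * The observer below registers sendctrl_hook for just the single
- * 64-bit SendCtrl register (bottom and top offsets are equal).
- */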
-static const struct diag_observer sendctrl_observer = {
- sendctrl_hook, kr_sendctrl * sizeof(u64),
- kr_sendctrl * sizeof(u64)
-};
-
-/*
- * write the final few registers that depend on some of the
- * init setup. Done late in init, just before bringing up
- * the serdes.
- */
-static int qib_late_7220_initreg(struct qib_devdata *dd)
-{
- int ret = 0;
- u64 val;
-
- qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
- qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
- qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
- qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
- val = qib_read_kreg64(dd, kr_sendpioavailaddr);
- if (val != dd->pioavailregs_phys) {
- qib_dev_err(dd,
- "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
- (unsigned long) dd->pioavailregs_phys,
- (unsigned long long) val);
- ret = -EINVAL;
- }
- qib_register_observer(dd, &sendctrl_observer);
- return ret;
-}
-
-static int qib_init_7220_variables(struct qib_devdata *dd)
-{
- struct qib_chippport_specific *cpspec;
- struct qib_pportdata *ppd;
- int ret = 0;
- u32 sbufs, updthresh;
-
- cpspec = (struct qib_chippport_specific *)(dd + 1);
- ppd = &cpspec->pportdata;
- dd->pport = ppd;
- dd->num_pports = 1;
-
- dd->cspec = (struct qib_chip_specific *)(cpspec + dd->num_pports);
- dd->cspec->dd = dd;
- ppd->cpspec = cpspec;
-
- spin_lock_init(&dd->cspec->sdepb_lock);
- spin_lock_init(&dd->cspec->rcvmod_lock);
- spin_lock_init(&dd->cspec->gpio_lock);
-
- /* we haven't yet set QIB_PRESENT, so use read directly */
- dd->revision = readq(&dd->kregbase[kr_revision]);
-
- if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
- qib_dev_err(dd,
- "Revision register read failure, giving up initialization\n");
- ret = -ENODEV;
- goto bail;
- }
- dd->flags |= QIB_PRESENT; /* now register routines work */
-
- dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R,
- ChipRevMajor);
- dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R,
- ChipRevMinor);
-
- get_7220_chip_params(dd);
- qib_7220_boardname(dd);
-
- /*
- * GPIO bits for TWSI data and clock,
- * used for serial EEPROM.
- */
- dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
- dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
- dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
-
- dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
- QIB_NODMA_RTAIL | QIB_HAS_THRESH_UPDATE;
- dd->flags |= qib_special_trigger ?
- QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
-
- init_waitqueue_head(&cpspec->autoneg_wait);
- INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work);
-
- ret = qib_init_pportdata(ppd, dd, 0, 1);
- if (ret)
- goto bail;
- ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
- ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR;
-
- ppd->link_width_enabled = ppd->link_width_supported;
- ppd->link_speed_enabled = ppd->link_speed_supported;
- /*
-	 * Set the initial values to reasonable defaults; they will be
-	 * set for real when the link is up.
- */
- ppd->link_width_active = IB_WIDTH_4X;
- ppd->link_speed_active = QIB_IB_SDR;
- ppd->delay_mult = rate_to_delay[0][1];
- ppd->vls_supported = IB_VL_VL0;
- ppd->vls_operational = ppd->vls_supported;
-
- if (!qib_mini_init)
- qib_write_kreg(dd, kr_rcvbthqp, QIB_KD_QP);
-
- timer_setup(&ppd->cpspec->chase_timer, reenable_7220_chase, 0);
-
- qib_num_cfg_vls = 1; /* if any 7220's, only one VL */
-
- dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
- dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
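-	/* the RHF (receive header flags) occupy the last u64 of each entry */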
- dd->rhf_offset =
- dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
-
- /* we always allocate at least 2048 bytes for eager buffers */
- ret = ib_mtu_enum_to_int(qib_ibmtu);
- dd->rcvegrbufsize = ret != -1 ? max(ret, 2048) : QIB_DEFAULT_MTU;
- dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
-
- qib_7220_tidtemplate(dd);
-
- /*
- * We can request a receive interrupt for 1 or
- * more packets from current offset. For now, we set this
- * up for a single packet.
- */
- dd->rhdrhead_intr_off = 1ULL << 32;
-
- /* setup the stats timer; the add_timer is done at end of init */
- timer_setup(&dd->stats_timer, qib_get_7220_faststats, 0);
- dd->stats_timer.expires = jiffies + ACTIVITY_TIMER * HZ;
-
- /*
- * Control[4] has been added to change the arbitration within
- * the SDMA engine between favoring data fetches over descriptor
- * fetches. qib_sdma_fetch_arb==0 gives data fetches priority.
- */
- if (qib_sdma_fetch_arb)
- dd->control |= 1 << 4;
-
- dd->ureg_align = 0x10000; /* 64KB alignment */
-
- dd->piosize2kmax_dwords = (dd->piosize2k >> 2)-1;
- qib_7220_config_ctxts(dd);
- qib_set_ctxtcnt(dd); /* needed for PAT setup */
-
- ret = init_chip_wc_pat(dd, 0);
- if (ret)
- goto bail;
- set_7220_baseaddrs(dd); /* set chip access pointers now */
-
- ret = 0;
- if (qib_mini_init)
- goto bail;
-
- ret = qib_create_ctxts(dd);
- init_7220_cntrnames(dd);
-
-	/*
-	 * Use all of the 4KB buffers for kernel SDMA (zero if !SDMA).
-	 * Reserve the update-threshold amount, or 3, whichever is
-	 * greater, for other kernel use, such as sending SMI, MAD, and
-	 * ACKs; unless we aren't enabling SDMA, in which case we want
-	 * to use all the 4k bufs for the kernel.
-	 * If this were less than the update threshold, we could wait
-	 * a long time for an update. Coded this way because we
-	 * sometimes change the update threshold for various reasons,
-	 * and we want this to remain robust.
-	 */
- updthresh = 8U; /* update threshold */
- if (dd->flags & QIB_HAS_SEND_DMA) {
- dd->cspec->sdmabufcnt = dd->piobcnt4k;
- sbufs = updthresh > 3 ? updthresh : 3;
- } else {
- dd->cspec->sdmabufcnt = 0;
- sbufs = dd->piobcnt4k;
- }
-
- dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
- dd->cspec->sdmabufcnt;
- dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
- dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
- dd->last_pio = dd->cspec->lastbuf_for_pio;
- dd->pbufsctxt = dd->lastctxt_piobuf /
- (dd->cfgctxts - dd->first_user_ctxt);
-
- /*
-	 * If we are at 16 user contexts, we will have only 7 sbufs
-	 * per context, so drop the update threshold to match. We
-	 * want to update before we actually run out; at low pbufs/ctxt,
-	 * give ourselves some margin.
- */
- if ((dd->pbufsctxt - 2) < updthresh)
- updthresh = dd->pbufsctxt - 2;
-
- dd->cspec->updthresh_dflt = updthresh;
- dd->cspec->updthresh = updthresh;
-
- /* before full enable, no interrupts, no locking needed */
- dd->sendctrl |= (updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
- << SYM_LSB(SendCtrl, AvailUpdThld);
-
- dd->psxmitwait_supported = 1;
- dd->psxmitwait_check_rate = QIB_7220_PSXMITWAIT_CHECK_RATE;
-bail:
- return ret;
-}
-
-static u32 __iomem *qib_7220_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
- u32 *pbufnum)
-{
- u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
- struct qib_devdata *dd = ppd->dd;
- u32 __iomem *buf;
-
- if (((pbc >> 32) & PBC_7220_VL15_SEND_CTRL) &&
- !(ppd->lflags & (QIBL_IB_AUTONEG_INPROG | QIBL_LINKACTIVE)))
- buf = get_7220_link_buf(ppd, pbufnum);
- else {
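-		/* too large for a 2k buffer? start at the first 4k buffer */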
- if ((plen + 1) > dd->piosize2kmax_dwords)
- first = dd->piobcnt2k;
- else
- first = 0;
- /* try 4k if all 2k busy, so same last for both sizes */
- last = dd->cspec->lastbuf_for_pio;
- buf = qib_getsendbuf_range(dd, pbufnum, first, last);
- }
- return buf;
-}
-
-/* these 2 "counters" are really control registers, and are always RW */
-static void qib_set_cntr_7220_sample(struct qib_pportdata *ppd, u32 intv,
- u32 start)
-{
- write_7220_creg(ppd->dd, cr_psinterval, intv);
- write_7220_creg(ppd->dd, cr_psstart, start);
-}
-
-/*
- * NOTE: no real attempt is made to generalize the SDMA stuff.
- * At some point "soon" we will have a new, more generalized
- * SDMA interface, and then we'll clean this up.
- */
-
-/* Must be called with sdma_lock held, or before init finished */
-static void qib_sdma_update_7220_tail(struct qib_pportdata *ppd, u16 tail)
-{
- /* Commit writes to memory and advance the tail on the chip */
- wmb();
- ppd->sdma_descq_tail = tail;
- qib_write_kreg(ppd->dd, kr_senddmatail, tail);
-}
-
-static void qib_sdma_set_7220_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
-{
-}
-
-static struct sdma_set_state_action sdma_7220_action_table[] = {
- [qib_sdma_state_s00_hw_down] = {
- .op_enable = 0,
- .op_intenable = 0,
- .op_halt = 0,
- .go_s99_running_tofalse = 1,
- },
- [qib_sdma_state_s10_hw_start_up_wait] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 1,
- },
- [qib_sdma_state_s20_idle] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 1,
- },
- [qib_sdma_state_s30_sw_clean_up_wait] = {
- .op_enable = 0,
- .op_intenable = 1,
- .op_halt = 0,
- },
- [qib_sdma_state_s40_hw_clean_up_wait] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 1,
- },
- [qib_sdma_state_s50_hw_halt_wait] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 1,
- },
- [qib_sdma_state_s99_running] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 0,
- .go_s99_running_totrue = 1,
- },
-};
-
-static void qib_7220_sdma_init_early(struct qib_pportdata *ppd)
-{
- ppd->sdma_state.set_state_action = sdma_7220_action_table;
-}
-
-static int init_sdma_7220_regs(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- unsigned i, n;
- u64 senddmabufmask[3] = { 0 };
-
- /* Set SendDmaBase */
- qib_write_kreg(dd, kr_senddmabase, ppd->sdma_descq_phys);
- qib_sdma_7220_setlengen(ppd);
- qib_sdma_update_7220_tail(ppd, 0); /* Set SendDmaTail */
- /* Set SendDmaHeadAddr */
- qib_write_kreg(dd, kr_senddmaheadaddr, ppd->sdma_head_phys);
-
- /*
-	 * Reserve all the former "kernel" piobufs, using the high number
-	 * range so we get as many 4K buffers as possible.
- */
- n = dd->piobcnt2k + dd->piobcnt4k;
- i = n - dd->cspec->sdmabufcnt;
-
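-	/*
-	 * senddmabufmask0..2 form a bitmap over all PIO buffers; setting
-	 * the top sdmabufcnt bits reserves those buffers for SDMA.
-	 */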
- for (; i < n; ++i) {
- unsigned word = i / 64;
- unsigned bit = i & 63;
-
- senddmabufmask[word] |= 1ULL << bit;
- }
- qib_write_kreg(dd, kr_senddmabufmask0, senddmabufmask[0]);
- qib_write_kreg(dd, kr_senddmabufmask1, senddmabufmask[1]);
- qib_write_kreg(dd, kr_senddmabufmask2, senddmabufmask[2]);
-
- ppd->sdma_state.first_sendbuf = i;
- ppd->sdma_state.last_sendbuf = n;
-
- return 0;
-}
-
-/* sdma_lock must be held */
-static u16 qib_sdma_7220_gethead(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- int sane;
- int use_dmahead;
- u16 swhead;
- u16 swtail;
- u16 cnt;
- u16 hwhead;
-
- use_dmahead = __qib_sdma_running(ppd) &&
- (dd->flags & QIB_HAS_SDMA_TIMEOUT);
-retry:
- hwhead = use_dmahead ?
- (u16)le64_to_cpu(*ppd->sdma_head_dma) :
- (u16)qib_read_kreg32(dd, kr_senddmahead);
-
- swhead = ppd->sdma_descq_head;
- swtail = ppd->sdma_descq_tail;
- cnt = ppd->sdma_descq_cnt;
-
- if (swhead < swtail) {
- /* not wrapped */
- sane = (hwhead >= swhead) & (hwhead <= swtail);
- } else if (swhead > swtail) {
- /* wrapped around */
- sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
- (hwhead <= swtail);
- } else {
- /* empty */
- sane = (hwhead == swhead);
- }
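-	/*
-	 * Illustrative: with cnt = 256, swhead = 200, swtail = 50
-	 * (wrapped), hwhead is sane in 200..255 or 0..50; anything in
-	 * 51..199 indicates a bogus read.
-	 */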
-
- if (unlikely(!sane)) {
- if (use_dmahead) {
- /* try one more time, directly from the register */
- use_dmahead = 0;
- goto retry;
- }
- /* assume no progress */
- hwhead = swhead;
- }
-
- return hwhead;
-}
-
-static int qib_sdma_7220_busy(struct qib_pportdata *ppd)
-{
- u64 hwstatus = qib_read_kreg64(ppd->dd, kr_senddmastatus);
-
- return (hwstatus & SYM_MASK(SendDmaStatus, ScoreBoardDrainInProg)) ||
- (hwstatus & SYM_MASK(SendDmaStatus, AbortInProg)) ||
- (hwstatus & SYM_MASK(SendDmaStatus, InternalSDmaEnable)) ||
- !(hwstatus & SYM_MASK(SendDmaStatus, ScbEmpty));
-}
-
-/*
- * Compute the amount of delay before sending the next packet if the
- * port's send rate differs from the static rate set for the QP.
- * Since the delay affects this packet but the amount of the delay is
- * based on the length of the previous packet, use the last delay computed
- * and save the delay count for this packet to be used next time
- * we get here.
- */
-static u32 qib_7220_setpbc_control(struct qib_pportdata *ppd, u32 plen,
- u8 srate, u8 vl)
-{
- u8 snd_mult = ppd->delay_mult;
- u8 rcv_mult = ib_rate_to_delay[srate];
- u32 ret = ppd->cpspec->last_delay_mult;
-
- ppd->cpspec->last_delay_mult = (rcv_mult > snd_mult) ?
- (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
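-	/*
-	 * E.g. (illustrative values) plen = 10, rcv_mult = 4,
-	 * snd_mult = 2: the delay saved for the next packet is
-	 * (10 * (4 - 2) + 1) >> 1 = 10.
-	 */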
-
- /* Indicate VL15, if necessary */
- if (vl == 15)
- ret |= PBC_7220_VL15_SEND_CTRL;
- return ret;
-}
-
-static void qib_7220_initvl15_bufs(struct qib_devdata *dd)
-{
-}
-
-static void qib_7220_init_ctxt(struct qib_ctxtdata *rcd)
-{
- if (!rcd->ctxt) {
- rcd->rcvegrcnt = IBA7220_KRCVEGRCNT;
- rcd->rcvegr_tid_base = 0;
- } else {
- rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
- rcd->rcvegr_tid_base = IBA7220_KRCVEGRCNT +
- (rcd->ctxt - 1) * rcd->rcvegrcnt;
- }
-}
-
-static void qib_7220_txchk_change(struct qib_devdata *dd, u32 start,
- u32 len, u32 which, struct qib_ctxtdata *rcd)
-{
- int i;
- unsigned long flags;
-
- switch (which) {
- case TXCHK_CHG_TYPE_KERN:
- /* see if we need to raise avail update threshold */
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- for (i = dd->first_user_ctxt;
- dd->cspec->updthresh != dd->cspec->updthresh_dflt
- && i < dd->cfgctxts; i++)
- if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
- ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
- < dd->cspec->updthresh_dflt)
- break;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- if (i == dd->cfgctxts) {
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- dd->cspec->updthresh = dd->cspec->updthresh_dflt;
- dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
- dd->sendctrl |= (dd->cspec->updthresh &
- SYM_RMASK(SendCtrl, AvailUpdThld)) <<
- SYM_LSB(SendCtrl, AvailUpdThld);
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
- }
- break;
- case TXCHK_CHG_TYPE_USER:
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
- / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
- dd->cspec->updthresh = (rcd->piocnt /
- rcd->subctxt_cnt) - 1;
- dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
- dd->sendctrl |= (dd->cspec->updthresh &
- SYM_RMASK(SendCtrl, AvailUpdThld))
- << SYM_LSB(SendCtrl, AvailUpdThld);
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- sendctrl_7220_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
- } else
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- break;
- }
-}
-
-static void writescratch(struct qib_devdata *dd, u32 val)
-{
- qib_write_kreg(dd, kr_scratch, val);
-}
-
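-/* bit 6 is clear in 0xBF: register 6 is the one register we do not have */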
-#define VALID_TS_RD_REG_MASK 0xBF
-/**
- * qib_7220_tempsense_rd - read register of temp sensor via TWSI
- * @dd: the qlogic_ib device
- * @regnum: register to read from
- *
- * returns reg contents (0..255) or < 0 for error
- */
-static int qib_7220_tempsense_rd(struct qib_devdata *dd, int regnum)
-{
- int ret;
- u8 rdata;
-
- if (regnum > 7) {
- ret = -EINVAL;
- goto bail;
- }
-
- /* return a bogus value for (the one) register we do not have */
- if (!((1 << regnum) & VALID_TS_RD_REG_MASK)) {
- ret = 0;
- goto bail;
- }
-
- ret = mutex_lock_interruptible(&dd->eep_lock);
- if (ret)
- goto bail;
-
- ret = qib_twsi_blk_rd(dd, QIB_TWSI_TEMP_DEV, regnum, &rdata, 1);
- if (!ret)
- ret = rdata;
-
- mutex_unlock(&dd->eep_lock);
-
- /*
- * There are three possibilities here:
- * ret is actual value (0..255)
- * ret is -ENXIO or -EINVAL from twsi code or this file
- * ret is -EINTR from mutex_lock_interruptible.
- */
-bail:
- return ret;
-}
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-static int qib_7220_notify_dca(struct qib_devdata *dd, unsigned long event)
-{
- return 0;
-}
-#endif
-
-/* Dummy function, as 7220 boards never disable EEPROM Write */
-static int qib_7220_eeprom_wen(struct qib_devdata *dd, int wen)
-{
- return 1;
-}
-
-/**
- * qib_init_iba7220_funcs - set up the chip-specific function pointers
- * @pdev: the pci_dev for qlogic_ib device
- * @ent: pci_device_id struct for this dev
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- */
-struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct qib_devdata *dd;
- int ret;
- u32 boardid, minwidth;
-
- dd = qib_alloc_devdata(pdev, sizeof(struct qib_chip_specific) +
- sizeof(struct qib_chippport_specific));
- if (IS_ERR(dd))
- goto bail;
-
- dd->f_bringup_serdes = qib_7220_bringup_serdes;
- dd->f_cleanup = qib_setup_7220_cleanup;
- dd->f_clear_tids = qib_7220_clear_tids;
- dd->f_free_irq = qib_free_irq;
- dd->f_get_base_info = qib_7220_get_base_info;
- dd->f_get_msgheader = qib_7220_get_msgheader;
- dd->f_getsendbuf = qib_7220_getsendbuf;
- dd->f_gpio_mod = gpio_7220_mod;
- dd->f_eeprom_wen = qib_7220_eeprom_wen;
- dd->f_hdrqempty = qib_7220_hdrqempty;
- dd->f_ib_updown = qib_7220_ib_updown;
- dd->f_init_ctxt = qib_7220_init_ctxt;
- dd->f_initvl15_bufs = qib_7220_initvl15_bufs;
- dd->f_intr_fallback = qib_7220_intr_fallback;
- dd->f_late_initreg = qib_late_7220_initreg;
- dd->f_setpbc_control = qib_7220_setpbc_control;
- dd->f_portcntr = qib_portcntr_7220;
- dd->f_put_tid = qib_7220_put_tid;
- dd->f_quiet_serdes = qib_7220_quiet_serdes;
- dd->f_rcvctrl = rcvctrl_7220_mod;
- dd->f_read_cntrs = qib_read_7220cntrs;
- dd->f_read_portcntrs = qib_read_7220portcntrs;
- dd->f_reset = qib_setup_7220_reset;
- dd->f_init_sdma_regs = init_sdma_7220_regs;
- dd->f_sdma_busy = qib_sdma_7220_busy;
- dd->f_sdma_gethead = qib_sdma_7220_gethead;
- dd->f_sdma_sendctrl = qib_7220_sdma_sendctrl;
- dd->f_sdma_set_desc_cnt = qib_sdma_set_7220_desc_cnt;
- dd->f_sdma_update_tail = qib_sdma_update_7220_tail;
- dd->f_sdma_hw_clean_up = qib_7220_sdma_hw_clean_up;
- dd->f_sdma_hw_start_up = qib_7220_sdma_hw_start_up;
- dd->f_sdma_init_early = qib_7220_sdma_init_early;
- dd->f_sendctrl = sendctrl_7220_mod;
- dd->f_set_armlaunch = qib_set_7220_armlaunch;
- dd->f_set_cntr_sample = qib_set_cntr_7220_sample;
- dd->f_iblink_state = qib_7220_iblink_state;
- dd->f_ibphys_portstate = qib_7220_phys_portstate;
- dd->f_get_ib_cfg = qib_7220_get_ib_cfg;
- dd->f_set_ib_cfg = qib_7220_set_ib_cfg;
- dd->f_set_ib_loopback = qib_7220_set_loopback;
- dd->f_set_intr_state = qib_7220_set_intr_state;
- dd->f_setextled = qib_setup_7220_setextled;
- dd->f_txchk_change = qib_7220_txchk_change;
- dd->f_update_usrhead = qib_update_7220_usrhead;
- dd->f_wantpiobuf_intr = qib_wantpiobuf_7220_intr;
- dd->f_xgxs_reset = qib_7220_xgxs_reset;
- dd->f_writescratch = writescratch;
- dd->f_tempsense_rd = qib_7220_tempsense_rd;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dd->f_notify_dca = qib_7220_notify_dca;
-#endif
- /*
- * Do remaining pcie setup and save pcie values in dd.
- * Any error printing is already done by the init code.
- * On return, we have the chip mapped, but chip registers
- * are not set up until start of qib_init_7220_variables.
- */
- ret = qib_pcie_ddinit(dd, pdev, ent);
- if (ret < 0)
- goto bail_free;
-
- /* initialize chip-specific variables */
- ret = qib_init_7220_variables(dd);
- if (ret)
- goto bail_cleanup;
-
- if (qib_mini_init)
- goto bail;
-
- boardid = SYM_FIELD(dd->revision, Revision,
- BoardID);
- switch (boardid) {
- case 0:
- case 2:
- case 10:
- case 12:
- minwidth = 16; /* x16 capable boards */
- break;
- default:
- minwidth = 8; /* x8 capable boards */
- break;
- }
- if (qib_pcie_params(dd, minwidth, NULL))
- qib_dev_err(dd,
- "Failed to setup PCIe or interrupts; continuing anyway\n");
-
- if (qib_read_kreg64(dd, kr_hwerrstatus) &
- QLOGIC_IB_HWE_SERDESPLLFAILED)
- qib_write_kreg(dd, kr_hwerrclear,
- QLOGIC_IB_HWE_SERDESPLLFAILED);
-
- /* setup interrupt handler (interrupt type handled above) */
- qib_setup_7220_interrupt(dd);
- qib_7220_init_hwerrors(dd);
-
- /* clear diagctrl register, in case diags were running and crashed */
- qib_write_kreg(dd, kr_hwdiagctrl, 0);
-
- goto bail;
-
-bail_cleanup:
- qib_pcie_ddcleanup(dd);
-bail_free:
- qib_free_devdata(dd);
- dd = ERR_PTR(ret);
-bail:
- return dd;
-}
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
deleted file mode 100644
index 781b6a4fb002..000000000000
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ /dev/null
@@ -1,8475 +0,0 @@
-/*
- * Copyright (c) 2012 - 2017 Intel Corporation. All rights reserved.
- * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file contains all of the code that is specific to the
- * InfiniPath 7322 chip
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_smi.h>
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-#include <linux/dca.h>
-#endif
-
-#include "qib.h"
-#include "qib_7322_regs.h"
-#include "qib_qsfp.h"
-
-#include "qib_mad.h"
-#include "qib_verbs.h"
-
-#undef pr_fmt
-#define pr_fmt(fmt) QIB_DRV_NAME " " fmt
-
-static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
-static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
-static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
-static irqreturn_t qib_7322intr(int irq, void *data);
-static irqreturn_t qib_7322bufavail(int irq, void *data);
-static irqreturn_t sdma_intr(int irq, void *data);
-static irqreturn_t sdma_idle_intr(int irq, void *data);
-static irqreturn_t sdma_progress_intr(int irq, void *data);
-static irqreturn_t sdma_cleanup_intr(int irq, void *data);
-static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
- struct qib_ctxtdata *rcd);
-static u8 qib_7322_phys_portstate(u64);
-static u32 qib_7322_iblink_state(u64);
-static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
- u16 linitcmd);
-static void force_h1(struct qib_pportdata *);
-static void adj_tx_serdes(struct qib_pportdata *);
-static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
-static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
-
-static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
-static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
-static void serdes_7322_los_enable(struct qib_pportdata *, int);
-static int serdes_7322_init_old(struct qib_pportdata *);
-static int serdes_7322_init_new(struct qib_pportdata *);
-static void dump_sdma_7322_state(struct qib_pportdata *);
-
-#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
-
-/* LE2 serdes values for different cases */
-#define LE2_DEFAULT 5
-#define LE2_5m 4
-#define LE2_QME 0
-
-/* Below is special-purpose, so only really works for the IB SerDes blocks. */
-#define IBSD(hw_pidx) (hw_pidx + 2)
-
-/* these are variables for documentation and experimentation purposes */
-static const unsigned rcv_int_timeout = 375;
-static const unsigned rcv_int_count = 16;
-static const unsigned sdma_idle_cnt = 64;
-
-/* Time to stop altering Rx Equalization parameters, after link up. */
-#define RXEQ_DISABLE_MSECS 2500
-
-/*
- * Number of VLs we are configured to use (to allow for more
- * credits per vl, etc.)
- */
-ushort qib_num_cfg_vls = 2;
-module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
-MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
-
-static ushort qib_chase = 1;
-module_param_named(chase, qib_chase, ushort, S_IRUGO);
-MODULE_PARM_DESC(chase, "Enable state chase handling");
-
-static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
-module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
-MODULE_PARM_DESC(long_attenuation,
- "attenuation cutoff (dB) for long copper cable setup");
-
-static ushort qib_singleport;
-module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
-MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
-
-static ushort qib_krcvq01_no_msi;
-module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
-MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");
-
-/*
- * Receive header queue sizes
- */
-static unsigned qib_rcvhdrcnt;
-module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
-MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
-
-static unsigned qib_rcvhdrsize;
-module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
-MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
-
-static unsigned qib_rcvhdrentsize;
-module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
-MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
-
-#define MAX_ATTEN_LEN 64 /* plenty for any real system */
-/* for read back, default index is ~5m copper cable */
-static char txselect_list[MAX_ATTEN_LEN] = "10";
-static struct kparam_string kp_txselect = {
- .string = txselect_list,
- .maxlen = MAX_ATTEN_LEN
-};
-static int setup_txselect(const char *, const struct kernel_param *);
-module_param_call(txselect, setup_txselect, param_get_string,
- &kp_txselect, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(txselect,
- "Tx serdes indices (for no QSFP or invalid QSFP data)");
-
-#define BOARD_QME7342 5
-#define BOARD_QMH7342 6
-#define BOARD_QMH7360 9
-#define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
- BOARD_QMH7342)
-#define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
- BOARD_QME7342)
-
-#define KREG_IDX(regname) (QIB_7322_##regname##_OFFS / sizeof(u64))
-
-#define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
-
-#define MASK_ACROSS(lsb, msb) \
- (((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
-
-#define SYM_RMASK(regname, fldname) ((u64) \
- QIB_7322_##regname##_##fldname##_RMASK)
-
-#define SYM_MASK(regname, fldname) ((u64) \
- QIB_7322_##regname##_##fldname##_RMASK << \
- QIB_7322_##regname##_##fldname##_LSB)
-
-#define SYM_FIELD(value, regname, fldname) ((u64) \
- (((value) >> SYM_LSB(regname, fldname)) & \
- SYM_RMASK(regname, fldname)))
-
-/* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
-#define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
- (((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
-
-#define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
-#define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
-#define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
-#define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
-#define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
-/* Below because most, but not all, fields of IntMask have that full suffix */
-#define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
-
-
-#define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
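-
-/*
- * A worked sketch of how these macros compose (the field position is
- * hypothetical, not taken from the generated 7322 header): for a
- * field occupying bits 4..7 of a register,
- *
- *	MASK_ACROSS(4, 7) == 0xf0
- *	SYM_FIELD(0x50, Reg, Fld) == ((0x50 >> 4) & 0xf) == 5
- *
- * i.e. SYM_MASK() selects the field in place, SYM_RMASK() is that
- * mask shifted down to bit 0, and SYM_FIELD() extracts the value.
- */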
-
-/*
- * the size bits give us 2^N, in KB units. 0 marks as invalid,
- * and 7 is reserved. We currently use only 2KB and 4KB
- */
-#define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
-#define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
-#define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
-#define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
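-
-/*
- * A minimal sketch of the intended encoding (hedged; this mirrors, but
- * is not copied from, the put_tid path): the buffer's bus address must
- * be 2KB aligned, its low IBA7322_TID_PA_SHIFT bits are dropped, and an
- * IBA7322_TID_SZ_* code is OR'd in to mark the buffer size, e.g.
- *
- *	u64 tid = (pa >> IBA7322_TID_PA_SHIFT) | IBA7322_TID_SZ_2K;
- */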
-
-#define SendIBSLIDAssignMask \
- QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
-#define SendIBSLMCMask \
- QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
-
-#define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
-#define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
-#define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
-#define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
-#define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
-#define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
-
-#define _QIB_GPIO_SDA_NUM 1
-#define _QIB_GPIO_SCL_NUM 0
-#define QIB_EEPROM_WEN_NUM 14
-#define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
-
-/* HW counter clock is at 4nsec */
-#define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
-
-/* full speed IB port 1 only */
-#define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
-#define PORT_SPD_CAP_SHIFT 3
-
-/* full speed featuremask, both ports */
-#define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
-
-/*
- * This file contains almost all the chip-specific register information and
- * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
- */
-
-/* Use defines to tie machine-generated names to lower-case names */
-#define kr_contextcnt KREG_IDX(ContextCnt)
-#define kr_control KREG_IDX(Control)
-#define kr_counterregbase KREG_IDX(CntrRegBase)
-#define kr_errclear KREG_IDX(ErrClear)
-#define kr_errmask KREG_IDX(ErrMask)
-#define kr_errstatus KREG_IDX(ErrStatus)
-#define kr_extctrl KREG_IDX(EXTCtrl)
-#define kr_extstatus KREG_IDX(EXTStatus)
-#define kr_gpio_clear KREG_IDX(GPIOClear)
-#define kr_gpio_mask KREG_IDX(GPIOMask)
-#define kr_gpio_out KREG_IDX(GPIOOut)
-#define kr_gpio_status KREG_IDX(GPIOStatus)
-#define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
-#define kr_debugportval KREG_IDX(DebugPortValueReg)
-#define kr_fmask KREG_IDX(feature_mask)
-#define kr_act_fmask KREG_IDX(active_feature_mask)
-#define kr_hwerrclear KREG_IDX(HwErrClear)
-#define kr_hwerrmask KREG_IDX(HwErrMask)
-#define kr_hwerrstatus KREG_IDX(HwErrStatus)
-#define kr_intclear KREG_IDX(IntClear)
-#define kr_intmask KREG_IDX(IntMask)
-#define kr_intredirect KREG_IDX(IntRedirect0)
-#define kr_intstatus KREG_IDX(IntStatus)
-#define kr_pagealign KREG_IDX(PageAlign)
-#define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
-#define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
-#define kr_rcvegrbase KREG_IDX(RcvEgrBase)
-#define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
-#define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
-#define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
-#define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
-#define kr_rcvtidbase KREG_IDX(RcvTIDBase)
-#define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
-#define kr_revision KREG_IDX(Revision)
-#define kr_scratch KREG_IDX(Scratch)
-#define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
-#define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
-#define kr_sendctrl KREG_IDX(SendCtrl)
-#define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
-#define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
-#define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
-#define kr_sendpiobufbase KREG_IDX(SendBufBase)
-#define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
-#define kr_sendpiosize KREG_IDX(SendBufSize)
-#define kr_sendregbase KREG_IDX(SendRegBase)
-#define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
-#define kr_userregbase KREG_IDX(UserRegBase)
-#define kr_intgranted KREG_IDX(Int_Granted)
-#define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
-#define kr_intblocked KREG_IDX(IntBlocked)
-#define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
-
-/*
- * Per-port kernel registers. Access only with qib_read_kreg_port()
- * or qib_write_kreg_port()
- */
-#define krp_errclear KREG_IBPORT_IDX(ErrClear)
-#define krp_errmask KREG_IBPORT_IDX(ErrMask)
-#define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
-#define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
-#define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
-#define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
-#define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
-#define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
-#define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
-#define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
-#define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
-#define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
-#define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
-#define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
-#define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
-#define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
-#define krp_psinterval KREG_IBPORT_IDX(PSInterval)
-#define krp_psstart KREG_IBPORT_IDX(PSStart)
-#define krp_psstat KREG_IBPORT_IDX(PSStat)
-#define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
-#define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
-#define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
-#define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
-#define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
-#define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
-#define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
-#define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
-#define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
-#define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
-#define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
-#define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
-#define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
-#define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
-#define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
-#define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
-#define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
-#define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
-#define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
-#define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
-#define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
-#define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
-#define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
-#define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
-#define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
-#define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
-#define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
-#define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
-#define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
-#define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
-#define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
-
-/*
- * Per-context kernel registers. Access only with qib_read_kreg_ctxt()
- * or qib_write_kreg_ctxt()
- */
-#define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
-#define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
-
-/*
- * TID Flow table, per context. Reduces
- * number of hdrq updates to one per flow (or on errors).
- * Contexts 0 and 1 share the same memory, but have distinct
- * addresses. Since, for now, we never use expected sends on
- * kernel contexts, we don't worry about that (for example, we
- * initialize those entries twice for ctxt 0/1 on driver load).
- */
-#define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
-#define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
-
-/* these are the error bits in the tid flows, and are W1C */
-#define TIDFLOW_ERRBITS ( \
- (SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
- SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
- (SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
- SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
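-
-/*
- * Being W1C, a flow's error bits are cleared by writing them back as
- * ones; a minimal sketch (the flow and ctxt values are placeholders):
- *
- *	qib_write_ureg(dd, ur_rcvflowtable + flow, TIDFLOW_ERRBITS, ctxt);
- */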
-
-/* Most (not all) counters are per-IBport.
- * Requires that LBIntCnt be at offset 0 in the group.
- */
-#define CREG_IDX(regname) \
-((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
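-
-/*
- * For example (offset hypothetical): if RxDataPktCnt_0 sat 0x40 bytes
- * past LBIntCnt in the counter block, CREG_IDX(RxDataPktCnt) would be
- * 8, i.e. counters are indexed in u64 units relative to LBIntCnt.
- */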
-
-#define crp_badformat CREG_IDX(RxVersionErrCnt)
-#define crp_err_rlen CREG_IDX(RxLenErrCnt)
-#define crp_erricrc CREG_IDX(RxICRCErrCnt)
-#define crp_errlink CREG_IDX(RxLinkMalformCnt)
-#define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
-#define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
-#define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
-#define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
-#define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
-#define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
-#define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
-#define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
-#define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
-#define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
-#define crp_pktrcv CREG_IDX(RxDataPktCnt)
-#define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
-#define crp_pktsend CREG_IDX(TxDataPktCnt)
-#define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
-#define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
-#define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
-#define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
-#define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
-#define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
-#define crp_rcvebp CREG_IDX(RxEBPCnt)
-#define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
-#define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
-#define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
-#define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
-#define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
-#define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
-#define crp_rxvlerr CREG_IDX(RxVlErrCnt)
-#define crp_sendstall CREG_IDX(TxFlowStallCnt)
-#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
-#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
-#define crp_txlenerr CREG_IDX(TxLenErrCnt)
-#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
-#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
-#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
-#define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
-#define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
-#define crp_wordrcv CREG_IDX(RxDwordCnt)
-#define crp_wordsend CREG_IDX(TxDwordCnt)
-#define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
-
-/* these are the (few) counters that are not port-specific */
-#define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
- QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
-#define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
-#define cr_lbint CREG_DEVIDX(LBIntCnt)
-#define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
-#define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
-#define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
-#define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
-#define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
-
-/* no chip register for # of IB ports supported, so define */
-#define NUM_IB_PORTS 2
-
-/* 1 VL15 buffer per hardware IB port, no register for this, so define */
-#define NUM_VL15_BUFS NUM_IB_PORTS
-
-/*
- * Contexts 0 and 1 are special, and there is no chip register that
- * defines this value, so we have to define it here.
- * These are all allocated to context 0 or 1 in a single-port
- * hardware configuration; otherwise each context gets half.
- */
-#define KCTXT0_EGRCNT 2048
-
-/* values for vl and port fields in PBC, 7322-specific */
-#define PBC_PORT_SEL_LSB 26
-#define PBC_PORT_SEL_RMASK 1
-#define PBC_VL_NUM_LSB 27
-#define PBC_VL_NUM_RMASK 7
-#define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
-#define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
-
-static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
- [IB_RATE_2_5_GBPS] = 16,
- [IB_RATE_5_GBPS] = 8,
- [IB_RATE_10_GBPS] = 4,
- [IB_RATE_20_GBPS] = 2,
- [IB_RATE_30_GBPS] = 2,
- [IB_RATE_40_GBPS] = 1
-};
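-
-/*
- * Sketch of intended use (illustrative): the table converts an IB
- * static rate into the chip's relative inter-packet delay factor,
- * so IB_RATE_10_GBPS maps to 4 and unlisted rates read back as 0:
- *
- *	u8 delay = ib_rate_to_delay[rate];
- */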
-
-static const char * const qib_sdma_state_names[] = {
- [qib_sdma_state_s00_hw_down] = "s00_HwDown",
- [qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
- [qib_sdma_state_s20_idle] = "s20_Idle",
- [qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
- [qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
- [qib_sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
- [qib_sdma_state_s99_running] = "s99_Running",
-};
-
-#define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
-#define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
-
-/* link training states, from IBC */
-#define IB_7322_LT_STATE_DISABLED 0x00
-#define IB_7322_LT_STATE_LINKUP 0x01
-#define IB_7322_LT_STATE_POLLACTIVE 0x02
-#define IB_7322_LT_STATE_POLLQUIET 0x03
-#define IB_7322_LT_STATE_SLEEPDELAY 0x04
-#define IB_7322_LT_STATE_SLEEPQUIET 0x05
-#define IB_7322_LT_STATE_CFGDEBOUNCE 0x08
-#define IB_7322_LT_STATE_CFGRCVFCFG 0x09
-#define IB_7322_LT_STATE_CFGWAITRMT 0x0a
-#define IB_7322_LT_STATE_CFGIDLE 0x0b
-#define IB_7322_LT_STATE_RECOVERRETRAIN 0x0c
-#define IB_7322_LT_STATE_TXREVLANES 0x0d
-#define IB_7322_LT_STATE_RECOVERWAITRMT 0x0e
-#define IB_7322_LT_STATE_RECOVERIDLE 0x0f
-#define IB_7322_LT_STATE_CFGENH 0x10
-#define IB_7322_LT_STATE_CFGTEST 0x11
-#define IB_7322_LT_STATE_CFGWAITRMTTEST 0x12
-#define IB_7322_LT_STATE_CFGWAITENH 0x13
-
-/* link state machine states from IBC */
-#define IB_7322_L_STATE_DOWN 0x0
-#define IB_7322_L_STATE_INIT 0x1
-#define IB_7322_L_STATE_ARM 0x2
-#define IB_7322_L_STATE_ACTIVE 0x3
-#define IB_7322_L_STATE_ACT_DEFER 0x4
-
-static const u8 qib_7322_physportstate[0x20] = {
- [IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
- [IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
- [IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
- [IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
- [IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
- [IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
- [IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7322_LT_STATE_CFGRCVFCFG] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7322_LT_STATE_CFGWAITRMT] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
- [IB_7322_LT_STATE_RECOVERRETRAIN] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [IB_7322_LT_STATE_RECOVERWAITRMT] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [IB_7322_LT_STATE_RECOVERIDLE] =
- IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
- [IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
- [IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7322_LT_STATE_CFGWAITRMTTEST] =
- IB_PHYSPORTSTATE_CFG_TRAIN,
- [IB_7322_LT_STATE_CFGWAITENH] =
- IB_PHYSPORTSTATE_CFG_WAIT_ENH,
- [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
- [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
-};
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-struct qib_irq_notify {
- int rcv;
- void *arg;
- struct irq_affinity_notify notify;
-};
-#endif
-
-struct qib_chip_specific {
- u64 __iomem *cregbase;
- u64 *cntrs;
- spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
- spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
- u64 main_int_mask; /* clear bits which have dedicated handlers */
- u64 int_enable_mask; /* for per port interrupts in single port mode */
- u64 errormask;
- u64 hwerrmask;
- u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
- u64 gpio_mask; /* shadow the gpio mask register */
- u64 extctrl; /* shadow the gpio output enable, etc... */
- u32 ncntrs;
- u32 nportcntrs;
- u32 cntrnamelen;
- u32 portcntrnamelen;
- u32 numctxts;
- u32 rcvegrcnt;
- u32 updthresh; /* current AvailUpdThld */
- u32 updthresh_dflt; /* default AvailUpdThld */
- u32 r1;
- u32 num_msix_entries;
- u32 sdmabufcnt;
- u32 lastbuf_for_pio;
- u32 stay_in_freeze;
- u32 recovery_ports_initted;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- u32 dca_ctrl;
- int rhdr_cpu[18];
- int sdma_cpu[2];
- u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
-#endif
- struct qib_msix_entry *msix_entries;
- unsigned long *sendchkenable;
- unsigned long *sendgrhchk;
- unsigned long *sendibchk;
- u32 rcvavail_timeout[18];
- char emsgbuf[128]; /* for device error interrupt msg buffer */
-};
-
-/* Table of entries in "human readable" form Tx Emphasis. */
-struct txdds_ent {
- u8 amp;
- u8 pre;
- u8 main;
- u8 post;
-};
-
-struct vendor_txdds_ent {
- u8 oui[QSFP_VOUI_LEN];
- u8 *partnum;
- struct txdds_ent sdr;
- struct txdds_ent ddr;
- struct txdds_ent qdr;
-};
-
-static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
-
-#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
-#define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */
-#define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */
-#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
-
-#define H1_FORCE_VAL 8
-#define H1_FORCE_QME 1 /* may be overridden via setup_txselect() */
-#define H1_FORCE_QMH 7 /* may be overridden via setup_txselect() */
-
-/* The static and dynamic registers are paired, and the pairs indexed by spd */
-#define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
- + ((spd) * 2))
-
-#define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
-#define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
-#define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
-#define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
-#define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
-
-struct qib_chippport_specific {
- u64 __iomem *kpregbase;
- u64 __iomem *cpregbase;
- u64 *portcntrs;
- struct qib_pportdata *ppd;
- wait_queue_head_t autoneg_wait;
- struct delayed_work autoneg_work;
- struct delayed_work ipg_work;
- struct timer_list chase_timer;
- /*
- * these 5 fields are used to establish deltas for IB symbol
- * errors and linkrecovery errors. They can be reported on
- * some chips during link negotiation prior to INIT, and with
- * DDR when faking DDR negotiations with non-IBTA switches.
- * The chip counters are adjusted at driver unload if there is
- * a non-zero delta.
- */
- u64 ibdeltainprog;
- u64 ibsymdelta;
- u64 ibsymsnap;
- u64 iblnkerrdelta;
- u64 iblnkerrsnap;
- u64 iblnkdownsnap;
- u64 iblnkdowndelta;
- u64 ibmalfdelta;
- u64 ibmalfsnap;
- u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
- u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
- unsigned long qdr_dfe_time;
- unsigned long chase_end;
- u32 autoneg_tries;
- u32 recovery_init;
- u32 qdr_dfe_on;
- u32 qdr_reforce;
- /*
- * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
- * entry zero is unused, to simplify indexing
- */
- u8 h1_val;
- u8 no_eep; /* txselect table index to use if no qsfp info */
- u8 ipg_tries;
- u8 ibmalfusesnap;
- struct qib_qsfp_data qsfp_data;
- char epmsgbuf[192]; /* for port error interrupt msg buffer */
- char sdmamsgbuf[192]; /* for per-port sdma error messages */
-};
-
-static struct {
- const char *name;
- irq_handler_t handler;
- int lsb;
- int port; /* 0 if not port-specific, else port # */
- int dca;
-} irq_table[] = {
- { "", qib_7322intr, -1, 0, 0 },
- { " (buf avail)", qib_7322bufavail,
- SYM_LSB(IntStatus, SendBufAvail), 0, 0},
- { " (sdma 0)", sdma_intr,
- SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
- { " (sdma 1)", sdma_intr,
- SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
- { " (sdmaI 0)", sdma_idle_intr,
- SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1},
- { " (sdmaI 1)", sdma_idle_intr,
- SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1},
- { " (sdmaP 0)", sdma_progress_intr,
- SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
- { " (sdmaP 1)", sdma_progress_intr,
- SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
- { " (sdmaC 0)", sdma_cleanup_intr,
- SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
- { " (sdmaC 1)", sdma_cleanup_intr,
-	  SYM_LSB(IntStatus, SDmaCleanupDone_1), 2, 0 },
-};
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-
-static const struct dca_reg_map {
- int shadow_inx;
- int lsb;
- u64 mask;
- u16 regno;
-} dca_rcvhdr_reg_map[] = {
- { 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
- ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) },
- { 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
- ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) },
- { 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
- ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) },
- { 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
- ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) },
- { 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
- ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) },
- { 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
- ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) },
- { 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
- ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) },
- { 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
- ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) },
- { 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
- ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) },
- { 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
- ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) },
- { 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
- ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) },
- { 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
- ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) },
- { 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
- ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) },
- { 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
- ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) },
- { 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
- ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) },
- { 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
- ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) },
- { 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
- ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) },
- { 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
- ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) },
-};
-#endif
-
-/* ibcctrl bits */
-#define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
-/* cycle through TS1/TS2 till OK */
-#define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
-/* wait for TS1, then go on */
-#define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
-#define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
-
-#define QLOGIC_IB_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
-#define QLOGIC_IB_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
-#define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
-
-#define BLOB_7322_IBCHG 0x101
-
-static inline void qib_write_kreg(const struct qib_devdata *dd,
- const u32 regno, u64 value);
-static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
-static void write_7322_initregs(struct qib_devdata *);
-static void write_7322_init_portregs(struct qib_pportdata *);
-static void setup_7322_link_recovery(struct qib_pportdata *, u32);
-static void check_7322_rxe_status(struct qib_pportdata *);
-static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-static void qib_setup_dca(struct qib_devdata *dd);
-static void setup_dca_notifier(struct qib_devdata *dd, int msixnum);
-static void reset_dca_notifier(struct qib_devdata *dd, int msixnum);
-#endif
-
-/**
- * qib_read_ureg32 - read 32-bit virtualized per-context register
- * @dd: device
- * @regno: register number
- * @ctxt: context number
- *
- * Return the contents of a register that is virtualized to be per context.
- * Returns 0 on errors (not distinguishable from valid contents at
- * runtime; we may add a separate error variable at some point).
- */
-static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
- enum qib_ureg regno, int ctxt)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return 0;
- return readl(regno + (u64 __iomem *)(
- (dd->ureg_align * ctxt) + (dd->userbase ?
- (char __iomem *)dd->userbase :
- (char __iomem *)dd->kregbase + dd->uregbase)));
-}
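-
-/*
- * The address arithmetic above, spelled out (layout values are
- * hypothetical): with ureg_align == 0x1000 and no separate userbase
- * mapping, context ctxt's copy of register regno is read from
- *
- *	kregbase + uregbase + (0x1000 * ctxt) + (regno * sizeof(u64))
- *
- * i.e. each context gets its own aligned block of user registers.
- */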
-
-/**
- * qib_write_ureg - write virtualized per-context register
- * @dd: device
- * @regno: register number
- * @value: value
- * @ctxt: context
- *
- * Write the contents of a register that is virtualized to be per context.
- */
-static inline void qib_write_ureg(const struct qib_devdata *dd,
- enum qib_ureg regno, u64 value, int ctxt)
-{
- u64 __iomem *ubase;
-
- if (dd->userbase)
- ubase = (u64 __iomem *)
- ((char __iomem *) dd->userbase +
- dd->ureg_align * ctxt);
- else
- ubase = (u64 __iomem *)
- (dd->uregbase +
- (char __iomem *) dd->kregbase +
- dd->ureg_align * ctxt);
-
- if (dd->kregbase && (dd->flags & QIB_PRESENT))
- writeq(value, &ubase[regno]);
-}
-
-static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
- const u32 regno)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return -1;
- return readl((u32 __iomem *) &dd->kregbase[regno]);
-}
-
-static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
- const u32 regno)
-{
- if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
- return -1;
- return readq(&dd->kregbase[regno]);
-}
-
-static inline void qib_write_kreg(const struct qib_devdata *dd,
- const u32 regno, u64 value)
-{
- if (dd->kregbase && (dd->flags & QIB_PRESENT))
- writeq(value, &dd->kregbase[regno]);
-}
-
-/*
- * Not many sanity checks for the port-specific kernel register routines,
- * since they are only used when it's known to be safe.
- */
-static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
- const u16 regno)
-{
- if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
- return 0ULL;
- return readq(&ppd->cpspec->kpregbase[regno]);
-}
-
-static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
- const u16 regno, u64 value)
-{
- if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
- (ppd->dd->flags & QIB_PRESENT))
- writeq(value, &ppd->cpspec->kpregbase[regno]);
-}
-
-/**
- * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
- * @dd: the qlogic_ib device
- * @regno: the register number to write
- * @ctxt: the context containing the register
- * @value: the value to write
- */
-static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
- const u16 regno, unsigned ctxt,
- u64 value)
-{
- qib_write_kreg(dd, regno + ctxt, value);
-}
-
-static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
-{
-	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
-		return 0;
-	return readq(&dd->cspec->cregbase[regno]);
-}
-
-static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
-{
-	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
-		return 0;
-	return readl(&dd->cspec->cregbase[regno]);
-}
-
-static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
- u16 regno, u64 value)
-{
- if (ppd->cpspec && ppd->cpspec->cpregbase &&
- (ppd->dd->flags & QIB_PRESENT))
- writeq(value, &ppd->cpspec->cpregbase[regno]);
-}
-
-static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
- u16 regno)
-{
- if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
- !(ppd->dd->flags & QIB_PRESENT))
- return 0;
- return readq(&ppd->cpspec->cpregbase[regno]);
-}
-
-static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
- u16 regno)
-{
- if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
- !(ppd->dd->flags & QIB_PRESENT))
- return 0;
- return readl(&ppd->cpspec->cpregbase[regno]);
-}
-
-/* bits in Control register */
-#define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
-#define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
-
-/* bits in general interrupt regs */
-#define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
-#define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
-#define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
-#define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
-#define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
-#define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
-#define QIB_I_C_ERROR INT_MASK(Err)
-
-#define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
-#define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
-#define QIB_I_GPIO INT_MASK(AssertGPIO)
-#define QIB_I_P_SDMAINT(pidx) \
- (INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
- INT_MASK_P(SDmaProgress, pidx) | \
- INT_MASK_PM(SDmaCleanupDone, pidx))
-
-/* Interrupt bits that are "per port" */
-#define QIB_I_P_BITSEXTANT(pidx) \
- (INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
- INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
- INT_MASK_P(SDmaProgress, pidx) | \
- INT_MASK_PM(SDmaCleanupDone, pidx))
-
-/* Interrupt bits that are common to a device */
-/* currently unused: QIB_I_SPIOSENT */
-#define QIB_I_C_BITSEXTANT \
- (QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
- QIB_I_SPIOSENT | \
- QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
-
-#define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
- QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
-
-/*
- * Error bits that are "per port".
- */
-#define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
-#define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
-#define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
-#define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
-#define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
-#define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
-#define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
-#define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
-#define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
-#define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
-#define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
-#define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
-#define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
-#define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
-#define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
-#define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
-#define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
-#define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
-#define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
-#define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
-#define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
-#define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
-#define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
-#define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
-#define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
-#define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
-#define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
-#define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
-
-#define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
-#define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
-#define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
-#define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
-#define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
-#define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
-#define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
-#define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
-#define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
-#define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
-#define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
-
-/* Error bits that are common to a device */
-#define QIB_E_RESET ERR_MASK(ResetNegated)
-#define QIB_E_HARDWARE ERR_MASK(HardwareErr)
-#define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
-
-
-/*
- * Per chip (rather than per-port) errors. Most either do
- * nothing but trigger a print (because they self-recover, or
- * always occur in tandem with other errors that handle the
- * issue), or indicate errors with no recovery; either way,
- * we want to know that they happened.
- */
-#define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
-#define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
-#define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
-#define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
-#define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
-#define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
-#define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
-#define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
-
-/* SDMA chip errors (not per port)
- * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
- * the SDMAHALT error immediately, so we just print the dup error via the
- * E_AUTO mechanism. This is true of most of the per-port fatal errors
- * as well, but since this is port-independent, by definition, it's
- * handled a bit differently. SDMA_VL15 and SDMA_WRONG_PORT are per
- * packet send errors, and so are handled in the same manner as other
- * per-packet errors.
- */
-#define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
-#define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
-#define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
-
-/*
- * Below is functionally equivalent to the legacy QLOGIC_IB_E_PKTERRS;
- * it is used to print "common" packet errors.
- */
-#define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
- QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
- QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
- QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
- QIB_E_P_REBP)
-
-/* Error bits that are packet-related (Receive, per-port) */
-#define QIB_E_P_RPKTERRS (\
- QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
- QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
- QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
- QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
- QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
- QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
-
-/*
- * Error bits that are Send-related (per port)
- * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
- * All of these potentially need to have a buffer disarmed
- */
-#define QIB_E_P_SPKTERRS (\
- QIB_E_P_SUNEXP_PKTNUM |\
- QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
- QIB_E_P_SMAXPKTLEN |\
- QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
- QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
- QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
-
-#define QIB_E_SPKTERRS ( \
- QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
- ERR_MASK_N(SendUnsupportedVLErr) | \
- QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
-
-#define QIB_E_P_SDMAERRS ( \
- QIB_E_P_SDMAHALT | \
- QIB_E_P_SDMADESCADDRMISALIGN | \
- QIB_E_P_SDMAUNEXPDATA | \
- QIB_E_P_SDMAMISSINGDW | \
- QIB_E_P_SDMADWEN | \
- QIB_E_P_SDMARPYTAG | \
- QIB_E_P_SDMA1STDESC | \
- QIB_E_P_SDMABASE | \
- QIB_E_P_SDMATAILOUTOFBOUND | \
- QIB_E_P_SDMAOUTOFBOUND | \
- QIB_E_P_SDMAGENMISMATCH)
-
-/*
- * This sets some bits more than once, but makes it more obvious which
- * bits are not handled under other categories, and the repeat definition
- * is not a problem.
- */
-#define QIB_E_P_BITSEXTANT ( \
- QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
- QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
- QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
- QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
- )
-
-/*
- * These are errors that can occur when the link
- * changes state while a packet is being sent or received. This doesn't
- * cover things like EBP or VCRC that can be the result of a send
- * having the link change state, so we receive a "known bad" packet.
- * All of these are "per port", so renamed:
- */
-#define QIB_E_P_LINK_PKTERRS (\
- QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
- QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
- QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
- QIB_E_P_RUNEXPCHAR)
-
-/*
- * This sets some bits more than once, but makes it more obvious which
- * bits are not handled under other categories (such as QIB_E_SPKTERRS),
- * and the repeat definition is not a problem.
- */
-#define QIB_E_C_BITSEXTANT (\
- QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
- QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
- QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
-
-/* Likewise Neuter E_SPKT_ERRS_IGNORE */
-#define E_SPKT_ERRS_IGNORE 0
-
-#define QIB_EXTS_MEMBIST_DISABLED \
- SYM_MASK(EXTStatus, MemBISTDisabled)
-#define QIB_EXTS_MEMBIST_ENDTEST \
- SYM_MASK(EXTStatus, MemBISTEndTest)
-
-#define QIB_E_SPIOARMLAUNCH \
- ERR_MASK(SendArmLaunchErr)
-
-#define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
-#define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
-
-/*
- * IBTA_1_2 is set when multiple speeds are enabled (normal),
- * and also if forced QDR (only QDR enabled). It's enabled for the
- * forced QDR case so that scrambling will be enabled by the TS3
- * exchange, when supported by both sides of the link.
- */
-#define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
-#define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
-#define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
-#define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
-#define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
-#define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
- SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
-#define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
-
-#define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
-#define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
-
-#define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
-#define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
-#define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
-
-#define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
-#define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
-#define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
- SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
-#define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
- SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
-#define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
-
-#define IBA7322_REDIRECT_VEC_PER_REG 12
-
-#define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
-#define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
-#define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
-#define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
-#define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
-
-#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
-
-#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
- .msg = #fldname , .sz = sizeof(#fldname) }
-#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
- fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
-static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
- HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
- HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
- HWE_AUTO(PCIESerdesPClkNotDetect),
- HWE_AUTO(PowerOnBISTFailed),
- HWE_AUTO(TempsenseTholdReached),
- HWE_AUTO(MemoryErr),
- HWE_AUTO(PCIeBusParityErr),
- HWE_AUTO(PcieCplTimeout),
- HWE_AUTO(PciePoisonedTLP),
- HWE_AUTO_P(SDmaMemReadErr, 1),
- HWE_AUTO_P(SDmaMemReadErr, 0),
- HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
- HWE_AUTO_P(IBCBusToSPCParityErr, 1),
- HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
- HWE_AUTO(statusValidNoEop),
- HWE_AUTO(LATriggered),
- { .mask = 0, .sz = 0 }
-};
-
-#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
- .msg = #fldname, .sz = sizeof(#fldname) }
-#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
- .msg = #fldname, .sz = sizeof(#fldname) }
-static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
- E_AUTO(RcvEgrFullErr),
- E_AUTO(RcvHdrFullErr),
- E_AUTO(ResetNegated),
- E_AUTO(HardwareErr),
- E_AUTO(InvalidAddrErr),
- E_AUTO(SDmaVL15Err),
- E_AUTO(SBufVL15MisUseErr),
- E_AUTO(InvalidEEPCmd),
- E_AUTO(RcvContextShareErr),
- E_AUTO(SendVLMismatchErr),
- E_AUTO(SendArmLaunchErr),
- E_AUTO(SendSpecialTriggerErr),
- E_AUTO(SDmaWrongPortErr),
- E_AUTO(SDmaBufMaskDuplicateErr),
- { .mask = 0, .sz = 0 }
-};
-
-static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
- E_P_AUTO(IBStatusChanged),
- E_P_AUTO(SHeadersErr),
- E_P_AUTO(VL15BufMisuseErr),
- /*
-	 * SDmaHaltErr is not really an error, so make the message clearer
- */
- {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
- .sz = 11},
- E_P_AUTO(SDmaDescAddrMisalignErr),
- E_P_AUTO(SDmaUnexpDataErr),
- E_P_AUTO(SDmaMissingDwErr),
- E_P_AUTO(SDmaDwEnErr),
- E_P_AUTO(SDmaRpyTagErr),
- E_P_AUTO(SDma1stDescErr),
- E_P_AUTO(SDmaBaseErr),
- E_P_AUTO(SDmaTailOutOfBoundErr),
- E_P_AUTO(SDmaOutOfBoundErr),
- E_P_AUTO(SDmaGenMismatchErr),
- E_P_AUTO(SendBufMisuseErr),
- E_P_AUTO(SendUnsupportedVLErr),
- E_P_AUTO(SendUnexpectedPktNumErr),
- E_P_AUTO(SendDroppedDataPktErr),
- E_P_AUTO(SendDroppedSmpPktErr),
- E_P_AUTO(SendPktLenErr),
- E_P_AUTO(SendUnderRunErr),
- E_P_AUTO(SendMaxPktLenErr),
- E_P_AUTO(SendMinPktLenErr),
- E_P_AUTO(RcvIBLostLinkErr),
- E_P_AUTO(RcvHdrErr),
- E_P_AUTO(RcvHdrLenErr),
- E_P_AUTO(RcvBadTidErr),
- E_P_AUTO(RcvBadVersionErr),
- E_P_AUTO(RcvIBFlowErr),
- E_P_AUTO(RcvEBPErr),
- E_P_AUTO(RcvUnsupportedVLErr),
- E_P_AUTO(RcvUnexpectedCharErr),
- E_P_AUTO(RcvShortPktLenErr),
- E_P_AUTO(RcvLongPktLenErr),
- E_P_AUTO(RcvMaxPktLenErr),
- E_P_AUTO(RcvMinPktLenErr),
- E_P_AUTO(RcvICRCErr),
- E_P_AUTO(RcvVCRCErr),
- E_P_AUTO(RcvFormatErr),
- { .mask = 0, .sz = 0 }
-};
-
-/*
- * Below generates "auto-message" for interrupts not specific to any port or
- * context
- */
-#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
- .msg = #fldname, .sz = sizeof(#fldname) }
-/* Below generates "auto-message" for interrupts specific to a port */
-#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
- SYM_LSB(IntMask, fldname##Mask##_0), \
- SYM_LSB(IntMask, fldname##Mask##_1)), \
- .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
-/* For some reason, the SerDesTrimDone bits are reversed */
-#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
- SYM_LSB(IntMask, fldname##Mask##_1), \
- SYM_LSB(IntMask, fldname##Mask##_0)), \
- .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
-/*
- * Below generates "auto-message" for interrupts specific to a context,
- * with ctxt-number appended
- */
-#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
- SYM_LSB(IntMask, fldname##0IntMask), \
- SYM_LSB(IntMask, fldname##17IntMask)), \
- .msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
-
-#define TXSYMPTOM_AUTO_P(fldname) \
- { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
- .msg = #fldname, .sz = sizeof(#fldname) }
-static const struct qib_hwerror_msgs hdrchk_msgs[] = {
- TXSYMPTOM_AUTO_P(NonKeyPacket),
- TXSYMPTOM_AUTO_P(GRHFail),
- TXSYMPTOM_AUTO_P(PkeyFail),
- TXSYMPTOM_AUTO_P(QPFail),
- TXSYMPTOM_AUTO_P(SLIDFail),
- TXSYMPTOM_AUTO_P(RawIPV6),
- TXSYMPTOM_AUTO_P(PacketTooSmall),
- { .mask = 0, .sz = 0 }
-};
-
-#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
-
-/*
- * Called when we might have an error that is specific to a particular
- * PIO buffer, and may need to cancel that buffer so it can be re-used;
- * this path does not need to force an update of pioavail.
- */
-static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- u32 i;
- int any;
- u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
- u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
- unsigned long sbuf[4];
-
- /*
- * It's possible that sendbuffererror could have bits set; might
- * have already done this as a result of hardware error handling.
- */
- any = 0;
- for (i = 0; i < regcnt; ++i) {
- sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
- if (sbuf[i]) {
- any = 1;
- qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
- }
- }
-
- if (any)
- qib_disarm_piobufs_set(dd, sbuf, piobcnt);
-}
-
-/* No txe_recover yet, if ever */
-
-/* No decode_errors yet */
-static void err_decode(char *msg, size_t len, u64 errs,
- const struct qib_hwerror_msgs *msp)
-{
- u64 these, lmask;
- int took, multi, n = 0;
-
- while (errs && msp && msp->mask) {
- multi = (msp->mask & (msp->mask - 1));
- while (errs & msp->mask) {
- these = (errs & msp->mask);
- lmask = (these & (these - 1)) ^ these;
- if (len) {
- if (n++) {
- /* separate the strings */
- *msg++ = ',';
- len--;
- }
- /* msp->sz counts the nul */
- took = min_t(size_t, msp->sz - (size_t)1, len);
- memcpy(msg, msp->msg, took);
- len -= took;
- msg += took;
- if (len)
- *msg = '\0';
- }
- errs &= ~lmask;
- if (len && multi) {
- /* More than one bit this mask */
- int idx = -1;
-
- while (lmask & msp->mask) {
- ++idx;
- lmask >>= 1;
- }
- took = scnprintf(msg, len, "_%d", idx);
- len -= took;
- msg += took;
- }
- }
- ++msp;
- }
- /* If some bits are left, show in hex. */
- if (len && errs)
- snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
- (unsigned long long) errs);
-}
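-
-/*
- * Typical use of err_decode() (sketch only; buffer size arbitrary):
- * decode a port error mask into a comma-separated list, with bits of
- * multi-bit fields suffixed "_<bit>", e.g.
- *
- *	char buf[128];
- *
- *	err_decode(buf, sizeof(buf), errs, qib_7322error_msgs);
- *	qib_dev_err(dd, "Errors: %s\n", buf);
- */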
-
-/* only called if r1 set */
-static void flush_fifo(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- u32 __iomem *piobuf;
- u32 bufn;
- u32 *hdr;
- u64 pbc;
- const unsigned hdrwords = 7;
- static struct ib_header ibhdr = {
- .lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
- .lrh[1] = IB_LID_PERMISSIVE,
- .lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
- .lrh[3] = IB_LID_PERMISSIVE,
- .u.oth.bth[0] = cpu_to_be32(
- (IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
- .u.oth.bth[1] = cpu_to_be32(0),
- .u.oth.bth[2] = cpu_to_be32(0),
- .u.oth.u.ud.deth[0] = cpu_to_be32(0),
- .u.oth.u.ud.deth[1] = cpu_to_be32(0),
- };
-
- /*
- * Send a dummy VL15 packet to flush the launch FIFO.
- * This will not actually be sent since the TxeBypassIbc bit is set.
- */
- pbc = PBC_7322_VL15_SEND |
- (((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
- (hdrwords + SIZE_OF_CRC);
- piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
- if (!piobuf)
- return;
- writeq(pbc, piobuf);
- hdr = (u32 *) &ibhdr;
- if (dd->flags & QIB_PIO_FLUSH_WC) {
- qib_flush_wc();
- qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
- qib_flush_wc();
- __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
- qib_flush_wc();
- } else
- qib_pio_copy(piobuf + 2, hdr, hdrwords);
- qib_sendbuf_done(dd, bufn);
-}
-
-/*
- * This is called with interrupts disabled and sdma_lock held.
- */
-static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 set_sendctrl = 0;
- u64 clr_sendctrl = 0;
-
- if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
- set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
- else
- clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
-
- if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
- set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
- else
- clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
-
- if (op & QIB_SDMA_SENDCTRL_OP_HALT)
- set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
- else
- clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
-
- if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
- set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
- SYM_MASK(SendCtrl_0, TxeAbortIbc) |
- SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
- else
- clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
- SYM_MASK(SendCtrl_0, TxeAbortIbc) |
- SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
-
- spin_lock(&dd->sendctrl_lock);
-
- /* If we are draining everything, block sends first */
- if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
- ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
- qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- ppd->p_sendctrl |= set_sendctrl;
- ppd->p_sendctrl &= ~clr_sendctrl;
-
- if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
- qib_write_kreg_port(ppd, krp_sendctrl,
- ppd->p_sendctrl |
- SYM_MASK(SendCtrl_0, SDmaCleanup));
- else
- qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
-
- if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
- ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
- qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- spin_unlock(&dd->sendctrl_lock);
-
- if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
- flush_fifo(ppd);
-}
-
-static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
-{
- __qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
-}
-
-static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
-{
- /*
-	 * Set SendDmaLenGen, then clear and set the MSB of the
-	 * generation count, to enable generation checking and load
-	 * the internal generation counter.
- */
- qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
- qib_write_kreg_port(ppd, krp_senddmalengen,
- ppd->sdma_descq_cnt |
- (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
-}
-
-/*
- * Must be called with sdma_lock held, or before init finished.
- */
-static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
-{
- /* Commit writes to memory and advance the tail on the chip */
- wmb();
- ppd->sdma_descq_tail = tail;
- qib_write_kreg_port(ppd, krp_senddmatail, tail);
-}
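-
-/*
- * Note on ordering in qib_sdma_update_7322_tail(): the wmb() ensures
- * descriptor writes in host memory are visible before the chip sees
- * the new tail, since the tail write triggers the descriptor fetch.
- */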
-
-/*
- * This is called with interrupts disabled and sdma_lock held.
- */
-static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
-{
- /*
- * Drain all FIFOs.
- * The hardware doesn't require this but we do it so that verbs
- * and user applications don't wait for link active to send stale
- * data.
- */
- sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
-
- qib_sdma_7322_setlengen(ppd);
- qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
- ppd->sdma_head_dma[0] = 0;
- qib_7322_sdma_sendctrl(ppd,
- ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
-}
-
-#define DISABLES_SDMA ( \
- QIB_E_P_SDMAHALT | \
- QIB_E_P_SDMADESCADDRMISALIGN | \
- QIB_E_P_SDMAMISSINGDW | \
- QIB_E_P_SDMADWEN | \
- QIB_E_P_SDMARPYTAG | \
- QIB_E_P_SDMA1STDESC | \
- QIB_E_P_SDMABASE | \
- QIB_E_P_SDMATAILOUTOFBOUND | \
- QIB_E_P_SDMAOUTOFBOUND | \
- QIB_E_P_SDMAGENMISMATCH)
-
-static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
-{
- unsigned long flags;
- struct qib_devdata *dd = ppd->dd;
-
- errs &= QIB_E_P_SDMAERRS;
- err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
- errs, qib_7322p_error_msgs);
-
- if (errs & QIB_E_P_SDMAUNEXPDATA)
- qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
- ppd->port);
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
- if (errs != QIB_E_P_SDMAHALT) {
- /* SDMA errors have QIB_E_P_SDMAHALT and another bit set */
- qib_dev_porterr(dd, ppd->port,
- "SDMA %s 0x%016llx %s\n",
- qib_sdma_state_names[ppd->sdma_state.current_state],
- errs, ppd->cpspec->sdmamsgbuf);
- dump_sdma_7322_state(ppd);
- }
-
- switch (ppd->sdma_state.current_state) {
- case qib_sdma_state_s00_hw_down:
- break;
-
- case qib_sdma_state_s10_hw_start_up_wait:
- if (errs & QIB_E_P_SDMAHALT)
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e20_hw_started);
- break;
-
- case qib_sdma_state_s20_idle:
- break;
-
- case qib_sdma_state_s30_sw_clean_up_wait:
- break;
-
- case qib_sdma_state_s40_hw_clean_up_wait:
- if (errs & QIB_E_P_SDMAHALT)
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e50_hw_cleaned);
- break;
-
- case qib_sdma_state_s50_hw_halt_wait:
- if (errs & QIB_E_P_SDMAHALT)
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e60_hw_halted);
- break;
-
- case qib_sdma_state_s99_running:
- __qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
- __qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
- break;
- }
-
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-}
-
-/*
- * handle per-device errors (not per-port errors)
- */
-static noinline void handle_7322_errors(struct qib_devdata *dd)
-{
- char *msg;
- u64 iserr = 0;
- u64 errs;
- u64 mask;
-
- qib_stats.sps_errints++;
- errs = qib_read_kreg64(dd, kr_errstatus);
- if (!errs) {
- qib_devinfo(dd->pcidev,
- "device error interrupt, but no error bits set!\n");
- goto done;
- }
-
- /* don't report errors that are masked */
- errs &= dd->cspec->errormask;
- msg = dd->cspec->emsgbuf;
-
- /* do these first, they are most important */
- if (errs & QIB_E_HARDWARE) {
- *msg = '\0';
- qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
- }
-
- if (errs & QIB_E_SPKTERRS) {
- qib_disarm_7322_senderrbufs(dd->pport);
- qib_stats.sps_txerrs++;
- } else if (errs & QIB_E_INVALIDADDR)
- qib_stats.sps_txerrs++;
- else if (errs & QIB_E_ARMLAUNCH) {
- qib_stats.sps_txerrs++;
- qib_disarm_7322_senderrbufs(dd->pport);
- }
- qib_write_kreg(dd, kr_errclear, errs);
-
- /*
- * The ones we mask off are handled specially below
- * or above. Also mask SDMADISABLED by default as it
- * is too chatty.
- */
- mask = QIB_E_HARDWARE;
- *msg = '\0';
-
- err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
- qib_7322error_msgs);
-
- /*
- * Getting reset is a tragedy for all ports. Mark the device
- * _and_ the ports as "offline" in a way meaningful to each.
- */
- if (errs & QIB_E_RESET) {
- int pidx;
-
- qib_dev_err(dd,
- "Got reset, requires re-init (unload and reload driver)\n");
- dd->flags &= ~QIB_INITTED; /* needs re-init */
- /* mark as having had error */
- *dd->devstatusp |= QIB_STATUS_HWERROR;
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
- if (dd->pport[pidx].link_speed_supported)
- *dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
- }
-
- if (*msg && iserr)
- qib_dev_err(dd, "%s error\n", msg);
-
- /*
- * If there were hdrq or egrfull errors, wake up any processes
- * waiting in poll. We used to try to check which contexts had
- * the overflow, but given the cost of that and the chip reads
- * to support it, it's better to just wake everybody up if we
- * get an overflow; waiters can poll again if it's not them.
- */
- if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
- qib_handle_urcv(dd, ~0U);
- if (errs & ERR_MASK(RcvEgrFullErr))
- qib_stats.sps_buffull++;
- else
- qib_stats.sps_hdrfull++;
- }
-
-done:
- return;
-}
-
-static void qib_error_tasklet(struct tasklet_struct *t)
-{
- struct qib_devdata *dd = from_tasklet(dd, t, error_tasklet);
-
- handle_7322_errors(dd);
- qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
-}
-
-static void reenable_chase(struct timer_list *t)
-{
- struct qib_chippport_specific *cp = timer_container_of(cp, t,
- chase_timer);
- struct qib_pportdata *ppd = cp->ppd;
-
- ppd->cpspec->chase_timer.expires = 0;
- qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
- QLOGIC_IB_IBCC_LINKINITCMD_POLL);
-}
-
-static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
- u8 ibclt)
-{
- ppd->cpspec->chase_end = 0;
-
- if (!qib_chase)
- return;
-
- qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
- QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
- ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
- add_timer(&ppd->cpspec->chase_timer);
-}
-
-static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
-{
- u8 ibclt;
- unsigned long tnow;
-
- ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
-
- /*
- * Detect and handle the state chase issue, where we can
- * get stuck if we are unlucky on timing on both sides of
- * the link. If we are, we disable, set a timer, and
- * then re-enable.
- */
- switch (ibclt) {
- case IB_7322_LT_STATE_CFGRCVFCFG:
- case IB_7322_LT_STATE_CFGWAITRMT:
- case IB_7322_LT_STATE_TXREVLANES:
- case IB_7322_LT_STATE_CFGENH:
- tnow = jiffies;
- if (ppd->cpspec->chase_end &&
- time_after(tnow, ppd->cpspec->chase_end))
- disable_chase(ppd, tnow, ibclt);
- else if (!ppd->cpspec->chase_end)
- ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
- break;
- default:
- ppd->cpspec->chase_end = 0;
- break;
- }
-
- if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
- ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
- ibclt == IB_7322_LT_STATE_LINKUP) &&
- (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
- force_h1(ppd);
- ppd->cpspec->qdr_reforce = 1;
- if (!ppd->dd->cspec->r1)
- serdes_7322_los_enable(ppd, 0);
- } else if (ppd->cpspec->qdr_reforce &&
- (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
- (ibclt == IB_7322_LT_STATE_CFGENH ||
- ibclt == IB_7322_LT_STATE_CFGIDLE ||
- ibclt == IB_7322_LT_STATE_LINKUP))
- force_h1(ppd);
-
- if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
- ppd->link_speed_enabled == QIB_IB_QDR &&
- (ibclt == IB_7322_LT_STATE_CFGTEST ||
- ibclt == IB_7322_LT_STATE_CFGENH ||
- (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
- ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
- adj_tx_serdes(ppd);
-
- if (ibclt != IB_7322_LT_STATE_LINKUP) {
- u8 ltstate = qib_7322_phys_portstate(ibcst);
- u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
- LinkTrainingState);
- if (!ppd->dd->cspec->r1 &&
- pibclt == IB_7322_LT_STATE_LINKUP &&
- ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
- ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
- ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
- ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
- /* If the link went down (but not into recovery),
- * turn LOS back on */
- serdes_7322_los_enable(ppd, 1);
- if (!ppd->cpspec->qdr_dfe_on &&
- ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
- ppd->cpspec->qdr_dfe_on = 1;
- ppd->cpspec->qdr_dfe_time = 0;
- /* On link down, reenable QDR adaptation */
- qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
- ppd->dd->cspec->r1 ?
- QDR_STATIC_ADAPT_DOWN_R1 :
- QDR_STATIC_ADAPT_DOWN);
- pr_info(
- "IB%u:%u re-enabled QDR adaptation ibclt %x\n",
- ppd->dd->unit, ppd->port, ibclt);
- }
- }
-}
-
-static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
-
-/*
- * This is per-pport error handling.
- * It will likely get its own MSIx interrupt (one for each port,
- * although just a single handler).
- */
-static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
-{
- char *msg;
- u64 ignore_this_time = 0, iserr = 0, errs, fmask;
- struct qib_devdata *dd = ppd->dd;
-
- /* do this as soon as possible */
- fmask = qib_read_kreg64(dd, kr_act_fmask);
- if (!fmask)
- check_7322_rxe_status(ppd);
-
- errs = qib_read_kreg_port(ppd, krp_errstatus);
- if (!errs)
- qib_devinfo(dd->pcidev,
- "Port%d error interrupt, but no error bits set!\n",
- ppd->port);
- if (!fmask)
- errs &= ~QIB_E_P_IBSTATUSCHANGED;
- if (!errs)
- goto done;
-
- msg = ppd->cpspec->epmsgbuf;
- *msg = '\0';
-
- if (errs & ~QIB_E_P_BITSEXTANT) {
- err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
- errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
- if (!*msg)
- snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
- "no others");
- qib_dev_porterr(dd, ppd->port,
- "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
- (errs & ~QIB_E_P_BITSEXTANT), msg);
- *msg = '\0';
- }
-
- if (errs & QIB_E_P_SHDR) {
- u64 symptom;
-
- /* determine cause, then write to clear */
- symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
- qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
- err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
- hdrchk_msgs);
- *msg = '\0';
- /* senderrbuf cleared in SPKTERRS below */
- }
-
- if (errs & QIB_E_P_SPKTERRS) {
- if ((errs & QIB_E_P_LINK_PKTERRS) &&
- !(ppd->lflags & QIBL_LINKACTIVE)) {
- /*
- * This can happen when trying to bring the link
- * up, but the IB link changes state at the "wrong"
- * time. The IB logic then complains that the packet
- * isn't valid. We don't want to confuse people, so
- * we just don't print them, except at debug level.
- */
- err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
- (errs & QIB_E_P_LINK_PKTERRS),
- qib_7322p_error_msgs);
- *msg = '\0';
- ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
- }
- qib_disarm_7322_senderrbufs(ppd);
- } else if ((errs & QIB_E_P_LINK_PKTERRS) &&
- !(ppd->lflags & QIBL_LINKACTIVE)) {
- /*
- * This can happen when SMA is trying to bring the link
- * up, but the IB link changes state at the "wrong" time.
- * The IB logic then complains that the packet isn't
- * valid. We don't want to confuse people, so we just
- * don't print them, except at debug level.
- */
- err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
- qib_7322p_error_msgs);
- ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
- *msg = '\0';
- }
-
- qib_write_kreg_port(ppd, krp_errclear, errs);
-
- errs &= ~ignore_this_time;
- if (!errs)
- goto done;
-
- if (errs & QIB_E_P_RPKTERRS)
- qib_stats.sps_rcverrs++;
- if (errs & QIB_E_P_SPKTERRS)
- qib_stats.sps_txerrs++;
-
- iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
-
- if (errs & QIB_E_P_SDMAERRS)
- sdma_7322_p_errors(ppd, errs);
-
- if (errs & QIB_E_P_IBSTATUSCHANGED) {
- u64 ibcs;
- u8 ltstate;
-
- ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
- ltstate = qib_7322_phys_portstate(ibcs);
-
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
- handle_serdes_issues(ppd, ibcs);
- if (!(ppd->cpspec->ibcctrl_a &
- SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
- /*
- * We got our interrupt, so init code should be
- * happy and not try alternatives. Now squelch
- * other "chatter" from link-negotiation (pre Init)
- */
- ppd->cpspec->ibcctrl_a |=
- SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
- qib_write_kreg_port(ppd, krp_ibcctrl_a,
- ppd->cpspec->ibcctrl_a);
- }
-
- /* Update our picture of width and speed from chip */
- ppd->link_width_active =
- (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
- IB_WIDTH_4X : IB_WIDTH_1X;
- ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
- LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
- SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
- QIB_IB_DDR : QIB_IB_SDR;
-
- if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
- IB_PHYSPORTSTATE_DISABLED)
- qib_set_ib_7322_lstate(ppd, 0,
- QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
- else
- /*
- * Since going into a recovery state causes the link
- * state to go down and since recovery is transitory,
- * it is better if we "miss" ever seeing the link
- * training state go into recovery (i.e., ignore this
- * transition for link state special handling purposes)
- * without updating lastibcstat.
- */
- if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
- ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
- ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
- ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
- qib_handle_e_ibstatuschanged(ppd, ibcs);
- }
- if (*msg && iserr)
- qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
-
- if (ppd->state_wanted & ppd->lflags)
- wake_up_interruptible(&ppd->state_wait);
-done:
- return;
-}
-
-/* enable/disable chip from delivering interrupts */
-static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
-{
- if (enable) {
- if (dd->flags & QIB_BADINTR)
- return;
- qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
- /* cause any pending enabled interrupts to be re-delivered */
- qib_write_kreg(dd, kr_intclear, 0ULL);
- if (dd->cspec->num_msix_entries) {
- /* and same for MSIx */
- u64 val = qib_read_kreg64(dd, kr_intgranted);
-
- if (val)
- qib_write_kreg(dd, kr_intgranted, val);
- }
- } else
- qib_write_kreg(dd, kr_intmask, 0ULL);
-}
-
-/*
- * Try to cleanup as much as possible for anything that might have gone
- * wrong while in freeze mode, such as pio buffers being written by user
- * processes (causing armlaunch), send errors due to going into freeze mode,
- * etc., and try to avoid causing extra interrupts while doing so.
- * Forcibly update the in-memory pioavail register copies after cleanup
- * because the chip won't do it while in freeze mode (the register values
- * themselves are kept correct).
- * Make sure that we don't lose any important interrupts by using the chip
- * feature that says that writing 0 to a bit in *clear that is set in
- * *status will cause an interrupt to be generated again (if allowed by
- * the *mask value).
- * This is in chip-specific code because of all of the register accesses,
- * even though the details are similar on most chips.
- */
-static void qib_7322_clear_freeze(struct qib_devdata *dd)
-{
- int pidx;
-
- /* disable error interrupts, to avoid confusion */
- qib_write_kreg(dd, kr_errmask, 0ULL);
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
- if (dd->pport[pidx].link_speed_supported)
- qib_write_kreg_port(dd->pport + pidx, krp_errmask,
- 0ULL);
-
- /* also disable interrupts; errormask is sometimes overwritten */
- qib_7322_set_intr_state(dd, 0);
-
- /* clear the freeze, and be sure chip saw it */
- qib_write_kreg(dd, kr_control, dd->control);
- qib_read_kreg32(dd, kr_scratch);
-
- /*
- * Force new interrupt if any hwerr, error or interrupt bits are
- * still set, and clear "safe" send packet errors related to freeze
- * and cancelling sends. Re-enable error interrupts before possible
- * force of re-interrupt on pending interrupts.
- */
- qib_write_kreg(dd, kr_hwerrclear, 0ULL);
- qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
- qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
- /* We need to purge per-port errs and reset mask, too */
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- if (!dd->pport[pidx].link_speed_supported)
- continue;
- qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0Ull);
- qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0Ull);
- }
- qib_7322_set_intr_state(dd, 1);
-}
-
-/* no error handling to speak of */
-/**
- * qib_7322_handle_hwerrors - display hardware errors.
- * @dd: the qlogic_ib device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
- * Use the same msg buffer as qib_handle_errors() to avoid excessive
- * stack use. Most hardware errors are catastrophic, but for right
- * now, we'll print them and continue.
- */
-static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
- size_t msgl)
-{
- u64 hwerrs;
- u32 ctrl;
- int isfatal = 0;
-
- hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
- if (!hwerrs)
- goto bail;
- if (hwerrs == ~0ULL) {
- qib_dev_err(dd,
- "Read of hardware error status failed (all bits set); ignoring\n");
- goto bail;
- }
- qib_stats.sps_hwerrs++;
-
- /* Always clear the error status register, except BIST fail */
- qib_write_kreg(dd, kr_hwerrclear, hwerrs &
- ~HWE_MASK(PowerOnBISTFailed));
-
- hwerrs &= dd->cspec->hwerrmask;
-
- /* no EEPROM logging, yet */
-
- if (hwerrs)
- qib_devinfo(dd->pcidev,
- "Hardware error: hwerr=0x%llx (cleared)\n",
- (unsigned long long) hwerrs);
-
- ctrl = qib_read_kreg32(dd, kr_control);
- if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
- /*
- * No recovery yet...
- */
- if ((hwerrs & ~HWE_MASK(LATriggered)) ||
- dd->cspec->stay_in_freeze) {
- /*
- * If any set that we aren't ignoring only make the
- * complaint once, in case it's stuck or recurring,
- * and we get here multiple times
- * Force link down, so switch knows, and
- * LEDs are turned off.
- */
- if (dd->flags & QIB_INITTED)
- isfatal = 1;
- } else
- qib_7322_clear_freeze(dd);
- }
-
- if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
- isfatal = 1;
- strscpy(msg,
- "[Memory BIST test failed, InfiniPath hardware unusable]",
- msgl);
- /* ignore from now on, so disable until driver reloaded */
- dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
- }
-
- err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
-
- /* Ignore esoteric PLL failures et al. */
-
- qib_dev_err(dd, "%s hardware error\n", msg);
-
- if (hwerrs &
- (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
- SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
- int pidx = 0;
- int err;
- unsigned long flags;
- struct qib_pportdata *ppd = dd->pport;
-
- for (; pidx < dd->num_pports; ++pidx, ppd++) {
- err = 0;
- if (pidx == 0 && (hwerrs &
- SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
- err++;
- if (pidx == 1 && (hwerrs &
- SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
- err++;
- if (err) {
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- dump_sdma_7322_state(ppd);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- }
- }
- }
-
- if (isfatal && !dd->diag_client) {
- qib_dev_err(dd,
- "Fatal Hardware Error, no longer usable, SN %.16s\n",
- dd->serial);
- /*
- * for /sys status file and user programs to print; if no
- * trailing brace is copied, we'll know it was truncated.
- */
- if (dd->freezemsg)
- snprintf(dd->freezemsg, dd->freezelen,
- "{%s}", msg);
- qib_disable_after_error(dd);
- }
-bail:;
-}
-
-/**
- * qib_7322_init_hwerrors - enable hardware errors
- * @dd: the qlogic_ib device
- *
- * Now that we have finished initializing everything that might reasonably
- * cause a hardware error, and cleared those error bits as they occurred,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask.
- */
-static void qib_7322_init_hwerrors(struct qib_devdata *dd)
-{
- int pidx;
- u64 extsval;
-
- extsval = qib_read_kreg64(dd, kr_extstatus);
- if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
- QIB_EXTS_MEMBIST_ENDTEST)))
- qib_dev_err(dd, "MemBIST did not complete!\n");
-
- /* never clear BIST failure, so reported on each driver load */
- qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
-
- /* clear all */
- qib_write_kreg(dd, kr_errclear, ~0ULL);
- /* enable errors that are masked, at least this first time. */
- qib_write_kreg(dd, kr_errmask, ~0ULL);
- dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
- if (dd->pport[pidx].link_speed_supported)
- qib_write_kreg_port(dd->pport + pidx, krp_errmask,
- ~0ULL);
-}
-
-/*
- * Disable and enable the armlaunch error. Used for PIO bandwidth testing
- * on chips that are count-based, rather than trigger-based. There is no
- * reference counting, but that's also fine, given the intended use.
- * Only chip-specific because it's all register accesses
- */
-static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
-{
- if (enable) {
- qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
- dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
- } else
- dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
- qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
-}
-
-/*
- * Formerly took parameter <which> in pre-shifted,
- * pre-merged form with LinkCmd and LinkInitCmd
- * together, and assuming the zero was NOP.
- */
-static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
- u16 linitcmd)
-{
- u64 mod_wd;
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
-
- if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
- /*
- * If we are told to disable, note that so link-recovery
- * code does not attempt to bring us back up.
- * Also reset everything that we can, so we start
- * completely clean when re-enabled (before we
- * actually issue the disable to the IBC)
- */
- qib_7322_mini_pcs_reset(ppd);
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- } else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
- /*
- * Any other linkinitcmd will lead to LINKDOWN and then
- * to INIT (if all is well), so clear flag to let
- * link-recovery code attempt to bring us back up.
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- /*
- * Clear status change interrupt reduction so the
- * new state is seen.
- */
- ppd->cpspec->ibcctrl_a &=
- ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
- }
-
- mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
- (linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
-
- qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
- mod_wd);
- /* write to chip to prevent back-to-back writes of ibc reg */
- qib_write_kreg(dd, kr_scratch, 0);
-
-}
-
-/*
- * The total RCV buffer memory is 64KB, used for both ports, and is
- * in units of 64 bytes (same as IB flow control credit unit).
- * The consumedVL unit in the same registers are in 32 byte units!
- * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
- * and we can therefore allocate just 9 IB credits for 2 VL15 packets
- * in krp_rxcreditvl15, rather than 10.
- */
-#define RCV_BUF_UNITSZ 64
-#define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
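-/*
- * Worked numbers for the comment above, assuming both ports are in
- * use: NUM_RCV_BUF_UNITS(dd) = 65536 / (64 * 2) = 512 units per port.
- * Two 288-byte VL15 packets round up to (2 * 288 + 63) / 64 = 9 units,
- * leaving 503 units for VL0-N; with 4 operational VLs that is
- * 503 / 4 = 125 units per VL, with the remainder of 3 added to VL0.
- */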
-
-static void set_vls(struct qib_pportdata *ppd)
-{
- int i, numvls, totcred, cred_vl, vl0extra;
- struct qib_devdata *dd = ppd->dd;
- u64 val;
-
- numvls = qib_num_vls(ppd->vls_operational);
-
- /*
- * Set up per-VL credits. Below is a kludge based on these assumptions:
- * 1) port is disabled at the time early_init is called.
- * 2) give VL15 9 credits (buffer units), enough for two max-plausible
- *    packets.
- * 3) Give VL0-N the rest, with any rounding excess used for VL0.
- */
- /* 2 VL15 packets @ 288 bytes each (including IB headers) */
- totcred = NUM_RCV_BUF_UNITS(dd);
- cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
- totcred -= cred_vl;
- qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
- cred_vl = totcred / numvls;
- vl0extra = totcred - cred_vl * numvls;
- qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
- for (i = 1; i < numvls; i++)
- qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
- for (; i < 8; i++) /* no buffer space for other VLs */
- qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
-
- /* Notify IBC that credits need to be recalculated */
- val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
- val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
- qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
- qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
-
- for (i = 0; i < numvls; i++)
- val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
- val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
-
- /* Change the number of operational VLs */
- ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
- ~SYM_MASK(IBCCtrlA_0, NumVLane)) |
- ((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
- qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
- qib_write_kreg(dd, kr_scratch, 0ULL);
-}
-
-/*
- * The code that deals with actual SerDes is in serdes_7322_init().
- * Compared to the code for iba7220, it is minimal.
- */
-static int serdes_7322_init(struct qib_pportdata *ppd);
-
-/**
- * qib_7322_bringup_serdes - bring up the serdes
- * @ppd: physical port on the qlogic_ib device
- */
-static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 val, guid, ibc;
- unsigned long flags;
-
- /*
- * SerDes model not in Pd, but still need to
- * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
- * eventually.
- */
- /* Put IBC in reset, sends disabled (should be in reset already) */
- ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
- qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
- qib_write_kreg(dd, kr_scratch, 0ULL);
-
- /* ensure previous Tx parameters are not still forced */
- qib_write_kreg_port(ppd, krp_tx_deemph_override,
- SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- reset_tx_deemphasis_override));
-
- if (qib_compat_ddr_negotiate) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
- crp_ibsymbolerr);
- ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
- crp_iblinkerrrecov);
- }
-
- /* flowcontrolwatermark is in units of KBytes */
- ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
- /*
- * Flow control is sent this often, even if no changes in
- * buffer space occur. Units are 128ns for this chip.
- * Set to 3 usec (24 * 128 ns = 3072 ns).
- */
- ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
- /* max error tolerance */
- ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
- /* IB credit flow control. */
- ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
- /*
- * set initial max size pkt IBC will send, including ICRC; it's the
- * PIO buffer size in dwords, less 1; also see qib_set_mtu()
- */
- ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
- SYM_LSB(IBCCtrlA_0, MaxPktLen);
- ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
-
- /*
- * Reset the PCS interface to the serdes (and also ibc, which is still
- * in reset from above). Writes new value of ibcctrl_a as last step.
- */
- qib_7322_mini_pcs_reset(ppd);
-
- if (!ppd->cpspec->ibcctrl_b) {
- unsigned lse = ppd->link_speed_enabled;
-
- /*
- * Not on re-init after reset, establish shadow
- * and force initial config.
- */
- ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
- krp_ibcctrl_b);
- ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
- IBA7322_IBC_SPEED_DDR |
- IBA7322_IBC_SPEED_SDR |
- IBA7322_IBC_WIDTH_AUTONEG |
- SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
- if (lse & (lse - 1)) /* Multiple speeds enabled */
- ppd->cpspec->ibcctrl_b |=
- (lse << IBA7322_IBC_SPEED_LSB) |
- IBA7322_IBC_IBTA_1_2_MASK |
- IBA7322_IBC_MAX_SPEED_MASK;
- else
- ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
- IBA7322_IBC_SPEED_QDR |
- IBA7322_IBC_IBTA_1_2_MASK :
- (lse == QIB_IB_DDR) ?
- IBA7322_IBC_SPEED_DDR :
- IBA7322_IBC_SPEED_SDR;
- if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
- (IB_WIDTH_1X | IB_WIDTH_4X))
- ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
- else
- ppd->cpspec->ibcctrl_b |=
- ppd->link_width_enabled == IB_WIDTH_4X ?
- IBA7322_IBC_WIDTH_4X_ONLY :
- IBA7322_IBC_WIDTH_1X_ONLY;
-
- /* always enable these on driver reload, not sticky */
- ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
- IBA7322_IBC_HRTBT_MASK);
- }
- qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
-
- /* setup so we have more time at CFGTEST to change H1 */
- val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
- val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
- val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
- qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
-
- serdes_7322_init(ppd);
-
- guid = be64_to_cpu(ppd->guid);
- if (!guid) {
- if (dd->base_guid)
- guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
- ppd->guid = cpu_to_be64(guid);
- }
-
- qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
- /* write to chip to prevent back-to-back writes of ibc reg */
- qib_write_kreg(dd, kr_scratch, 0);
-
- /* Enable port */
- ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
- set_vls(ppd);
-
- /* initially come up DISABLED, without sending anything. */
- val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
- QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
- qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- /* clear the linkinit cmds */
- ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
-
- /* be paranoid against later code motion, etc. */
- spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
- ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
- qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
- spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
-
- /* Also enable IBSTATUSCHG interrupt. */
- val = qib_read_kreg_port(ppd, krp_errmask);
- qib_write_kreg_port(ppd, krp_errmask,
- val | ERR_MASK_N(IBStatusChanged));
-
- /* Always zero until we start messing with SerDes for real */
- return 0;
-}
-
-/**
- * qib_7322_mini_quiet_serdes - set serdes to txidle
- * @ppd: the qlogic_ib device
- * Called when driver is being unloaded
- */
-static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
-{
- u64 val;
- unsigned long flags;
-
- qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- wake_up(&ppd->cpspec->autoneg_wait);
- cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
- if (ppd->dd->cspec->r1)
- cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
-
- ppd->cpspec->chase_end = 0;
- if (ppd->cpspec->chase_timer.function) /* if initted */
- timer_delete_sync(&ppd->cpspec->chase_timer);
-
- /*
- * Despite the name, actually disables IBC as well. Do it when
- * we are as sure as possible that no more packets can be
- * received, following the down and the PCS reset.
- * The actual disabling happens in qib_7322_mini_pcs_reset(),
- * along with the PCS being reset.
- */
- ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
- qib_7322_mini_pcs_reset(ppd);
-
- /*
- * Update the adjusted counters so the adjustment persists
- * across driver reload.
- */
- if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
- ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
- struct qib_devdata *dd = ppd->dd;
- u64 diagc;
-
- /* enable counter writes */
- diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
- qib_write_kreg(dd, kr_hwdiagctrl,
- diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
-
- if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
- val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
- if (ppd->cpspec->ibdeltainprog)
- val -= val - ppd->cpspec->ibsymsnap;
- val -= ppd->cpspec->ibsymdelta;
- write_7322_creg_port(ppd, crp_ibsymbolerr, val);
- }
- if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
- val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
- if (ppd->cpspec->ibdeltainprog)
- val -= val - ppd->cpspec->iblnkerrsnap;
- val -= ppd->cpspec->iblnkerrdelta;
- write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
- }
- if (ppd->cpspec->iblnkdowndelta) {
- val = read_7322_creg32_port(ppd, crp_iblinkdown);
- val += ppd->cpspec->iblnkdowndelta;
- write_7322_creg_port(ppd, crp_iblinkdown, val);
- }
- /*
- * No need to save ibmalfdelta since IB perfcounters
- * are cleared on driver reload.
- */
-
- /* and disable counter writes */
- qib_write_kreg(dd, kr_hwdiagctrl, diagc);
- }
-}
-
-/**
- * qib_setup_7322_setextled - set the state of the two external LEDs
- * @ppd: physical port on the qlogic_ib device
- * @on: whether the link is up or not
- *
- * The exact combination of LEDs lit when @on is true is determined
- * by looking at the ibcstatus.
- *
- * These LEDs indicate the physical and logical state of IB link.
- * For this chip (at least with recommended board pinouts), LED1
- * is Yellow (logical state) and LED2 is Green (physical state),
- *
- * Note: We try to match the Mellanox HCA LED behavior as best
- * we can. Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate. That's
- * visible overhead, so not something we will do.
- */
-static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 extctl, ledblink = 0, val;
- unsigned long flags;
- int yel, grn;
-
- /*
- * The diags use the LED to indicate diag info, so we leave
- * the external LED alone when the diags are running.
- */
- if (dd->diag_client)
- return;
-
- /* Allow override of LED display for, e.g. Locating system in rack */
- if (ppd->led_override) {
- grn = (ppd->led_override & QIB_LED_PHYS);
- yel = (ppd->led_override & QIB_LED_LOG);
- } else if (on) {
- val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
- grn = qib_7322_phys_portstate(val) ==
- IB_PHYSPORTSTATE_LINKUP;
- yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
- } else {
- grn = 0;
- yel = 0;
- }
-
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- extctl = dd->cspec->extctrl & (ppd->port == 1 ?
- ~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
- if (grn) {
- extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
- /*
- * Counts are in chip clock (4ns) periods.
- * This is 1/16 sec (66.6ms) on,
- * 3/16 sec (187.5 ms) off, with packets rcvd.
- */
- ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
- ((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
- }
- if (yel)
- extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
- dd->cspec->extctrl = extctl;
- qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
-
- if (ledblink) /* blink the LED on packet receive */
- qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
-}
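-/*
- * Blink-period arithmetic for the constants above: counts are 4 ns
- * chip clocks, so the on-time is 66600 * 1000 / 4 = 16,650,000 clocks
- * (66.6 ms) and the off-time is 187500 * 1000 / 4 = 46,875,000 clocks
- * (187.5 ms), giving roughly the 1:3 on/off ratio the comment cites.
- */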
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-
-static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
-{
- switch (event) {
- case DCA_PROVIDER_ADD:
- if (dd->flags & QIB_DCA_ENABLED)
- break;
- if (!dca_add_requester(&dd->pcidev->dev)) {
- qib_devinfo(dd->pcidev, "DCA enabled\n");
- dd->flags |= QIB_DCA_ENABLED;
- qib_setup_dca(dd);
- }
- break;
- case DCA_PROVIDER_REMOVE:
- if (dd->flags & QIB_DCA_ENABLED) {
- dca_remove_requester(&dd->pcidev->dev);
- dd->flags &= ~QIB_DCA_ENABLED;
- dd->cspec->dca_ctrl = 0;
- qib_write_kreg(dd, KREG_IDX(DCACtrlA),
- dd->cspec->dca_ctrl);
- }
- break;
- }
- return 0;
-}
-
-static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
-{
- struct qib_devdata *dd = rcd->dd;
- struct qib_chip_specific *cspec = dd->cspec;
-
- if (!(dd->flags & QIB_DCA_ENABLED))
- return;
- if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
- const struct dca_reg_map *rmp;
-
- cspec->rhdr_cpu[rcd->ctxt] = cpu;
- rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
- cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
- cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
- (u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
- qib_devinfo(dd->pcidev,
- "Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
- (long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
- qib_write_kreg(dd, rmp->regno,
- cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
- cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
- qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
- }
-}
-
-static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
-{
- struct qib_devdata *dd = ppd->dd;
- struct qib_chip_specific *cspec = dd->cspec;
- unsigned pidx = ppd->port - 1;
-
- if (!(dd->flags & QIB_DCA_ENABLED))
- return;
- if (cspec->sdma_cpu[pidx] != cpu) {
- cspec->sdma_cpu[pidx] = cpu;
- cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
- SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
- SYM_MASK(DCACtrlF, SendDma0DCAOPH));
- cspec->dca_rcvhdr_ctrl[4] |=
- (u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
- (ppd->hw_pidx ?
- SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
- SYM_LSB(DCACtrlF, SendDma0DCAOPH));
- qib_devinfo(dd->pcidev,
- "sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
- (long long) cspec->dca_rcvhdr_ctrl[4]);
- qib_write_kreg(dd, KREG_IDX(DCACtrlF),
- cspec->dca_rcvhdr_ctrl[4]);
- cspec->dca_ctrl |= ppd->hw_pidx ?
- SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
- SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
- qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
- }
-}
-
-static void qib_setup_dca(struct qib_devdata *dd)
-{
- struct qib_chip_specific *cspec = dd->cspec;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
- cspec->rhdr_cpu[i] = -1;
- for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
- cspec->sdma_cpu[i] = -1;
- cspec->dca_rcvhdr_ctrl[0] =
- (1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
- cspec->dca_rcvhdr_ctrl[1] =
- (1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
- cspec->dca_rcvhdr_ctrl[2] =
- (1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
- cspec->dca_rcvhdr_ctrl[3] =
- (1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
- cspec->dca_rcvhdr_ctrl[4] =
- (1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
- (1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
- for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
- qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
- cspec->dca_rcvhdr_ctrl[i]);
- for (i = 0; i < cspec->num_msix_entries; i++)
- setup_dca_notifier(dd, i);
-}
-
-static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct qib_irq_notify *n =
- container_of(notify, struct qib_irq_notify, notify);
- int cpu = cpumask_first(mask);
-
- if (n->rcv) {
- struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
-
- qib_update_rhdrq_dca(rcd, cpu);
- } else {
- struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
-
- qib_update_sdma_dca(ppd, cpu);
- }
-}
-
-static void qib_irq_notifier_release(struct kref *ref)
-{
- struct qib_irq_notify *n =
- container_of(ref, struct qib_irq_notify, notify.kref);
- struct qib_devdata *dd;
-
- if (n->rcv) {
- struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
-
- dd = rcd->dd;
- } else {
- struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
-
- dd = ppd->dd;
- }
- qib_devinfo(dd->pcidev,
- "release on HCA notify 0x%p n 0x%p\n", ref, n);
- kfree(n);
-}
-#endif
-
-static void qib_7322_free_irq(struct qib_devdata *dd)
-{
- u64 intgranted;
- int i;
-
- dd->cspec->main_int_mask = ~0ULL;
-
- for (i = 0; i < dd->cspec->num_msix_entries; i++) {
- /* only free IRQs that were allocated */
- if (dd->cspec->msix_entries[i].arg) {
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- reset_dca_notifier(dd, i);
-#endif
- irq_set_affinity_hint(pci_irq_vector(dd->pcidev, i),
- NULL);
- free_cpumask_var(dd->cspec->msix_entries[i].mask);
- pci_free_irq(dd->pcidev, i,
- dd->cspec->msix_entries[i].arg);
- }
- }
-
- /* If num_msix_entries was 0, free the INTx IRQ */
- if (!dd->cspec->num_msix_entries)
- pci_free_irq(dd->pcidev, 0, dd);
- else
- dd->cspec->num_msix_entries = 0;
-
- pci_free_irq_vectors(dd->pcidev);
-
- /* make sure no MSIx interrupts are left pending */
- intgranted = qib_read_kreg64(dd, kr_intgranted);
- if (intgranted)
- qib_write_kreg(dd, kr_intgranted, intgranted);
-}
-
-static void qib_setup_7322_cleanup(struct qib_devdata *dd)
-{
- int i;
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- if (dd->flags & QIB_DCA_ENABLED) {
- dca_remove_requester(&dd->pcidev->dev);
- dd->flags &= ~QIB_DCA_ENABLED;
- dd->cspec->dca_ctrl = 0;
- qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
- }
-#endif
-
- qib_7322_free_irq(dd);
- kfree(dd->cspec->cntrs);
- bitmap_free(dd->cspec->sendchkenable);
- bitmap_free(dd->cspec->sendgrhchk);
- bitmap_free(dd->cspec->sendibchk);
- kfree(dd->cspec->msix_entries);
- for (i = 0; i < dd->num_pports; i++) {
- unsigned long flags;
- u32 mask = QSFP_GPIO_MOD_PRS_N |
- (QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
-
- kfree(dd->pport[i].cpspec->portcntrs);
- if (dd->flags & QIB_HAS_QSFP) {
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- dd->cspec->gpio_mask &= ~mask;
- qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
- }
- }
-}
-
-/* handle SDMA interrupts */
-static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
-{
- struct qib_pportdata *ppd0 = &dd->pport[0];
- struct qib_pportdata *ppd1 = &dd->pport[1];
- u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
- INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
- u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
- INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
-
- if (intr0)
- qib_sdma_intr(ppd0);
- if (intr1)
- qib_sdma_intr(ppd1);
-
- if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
- qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
- if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
- qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
-}
-
-/*
- * Set or clear the Send buffer available interrupt enable bit.
- */
-static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- if (needint)
- dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
- else
- dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-}
-
-/*
- * Somehow got an interrupt with reserved bits set in interrupt status.
- * Print a message so we know it happened, then clear them.
- * Keep the mainline interrupt handler cache-friendly.
- */
-static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
-{
- u64 kills;
-
- kills = istat & ~QIB_I_BITSEXTANT;
- qib_dev_err(dd,
- "Clearing reserved interrupt(s) 0x%016llx\n",
- (unsigned long long) kills);
- qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
-}
-
-/* keep mainline interrupt handler cache-friendly */
-static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
-{
- u32 gpiostatus;
- int handled = 0;
- int pidx;
-
- /*
- * Boards for this chip currently don't use GPIO interrupts,
- * so clear by writing GPIOstatus to GPIOclear, and complain
- * to the developer. To avoid endless repeats, clear
- * the bits in the mask, since there is some kind of
- * programming error or chip problem.
- */
- gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
- /*
- * In theory, writing GPIOstatus to GPIOclear could
- * have a bad side-effect on some diagnostic that wanted
- * to poll for a status-change, but the various shadows
- * make that problematic at best. Diags will just suppress
- * all GPIO interrupts during such tests.
- */
- qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
- /*
- * Check for QSFP MOD_PRS changes
- * only works for single port if IB1 != pidx1
- */
- for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
- ++pidx) {
- struct qib_pportdata *ppd;
- struct qib_qsfp_data *qd;
- u32 mask;
-
- if (!dd->pport[pidx].link_speed_supported)
- continue;
- mask = QSFP_GPIO_MOD_PRS_N;
- ppd = dd->pport + pidx;
- mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
- if (gpiostatus & dd->cspec->gpio_mask & mask) {
- u64 pins;
-
- qd = &ppd->cpspec->qsfp_data;
- gpiostatus &= ~mask;
- pins = qib_read_kreg64(dd, kr_extstatus);
- pins >>= SYM_LSB(EXTStatus, GPIOIn);
- if (!(pins & mask)) {
- ++handled;
- qd->t_insert = jiffies;
- queue_work(ib_wq, &qd->work);
- }
- }
- }
-
- if (gpiostatus && !handled) {
- const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
- u32 gpio_irq = mask & gpiostatus;
-
- /*
- * Clear any troublemakers, and update chip from shadow
- */
- dd->cspec->gpio_mask &= ~gpio_irq;
- qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
- }
-}
-
-/*
- * Handle errors and unusual events first, separate function
- * to improve cache hits for fast path interrupt handling.
- */
-static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
-{
- if (istat & ~QIB_I_BITSEXTANT)
- unknown_7322_ibits(dd, istat);
- if (istat & QIB_I_GPIO)
- unknown_7322_gpio_intr(dd);
- if (istat & QIB_I_C_ERROR) {
- qib_write_kreg(dd, kr_errmask, 0ULL);
- tasklet_schedule(&dd->error_tasklet);
- }
- if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
- handle_7322_p_errors(dd->rcd[0]->ppd);
- if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
- handle_7322_p_errors(dd->rcd[1]->ppd);
-}
-
-/*
- * Dynamically adjust the rcv int timeout for a context based on incoming
- * packet rate.
- */
-static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
-{
- struct qib_devdata *dd = rcd->dd;
- u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
-
- /*
- * Dynamically adjust idle timeout on chip
- * based on number of packets processed.
- */
- if (npkts < rcv_int_count && timeout > 2)
- timeout >>= 1;
- else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
- timeout = min(timeout << 1, rcv_int_timeout);
- else
- return;
-
- dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
- qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
-}
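-/*
- * Example of the adaptation above (the thresholds are module
- * parameters; the values here are assumed defaults): with
- * rcv_int_count = 16 and rcv_int_timeout = 375, a context seeing 4
- * packets per interrupt halves its timeout each pass (64 -> 32 -> ...
- * -> 2), while one seeing 40 packets doubles it until capped
- * (64 -> 128 -> 256 -> 375).
- */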
-
-/*
- * This is the main interrupt handler.
- * It will normally only be used for low frequency interrupts but may
- * have to handle all interrupts if INTx is enabled or fewer than normal
- * MSIx interrupts were allocated.
- * This routine should ignore the interrupt bits for any of the
- * dedicated MSIx handlers.
- */
-static irqreturn_t qib_7322intr(int irq, void *data)
-{
- struct qib_devdata *dd = data;
- irqreturn_t ret;
- u64 istat;
- u64 ctxtrbits;
- u64 rmask;
- unsigned i;
- u32 npkts;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- ret = IRQ_HANDLED;
- goto bail;
- }
-
- istat = qib_read_kreg64(dd, kr_intstatus);
-
- if (unlikely(istat == ~0ULL)) {
- qib_bad_intrstatus(dd);
- qib_dev_err(dd, "Interrupt status all f's, skipping\n");
- /* don't know if it was our interrupt or not */
- ret = IRQ_NONE;
- goto bail;
- }
-
- istat &= dd->cspec->main_int_mask;
- if (unlikely(!istat)) {
- /* already handled, or shared and not us */
- ret = IRQ_NONE;
- goto bail;
- }
-
- this_cpu_inc(*dd->int_counter);
-
- /* handle "errors" of various kinds first, device ahead of port */
- if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
- QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
- INT_MASK_P(Err, 1))))
- unlikely_7322_intr(dd, istat);
-
- /*
- * Clear the interrupt bits we found set, relatively early, so we
- * "know" know the chip will have seen this by the time we process
- * the queue, and will re-interrupt if necessary. The processor
- * itself won't take the interrupt again until we return.
- */
- qib_write_kreg(dd, kr_intclear, istat);
-
- /*
- * Handle kernel receive queues before checking for pio buffers
- * available since receives can overflow; piobuf waiters can afford
- * a few extra cycles, since they were waiting anyway.
- */
- ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
- if (ctxtrbits) {
- rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
- (1ULL << QIB_I_RCVURG_LSB);
- for (i = 0; i < dd->first_user_ctxt; i++) {
- if (ctxtrbits & rmask) {
- ctxtrbits &= ~rmask;
- if (dd->rcd[i])
- qib_kreceive(dd->rcd[i], NULL, &npkts);
- }
- rmask <<= 1;
- }
- if (ctxtrbits) {
- ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
- (ctxtrbits >> QIB_I_RCVURG_LSB);
- qib_handle_urcv(dd, ctxtrbits);
- }
- }
-
- if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
- sdma_7322_intr(dd, istat);
-
- if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
- qib_ib_piobufavail(dd);
-
- ret = IRQ_HANDLED;
-bail:
- return ret;
-}
-
-/*
- * Dedicated receive packet available interrupt handler.
- */
-static irqreturn_t qib_7322pintr(int irq, void *data)
-{
- struct qib_ctxtdata *rcd = data;
- struct qib_devdata *dd = rcd->dd;
- u32 npkts;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- return IRQ_HANDLED;
-
- this_cpu_inc(*dd->int_counter);
-
- /* Clear the interrupt bit we expect to be set. */
- qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
- (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
-
- qib_kreceive(rcd, NULL, &npkts);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Dedicated Send buffer available interrupt handler.
- */
-static irqreturn_t qib_7322bufavail(int irq, void *data)
-{
- struct qib_devdata *dd = data;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- return IRQ_HANDLED;
-
- this_cpu_inc(*dd->int_counter);
-
- /* Clear the interrupt bit we expect to be set. */
- qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
-
- /* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
- if (dd->flags & QIB_INITTED)
- qib_ib_piobufavail(dd);
- else
- qib_wantpiobuf_7322_intr(dd, 0);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Dedicated Send DMA interrupt handler.
- */
-static irqreturn_t sdma_intr(int irq, void *data)
-{
- struct qib_pportdata *ppd = data;
- struct qib_devdata *dd = ppd->dd;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- return IRQ_HANDLED;
-
- this_cpu_inc(*dd->int_counter);
-
- /* Clear the interrupt bit we expect to be set. */
- qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
- INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
- qib_sdma_intr(ppd);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Dedicated Send DMA idle interrupt handler.
- */
-static irqreturn_t sdma_idle_intr(int irq, void *data)
-{
- struct qib_pportdata *ppd = data;
- struct qib_devdata *dd = ppd->dd;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- return IRQ_HANDLED;
-
- this_cpu_inc(*dd->int_counter);
-
- /* Clear the interrupt bit we expect to be set. */
- qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
- INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
- qib_sdma_intr(ppd);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Dedicated Send DMA progress interrupt handler.
- */
-static irqreturn_t sdma_progress_intr(int irq, void *data)
-{
- struct qib_pportdata *ppd = data;
- struct qib_devdata *dd = ppd->dd;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- return IRQ_HANDLED;
-
- this_cpu_inc(*dd->int_counter);
-
- /* Clear the interrupt bit we expect to be set. */
- qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
- INT_MASK_P(SDmaProgress, 1) :
- INT_MASK_P(SDmaProgress, 0));
- qib_sdma_intr(ppd);
-
- return IRQ_HANDLED;
-}
-
-/*
- * Dedicated Send DMA cleanup interrupt handler.
- */
-static irqreturn_t sdma_cleanup_intr(int irq, void *data)
-{
- struct qib_pportdata *ppd = data;
- struct qib_devdata *dd = ppd->dd;
-
- if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
- /*
- * This return value is not great, but we do not want the
- * interrupt core code to remove our interrupt handler
- * because we don't appear to be handling an interrupt
- * during a chip reset.
- */
- return IRQ_HANDLED;
-
- this_cpu_inc(*dd->int_counter);
-
- /* Clear the interrupt bit we expect to be set. */
- qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
- INT_MASK_PM(SDmaCleanupDone, 1) :
- INT_MASK_PM(SDmaCleanupDone, 0));
- qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
-
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-
-static void reset_dca_notifier(struct qib_devdata *dd, int msixnum)
-{
- if (!dd->cspec->msix_entries[msixnum].dca)
- return;
-
- qib_devinfo(dd->pcidev, "Disabling notifier on HCA %d irq %d\n",
- dd->unit, pci_irq_vector(dd->pcidev, msixnum));
- irq_set_affinity_notifier(pci_irq_vector(dd->pcidev, msixnum), NULL);
- dd->cspec->msix_entries[msixnum].notifier = NULL;
-}
-
-static void setup_dca_notifier(struct qib_devdata *dd, int msixnum)
-{
- struct qib_msix_entry *m = &dd->cspec->msix_entries[msixnum];
- struct qib_irq_notify *n;
-
- if (!m->dca)
- return;
- n = kzalloc(sizeof(*n), GFP_KERNEL);
- if (n) {
- int ret;
-
- m->notifier = n;
- n->notify.irq = pci_irq_vector(dd->pcidev, msixnum);
- n->notify.notify = qib_irq_notifier_notify;
- n->notify.release = qib_irq_notifier_release;
- n->arg = m->arg;
- n->rcv = m->rcv;
- qib_devinfo(dd->pcidev,
- "set notifier irq %d rcv %d notify %p\n",
- n->notify.irq, n->rcv, &n->notify);
- ret = irq_set_affinity_notifier(
- n->notify.irq,
- &n->notify);
- if (ret) {
- m->notifier = NULL;
- kfree(n);
- }
- }
-}
-
-#endif
-
-/*
- * Set up our chip-specific interrupt handler.
- * The interrupt type has already been set up, so
- * we just need to do the registration and error checking.
- * If we are using MSIx interrupts, we may fall back to
- * INTx later, if the interrupt handler doesn't get called
- * within 1/2 second (see verify_interrupt()).
- */
-static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
-{
- int ret, i, msixnum;
- u64 redirect[6];
- u64 mask;
- const struct cpumask *local_mask;
- int firstcpu, secondcpu = 0, currrcvcpu = 0;
-
- if (!dd->num_pports)
- return;
-
- if (clearpend) {
- /*
- * if not switching interrupt types, be sure interrupts are
- * disabled, and then clear anything pending at this point,
- * because we are starting clean.
- */
- qib_7322_set_intr_state(dd, 0);
-
- /* clear the reset error, init error/hwerror mask */
- qib_7322_init_hwerrors(dd);
-
- /* clear any interrupt bits that might be set */
- qib_write_kreg(dd, kr_intclear, ~0ULL);
-
- /* make sure no pending MSIx intr, and clear diag reg */
- qib_write_kreg(dd, kr_intgranted, ~0ULL);
- qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
- }
-
- if (!dd->cspec->num_msix_entries) {
- /* Try to get INTx interrupt */
-try_intx:
- ret = pci_request_irq(dd->pcidev, 0, qib_7322intr, NULL, dd,
- QIB_DRV_NAME);
- if (ret) {
- qib_dev_err(
- dd,
- "Couldn't setup INTx interrupt (irq=%d): %d\n",
- pci_irq_vector(dd->pcidev, 0), ret);
- return;
- }
- dd->cspec->main_int_mask = ~0ULL;
- return;
- }
-
- /* Try to get MSIx interrupts */
- memset(redirect, 0, sizeof(redirect));
- mask = ~0ULL;
- msixnum = 0;
- local_mask = cpumask_of_pcibus(dd->pcidev->bus);
- firstcpu = cpumask_first(local_mask);
- if (firstcpu >= nr_cpu_ids ||
- cpumask_weight(local_mask) == num_online_cpus()) {
- local_mask = topology_core_cpumask(0);
- firstcpu = cpumask_first(local_mask);
- }
- if (firstcpu < nr_cpu_ids) {
- secondcpu = cpumask_next(firstcpu, local_mask);
- if (secondcpu >= nr_cpu_ids)
- secondcpu = firstcpu;
- currrcvcpu = secondcpu;
- }
- for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
- irq_handler_t handler;
- void *arg;
- int lsb, reg, sh;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- int dca = 0;
-#endif
- if (i < ARRAY_SIZE(irq_table)) {
- if (irq_table[i].port) {
- /* skip if for a non-configured port */
- if (irq_table[i].port > dd->num_pports)
- continue;
- arg = dd->pport + irq_table[i].port - 1;
- } else
- arg = dd;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dca = irq_table[i].dca;
-#endif
- lsb = irq_table[i].lsb;
- handler = irq_table[i].handler;
- ret = pci_request_irq(dd->pcidev, msixnum, handler,
- NULL, arg, QIB_DRV_NAME "%d%s",
- dd->unit,
- irq_table[i].name);
- } else {
- unsigned ctxt;
-
- ctxt = i - ARRAY_SIZE(irq_table);
- /* per krcvq context receive interrupt */
- arg = dd->rcd[ctxt];
- if (!arg)
- continue;
- if (qib_krcvq01_no_msi && ctxt < 2)
- continue;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dca = 1;
-#endif
- lsb = QIB_I_RCVAVAIL_LSB + ctxt;
- handler = qib_7322pintr;
- ret = pci_request_irq(dd->pcidev, msixnum, handler,
- NULL, arg,
- QIB_DRV_NAME "%d (kctx)",
- dd->unit);
- }
-
- if (ret) {
- /*
- * Shouldn't happen since the enable said we could
- * have as many as we are trying to set up here.
- */
- qib_dev_err(dd,
- "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
- msixnum,
- pci_irq_vector(dd->pcidev, msixnum),
- ret);
- qib_7322_free_irq(dd);
- pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_INTX);
- goto try_intx;
- }
- dd->cspec->msix_entries[msixnum].arg = arg;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dd->cspec->msix_entries[msixnum].dca = dca;
- dd->cspec->msix_entries[msixnum].rcv =
- handler == qib_7322pintr;
-#endif
- if (lsb >= 0) {
- reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
- sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
- SYM_LSB(IntRedirect0, vec1);
- mask &= ~(1ULL << lsb);
- redirect[reg] |= ((u64) msixnum) << sh;
- }
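- /*
- * Dummy read of this vector's MSIx table data word; apparently
- * a flush of the preceding writes before the vector is used.
- */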
- qib_read_kreg64(dd, 2 * msixnum + 1 +
- (QIB_7322_MsixTable_OFFS / sizeof(u64)));
- if (firstcpu < nr_cpu_ids &&
- zalloc_cpumask_var(
- &dd->cspec->msix_entries[msixnum].mask,
- GFP_KERNEL)) {
- if (handler == qib_7322pintr) {
- cpumask_set_cpu(currrcvcpu,
- dd->cspec->msix_entries[msixnum].mask);
- currrcvcpu = cpumask_next(currrcvcpu,
- local_mask);
- if (currrcvcpu >= nr_cpu_ids)
- currrcvcpu = secondcpu;
- } else {
- cpumask_set_cpu(firstcpu,
- dd->cspec->msix_entries[msixnum].mask);
- }
- irq_set_affinity_hint(
- pci_irq_vector(dd->pcidev, msixnum),
- dd->cspec->msix_entries[msixnum].mask);
- }
- msixnum++;
- }
- /* Initialize the vector mapping */
- for (i = 0; i < ARRAY_SIZE(redirect); i++)
- qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
- dd->cspec->main_int_mask = mask;
- tasklet_setup(&dd->error_tasklet, qib_error_tasklet);
-}
-
-/**
- * qib_7322_boardname - fill in the board name and note features
- * @dd: the qlogic_ib device
- *
- * info will be based on the board revision register
- */
-static unsigned qib_7322_boardname(struct qib_devdata *dd)
-{
- /* Will need enumeration of board-types here */
- u32 boardid;
- unsigned int features = DUAL_PORT_CAP;
-
- boardid = SYM_FIELD(dd->revision, Revision, BoardID);
-
- switch (boardid) {
- case 0:
- dd->boardname = "InfiniPath_QLE7342_Emulation";
- break;
- case 1:
- dd->boardname = "InfiniPath_QLE7340";
- dd->flags |= QIB_HAS_QSFP;
- features = PORT_SPD_CAP;
- break;
- case 2:
- dd->boardname = "InfiniPath_QLE7342";
- dd->flags |= QIB_HAS_QSFP;
- break;
- case 3:
- dd->boardname = "InfiniPath_QMI7342";
- break;
- case 4:
- dd->boardname = "InfiniPath_Unsupported7342";
- qib_dev_err(dd, "Unsupported version of QMH7342\n");
- features = 0;
- break;
- case BOARD_QMH7342:
- dd->boardname = "InfiniPath_QMH7342";
- break;
- case BOARD_QME7342:
- dd->boardname = "InfiniPath_QME7342";
- break;
- case 8:
- dd->boardname = "InfiniPath_QME7362";
- dd->flags |= QIB_HAS_QSFP;
- break;
- case BOARD_QMH7360:
- dd->boardname = "Intel IB QDR 1P FLR-QSFP Adptr";
- dd->flags |= QIB_HAS_QSFP;
- break;
- case 15:
- dd->boardname = "InfiniPath_QLE7342_TEST";
- dd->flags |= QIB_HAS_QSFP;
- break;
- default:
- dd->boardname = "InfiniPath_QLE73xy_UNKNOWN";
- qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
- break;
- }
- dd->board_atten = 1; /* index into txdds_Xdr */
-
- snprintf(dd->boardversion, sizeof(dd->boardversion),
- "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
- QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
- (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
- dd->majrev, dd->minrev,
- (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
-
- if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
- qib_devinfo(dd->pcidev,
- "IB%u: Forced to single port mode by module parameter\n",
- dd->unit);
- features &= PORT_SPD_CAP;
- }
-
- return features;
-}
-
-/*
- * This routine sleeps, so it can only be called from user context, not
- * from interrupt context.
- */
-static int qib_do_7322_reset(struct qib_devdata *dd)
-{
- u64 val;
- u64 *msix_vecsave = NULL;
- int i, msix_entries, ret = 1;
- u16 cmdval;
- u8 int_line, clinesz;
- unsigned long flags;
-
- /* Use dev_err so it shows up in logs, etc. */
- qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
-
- qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
-
- msix_entries = dd->cspec->num_msix_entries;
-
- /* no interrupts till re-initted */
- qib_7322_set_intr_state(dd, 0);
-
- qib_7322_free_irq(dd);
-
- if (msix_entries) {
- /* can be up to 512 bytes, too big for stack */
- msix_vecsave = kmalloc_array(2 * dd->cspec->num_msix_entries,
- sizeof(u64),
- GFP_KERNEL);
- }
-
- /*
- * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
- * info that is set up by the BIOS, so we have to save and restore
- * it ourselves. There is some risk something could change it,
- * after we save it, but since we have disabled the MSIx, it
- * shouldn't be touched...
- */
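- /* each MSIx table entry is a pair of 64-bit words: address, then data */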
- for (i = 0; i < msix_entries; i++) {
- u64 vecaddr, vecdata;
-
- vecaddr = qib_read_kreg64(dd, 2 * i +
- (QIB_7322_MsixTable_OFFS / sizeof(u64)));
- vecdata = qib_read_kreg64(dd, 1 + 2 * i +
- (QIB_7322_MsixTable_OFFS / sizeof(u64)));
- if (msix_vecsave) {
- msix_vecsave[2 * i] = vecaddr;
- /* save it without the masked bit set */
- msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
- }
- }
-
- dd->pport->cpspec->ibdeltainprog = 0;
- dd->pport->cpspec->ibsymdelta = 0;
- dd->pport->cpspec->iblnkerrdelta = 0;
- dd->pport->cpspec->ibmalfdelta = 0;
- /* snapshot the count so we can check that interrupts work again */
- dd->z_int_counter = qib_int_counter(dd);
-
- /*
- * Keep chip from being accessed until we are ready. Use
- * writeq() directly, to allow the write even though QIB_PRESENT
- * isn't set.
- */
- dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
- dd->flags |= QIB_DOING_RESET;
- val = dd->control | QLOGIC_IB_C_RESET;
- writeq(val, &dd->kregbase[kr_control]);
-
- for (i = 1; i <= 5; i++) {
- /*
- * Allow MBIST, etc. to complete; longer on each retry.
- * We sometimes get machine checks from bus timeout if no
- * response, so for now, make it *really* long.
- */
- msleep(1000 + (1 + i) * 3000);
-
- qib_pcie_reenable(dd, cmdval, int_line, clinesz);
-
- /*
- * Use readq directly, so we don't need to mark it as PRESENT
- * until we get a successful indication that all is well.
- */
- val = readq(&dd->kregbase[kr_revision]);
- if (val == dd->revision)
- break;
- if (i == 5) {
- qib_dev_err(dd,
- "Failed to initialize after reset, unusable\n");
- ret = 0;
- goto bail;
- }
- }
-
- dd->flags |= QIB_PRESENT; /* it's back */
-
- if (msix_entries) {
- /* restore the MSIx vector address and data if saved above */
- for (i = 0; i < msix_entries; i++) {
- if (!msix_vecsave || !msix_vecsave[2 * i])
- continue;
- qib_write_kreg(dd, 2 * i +
- (QIB_7322_MsixTable_OFFS / sizeof(u64)),
- msix_vecsave[2 * i]);
- qib_write_kreg(dd, 1 + 2 * i +
- (QIB_7322_MsixTable_OFFS / sizeof(u64)),
- msix_vecsave[1 + 2 * i]);
- }
- }
-
- /* initialize the remaining registers. */
- for (i = 0; i < dd->num_pports; ++i)
- write_7322_init_portregs(&dd->pport[i]);
- write_7322_initregs(dd);
-
- if (qib_pcie_params(dd, dd->lbus_width, &msix_entries))
- qib_dev_err(dd,
- "Reset failed to setup PCIe or interrupts; continuing anyway\n");
-
- dd->cspec->num_msix_entries = msix_entries;
- qib_setup_7322_interrupt(dd, 1);
-
- for (i = 0; i < dd->num_pports; ++i) {
- struct qib_pportdata *ppd = &dd->pport[i];
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
- ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- }
-
-bail:
- dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
- kfree(msix_vecsave);
- return ret;
-}
-
-/**
- * qib_7322_put_tid - write a TID to the chip
- * @dd: the qlogic_ib device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: 0 for eager, 1 for expected
- * @pa: physical address of in memory buffer; tidinvalid if freeing
- */
-static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
- u32 type, unsigned long pa)
-{
- if (!(dd->flags & QIB_PRESENT))
- return;
- if (pa != dd->tidinvalid) {
- u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
-
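- /*
- * The TID word is the page address shifted right by
- * IBA7322_TID_PA_SHIFT, with the buffer-size code ORed in
- * above the address bits (see qib_7322_tidtemplate()).
- */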
- /* paranoia checks */
- if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
- qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
- pa);
- return;
- }
- if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
- qib_dev_err(dd,
- "Physical page address 0x%lx larger than supported\n",
- pa);
- return;
- }
-
- if (type == RCVHQ_RCV_TYPE_EAGER)
- chippa |= dd->tidtemplate;
- else /* for now, always full 4KB page */
- chippa |= IBA7322_TID_SZ_4K;
- pa = chippa;
- }
- writeq(pa, tidptr);
-}
-
-/**
- * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
- * @dd: the qlogic_ib device
- * @rcd: the ctxt
- *
- * clear all TID entries for a ctxt, expected and eager.
- * Used from qib_close().
- */
-static void qib_7322_clear_tids(struct qib_devdata *dd,
- struct qib_ctxtdata *rcd)
-{
- u64 __iomem *tidbase;
- unsigned long tidinv;
- u32 ctxt;
- int i;
-
- if (!dd->kregbase || !rcd)
- return;
-
- ctxt = rcd->ctxt;
-
- tidinv = dd->tidinvalid;
- tidbase = (u64 __iomem *)
- ((char __iomem *) dd->kregbase +
- dd->rcvtidbase +
- ctxt * dd->rcvtidcnt * sizeof(*tidbase));
-
- for (i = 0; i < dd->rcvtidcnt; i++)
- qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
- tidinv);
-
- tidbase = (u64 __iomem *)
- ((char __iomem *) dd->kregbase +
- dd->rcvegrbase +
- rcd->rcvegr_tid_base * sizeof(*tidbase));
-
- for (i = 0; i < rcd->rcvegrcnt; i++)
- qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
- tidinv);
-}
-
-/**
- * qib_7322_tidtemplate - setup constants for TID updates
- * @dd: the qlogic_ib device
- *
- * We set up values that are used a lot, to avoid recalculating them each time
- */
-static void qib_7322_tidtemplate(struct qib_devdata *dd)
-{
- /*
- * For now, we always allocate 4KB buffers (at init) so we can
- * receive max size packets. We may want a module parameter to
- * specify 2KB or 4KB and/or make it per port instead of per device
- * for those who want to reduce memory footprint. Note that the
- * rcvhdrentsize size must be large enough to hold the largest
- * IB header (currently 96 bytes) that we expect to handle (plus of
- * course the 2 dwords of RHF).
- */
- if (dd->rcvegrbufsize == 2048)
- dd->tidtemplate = IBA7322_TID_SZ_2K;
- else if (dd->rcvegrbufsize == 4096)
- dd->tidtemplate = IBA7322_TID_SZ_4K;
- dd->tidinvalid = 0;
-}
-
-/**
- * qib_7322_get_base_info - set chip-specific flags for user code
- * @rcd: the qlogic_ib ctxt
- * @kinfo: qib_base_info pointer
- *
- * We set the PCIE flag because the lower bandwidth on PCIe vs
- * HyperTransport can affect some user packet algorithms.
- */
-
-static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
- struct qib_base_info *kinfo)
-{
- kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
- QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
- QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
- if (rcd->dd->cspec->r1)
- kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
- if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
- kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
-
- return 0;
-}
-
-static struct qib_message_header *
-qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
-{
- u32 offset = qib_hdrget_offset(rhf_addr);
-
- return (struct qib_message_header *)
- (rhf_addr - dd->rhf_offset + offset);
-}
-
-/*
- * Configure number of contexts.
- */
-static void qib_7322_config_ctxts(struct qib_devdata *dd)
-{
- unsigned long flags;
- u32 nchipctxts;
-
- nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
- dd->cspec->numctxts = nchipctxts;
- if (qib_n_krcv_queues > 1 && dd->num_pports) {
- dd->first_user_ctxt = NUM_IB_PORTS +
- (qib_n_krcv_queues - 1) * dd->num_pports;
- if (dd->first_user_ctxt > nchipctxts)
- dd->first_user_ctxt = nchipctxts;
- dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
- } else {
- dd->first_user_ctxt = NUM_IB_PORTS;
- dd->n_krcv_queues = 1;
- }
-
- if (!qib_cfgctxts) {
- int nctxts = dd->first_user_ctxt + num_online_cpus();
-
- if (nctxts <= 6)
- dd->ctxtcnt = 6;
- else if (nctxts <= 10)
- dd->ctxtcnt = 10;
- else if (nctxts <= nchipctxts)
- dd->ctxtcnt = nchipctxts;
- } else if (qib_cfgctxts < dd->num_pports)
- dd->ctxtcnt = dd->num_pports;
- else if (qib_cfgctxts <= nchipctxts)
- dd->ctxtcnt = qib_cfgctxts;
- if (!dd->ctxtcnt) /* none of the above, set to max */
- dd->ctxtcnt = nchipctxts;
-
- /*
- * Chip can be configured for 6, 10, or 18 ctxts, and choice
- * affects number of eager TIDs per ctxt (1K, 2K, 4K).
- * Lock to be paranoid about later motion, etc.
- */
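- /* ContextCfg encoding: 0 = 6 ctxts (default), 1 = 10, 2 = 18 */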
- spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
- if (dd->ctxtcnt > 10)
- dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
- else if (dd->ctxtcnt > 6)
- dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
- /* else configure for default 6 receive ctxts */
-
- /* The XRC opcode is 5. */
- dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
-
- /*
- * RcvCtrl *must* be written here so that the
- * chip understands how to change rcvegrcnt below.
- */
- qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
- spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
-
- /* kr_rcvegrcnt changes based on the number of contexts enabled */
- dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
- if (qib_rcvhdrcnt)
- dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
- else
- dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
- dd->num_pports > 1 ? 1024U : 2048U);
-}
-
-static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
-{
-
- int lsb, ret = 0;
- u64 maskr; /* right-justified mask */
-
- switch (which) {
-
- case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
- ret = ppd->link_width_enabled;
- goto done;
-
- case QIB_IB_CFG_LWID: /* Get currently active Link-width */
- ret = ppd->link_width_active;
- goto done;
-
- case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
- ret = ppd->link_speed_enabled;
- goto done;
-
- case QIB_IB_CFG_SPD: /* Get current Link spd */
- ret = ppd->link_speed_active;
- goto done;
-
- case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
- lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
- maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
- break;
-
- case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
- lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
- maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
- break;
-
- case QIB_IB_CFG_LINKLATENCY:
- ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
- SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
- goto done;
-
- case QIB_IB_CFG_OP_VLS:
- ret = ppd->vls_operational;
- goto done;
-
- case QIB_IB_CFG_VL_HIGH_CAP:
- ret = 16;
- goto done;
-
- case QIB_IB_CFG_VL_LOW_CAP:
- ret = 16;
- goto done;
-
- case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
- ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
- OverrunThreshold);
- goto done;
-
- case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
- ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
- PhyerrThreshold);
- goto done;
-
- case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
- /* will only take effect when the link state changes */
- ret = (ppd->cpspec->ibcctrl_a &
- SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
- IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
- goto done;
-
- case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
- lsb = IBA7322_IBC_HRTBT_LSB;
- maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
- break;
-
- case QIB_IB_CFG_PMA_TICKS:
- /*
- * 0x00 = 10x link transfer rate, or 4 nsec for 2.5 Gb/s.
- * Since the clock is always 250 MHz, the value is 3, 1, or 0.
- */
- if (ppd->link_speed_active == QIB_IB_QDR)
- ret = 3;
- else if (ppd->link_speed_active == QIB_IB_DDR)
- ret = 1;
- else
- ret = 0;
- goto done;
-
- default:
- ret = -EINVAL;
- goto done;
- }
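- /* common exit: read the requested field out of the ibcctrl_b shadow */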
- ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
-done:
- return ret;
-}
-
-/*
- * Below again cribbed liberally from older version. Do not lean
- * heavily on it.
- */
-#define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
-#define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
- | (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
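- /* DLID and the DLID mask (LMC) are adjacent fields, so one lsb/mask pair covers both */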
-
-static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 maskr; /* right-justified mask */
- int lsb, ret = 0;
- u16 lcmd, licmd;
- unsigned long flags;
-
- switch (which) {
- case QIB_IB_CFG_LIDLMC:
- /*
- * Set LID and LMC. Combined to avoid a possible hazard; the
- * caller puts LMC in the 16 MSbits, DLID in the 16 LSbits of val.
- */
- lsb = IBA7322_IBC_DLIDLMC_SHIFT;
- maskr = IBA7322_IBC_DLIDLMC_MASK;
- /*
- * For header-checking, the SLID in the packet will
- * be masked with SendIBSLMCMask, and compared
- * with SendIBSLIDAssignMask. Make sure we do not
- * set any bits not covered by the mask, or we get
- * false-positives.
- */
- qib_write_kreg_port(ppd, krp_sendslid,
- val & (val >> 16) & SendIBSLIDAssignMask);
- qib_write_kreg_port(ppd, krp_sendslidmask,
- (val >> 16) & SendIBSLMCMask);
- break;
-
- case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
- ppd->link_width_enabled = val;
- /* convert IB value to chip register value */
- if (val == IB_WIDTH_1X)
- val = 0;
- else if (val == IB_WIDTH_4X)
- val = 1;
- else
- val = 3;
- maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
- lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
- break;
-
- case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
- /*
- * As with width, only write the actual register if the
- * link is currently down, otherwise takes effect on next
- * link change. Since setting is being explicitly requested
- * (via MAD or sysfs), clear autoneg failure status if speed
- * autoneg is enabled.
- */
- ppd->link_speed_enabled = val;
- val <<= IBA7322_IBC_SPEED_LSB;
- maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
- IBA7322_IBC_MAX_SPEED_MASK;
- if (val & (val - 1)) {
- /* Multiple speeds enabled */
- val |= IBA7322_IBC_IBTA_1_2_MASK |
- IBA7322_IBC_MAX_SPEED_MASK;
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- } else if (val & IBA7322_IBC_SPEED_QDR)
- val |= IBA7322_IBC_IBTA_1_2_MASK;
- /* IBTA 1.2 mode + min/max + speed bits are contiguous */
- lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
- break;
-
- case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
- lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
- maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
- break;
-
- case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
- lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
- maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
- break;
-
- case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
- maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
- OverrunThreshold);
- if (maskr != val) {
- ppd->cpspec->ibcctrl_a &=
- ~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
- ppd->cpspec->ibcctrl_a |= (u64) val <<
- SYM_LSB(IBCCtrlA_0, OverrunThreshold);
- qib_write_kreg_port(ppd, krp_ibcctrl_a,
- ppd->cpspec->ibcctrl_a);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- }
- goto bail;
-
- case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
- maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
- PhyerrThreshold);
- if (maskr != val) {
- ppd->cpspec->ibcctrl_a &=
- ~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
- ppd->cpspec->ibcctrl_a |= (u64) val <<
- SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
- qib_write_kreg_port(ppd, krp_ibcctrl_a,
- ppd->cpspec->ibcctrl_a);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- }
- goto bail;
-
- case QIB_IB_CFG_PKEYS: /* update pkeys */
- maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
- ((u64) ppd->pkeys[2] << 32) |
- ((u64) ppd->pkeys[3] << 48);
- qib_write_kreg_port(ppd, krp_partitionkey, maskr);
- goto bail;
-
- case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
- /* will only take effect when the link state changes */
- if (val == IB_LINKINITCMD_POLL)
- ppd->cpspec->ibcctrl_a &=
- ~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
- else /* SLEEP */
- ppd->cpspec->ibcctrl_a |=
- SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
- qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- goto bail;
-
- case QIB_IB_CFG_MTU: /* update the MTU in IBC */
- /*
- * Update our housekeeping variables, and set IBC max
- * size, same as init code; max IBC is max we allow in
- * buffer, less the qword pbc, plus 1 for ICRC, in dwords.
- * Set even if it's unchanged; print a debug message only
- * on changes.
- */
- val = (ppd->ibmaxlen >> 2) + 1;
- ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
- ppd->cpspec->ibcctrl_a |= (u64)val <<
- SYM_LSB(IBCCtrlA_0, MaxPktLen);
- qib_write_kreg_port(ppd, krp_ibcctrl_a,
- ppd->cpspec->ibcctrl_a);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- goto bail;
-
- case QIB_IB_CFG_LSTATE: /* set the IB link state */
- switch (val & 0xffff0000) {
- case IB_LINKCMD_DOWN:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
- ppd->cpspec->ibmalfusesnap = 1;
- ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
- crp_errlink);
- if (!ppd->cpspec->ibdeltainprog &&
- qib_compat_ddr_negotiate) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymsnap =
- read_7322_creg32_port(ppd,
- crp_ibsymbolerr);
- ppd->cpspec->iblnkerrsnap =
- read_7322_creg32_port(ppd,
- crp_iblinkerrrecov);
- }
- break;
-
- case IB_LINKCMD_ARMED:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
- if (ppd->cpspec->ibmalfusesnap) {
- ppd->cpspec->ibmalfusesnap = 0;
- ppd->cpspec->ibmalfdelta +=
- read_7322_creg32_port(ppd,
- crp_errlink) -
- ppd->cpspec->ibmalfsnap;
- }
- break;
-
- case IB_LINKCMD_ACTIVE:
- lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
- break;
-
- default:
- ret = -EINVAL;
- qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
- goto bail;
- }
- switch (val & 0xffff) {
- case IB_LINKINITCMD_NOP:
- licmd = 0;
- break;
-
- case IB_LINKINITCMD_POLL:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
- break;
-
- case IB_LINKINITCMD_SLEEP:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
- break;
-
- case IB_LINKINITCMD_DISABLE:
- licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
- ppd->cpspec->chase_end = 0;
- /*
- * stop state chase counter and timer, if running.
- * wait for a pending timer, but don't clear .data (ppd)!
- */
- if (ppd->cpspec->chase_timer.expires) {
- timer_delete_sync(&ppd->cpspec->chase_timer);
- ppd->cpspec->chase_timer.expires = 0;
- }
- break;
-
- default:
- ret = -EINVAL;
- qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
- val & 0xffff);
- goto bail;
- }
- qib_set_ib_7322_lstate(ppd, lcmd, licmd);
- goto bail;
-
- case QIB_IB_CFG_OP_VLS:
- if (ppd->vls_operational != val) {
- ppd->vls_operational = val;
- set_vls(ppd);
- }
- goto bail;
-
- case QIB_IB_CFG_VL_HIGH_LIMIT:
- qib_write_kreg_port(ppd, krp_highprio_limit, val);
- goto bail;
-
- case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
- if (val > 3) {
- ret = -EINVAL;
- goto bail;
- }
- lsb = IBA7322_IBC_HRTBT_LSB;
- maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
- break;
-
- case QIB_IB_CFG_PORT:
- /* val is the port number of the switch we are connected to. */
- if (ppd->dd->cspec->r1) {
- cancel_delayed_work(&ppd->cpspec->ipg_work);
- ppd->cpspec->ipg_tries = 0;
- }
- goto bail;
-
- default:
- ret = -EINVAL;
- goto bail;
- }
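- /*
- * Common exit: read-modify-write the ibcctrl_b shadow and write
- * it to the chip; the scratch write that follows apparently acts
- * as a flush, as elsewhere in this file.
- */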
- ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
- ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
- qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
- qib_write_kreg(dd, kr_scratch, 0);
-bail:
- return ret;
-}
-
-static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
-{
- int ret = 0;
- u64 val, ctrlb;
-
- /* only IBC loopback, may add serdes and xgxs loopbacks later */
- if (!strncmp(what, "ibc", 3)) {
- ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
- Loopback);
- val = 0; /* disable heart beat, so link will come up */
- qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
- ppd->dd->unit, ppd->port);
- } else if (!strncmp(what, "off", 3)) {
- ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
- Loopback);
- /* enable heart beat again */
- val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
- qib_devinfo(ppd->dd->pcidev,
- "Disabling IB%u:%u IBC loopback (normal)\n",
- ppd->dd->unit, ppd->port);
- } else
- ret = -EINVAL;
- if (!ret) {
- qib_write_kreg_port(ppd, krp_ibcctrl_a,
- ppd->cpspec->ibcctrl_a);
- ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
- << IBA7322_IBC_HRTBT_LSB);
- ppd->cpspec->ibcctrl_b = ctrlb | val;
- qib_write_kreg_port(ppd, krp_ibcctrl_b,
- ppd->cpspec->ibcctrl_b);
- qib_write_kreg(ppd->dd, kr_scratch, 0);
- }
- return ret;
-}
-
-static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
- struct ib_vl_weight_elem *vl)
-{
- unsigned i;
-
- for (i = 0; i < 16; i++, regno++, vl++) {
- u32 val = qib_read_kreg_port(ppd, regno);
-
- vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
- SYM_RMASK(LowPriority0_0, VirtualLane);
- vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
- SYM_RMASK(LowPriority0_0, Weight);
- }
-}
-
-static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
- struct ib_vl_weight_elem *vl)
-{
- unsigned i;
-
- for (i = 0; i < 16; i++, regno++, vl++) {
- u64 val;
-
- val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
- SYM_LSB(LowPriority0_0, VirtualLane)) |
- ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
- SYM_LSB(LowPriority0_0, Weight));
- qib_write_kreg_port(ppd, regno, val);
- }
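- /* enable the IB VL arbiter on first use, if it isn't already on */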
- if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
- qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- }
-}
-
-static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
-{
- switch (which) {
- case QIB_IB_TBL_VL_HIGH_ARB:
- get_vl_weights(ppd, krp_highprio_0, t);
- break;
-
- case QIB_IB_TBL_VL_LOW_ARB:
- get_vl_weights(ppd, krp_lowprio_0, t);
- break;
-
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
-{
- switch (which) {
- case QIB_IB_TBL_VL_HIGH_ARB:
- set_vl_weights(ppd, krp_highprio_0, t);
- break;
-
- case QIB_IB_TBL_VL_LOW_ARB:
- set_vl_weights(ppd, krp_lowprio_0, t);
- break;
-
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
- u32 updegr, u32 egrhd, u32 npkts)
-{
- /*
- * Need to write timeout register before updating rcvhdrhead to ensure
- * that the timer is enabled on reception of a packet.
- */
- if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
- adjust_rcv_timeout(rcd, npkts);
- if (updegr)
- qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
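- /* rcvhdrhead is written twice here, presumably required by the chip */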
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
-}
-
-static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
-{
- u32 head, tail;
-
- head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
- if (rcd->rcvhdrtail_kvaddr)
- tail = qib_get_rcvhdrtail(rcd);
- else
- tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
- return head == tail;
-}
-
-#define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
- QIB_RCVCTRL_CTXT_DIS | \
- QIB_RCVCTRL_TIDFLOW_ENB | \
- QIB_RCVCTRL_TIDFLOW_DIS | \
- QIB_RCVCTRL_TAILUPD_ENB | \
- QIB_RCVCTRL_TAILUPD_DIS | \
- QIB_RCVCTRL_INTRAVAIL_ENB | \
- QIB_RCVCTRL_INTRAVAIL_DIS | \
- QIB_RCVCTRL_BP_ENB | \
- QIB_RCVCTRL_BP_DIS)
-
-#define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
- QIB_RCVCTRL_CTXT_DIS | \
- QIB_RCVCTRL_PKEY_DIS | \
- QIB_RCVCTRL_PKEY_ENB)
-
-/*
- * Modify the RCVCTRL register in a chip-specific way. This
- * is a function because bit positions and (future) register
- * locations are chip-specific, but the needed operations are
- * generic. <op> is a bit-mask because we often want to
- * do multiple modifications.
- */
-static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
- int ctxt)
-{
- struct qib_devdata *dd = ppd->dd;
- struct qib_ctxtdata *rcd;
- u64 mask, val;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
-
- if (op & QIB_RCVCTRL_TIDFLOW_ENB)
- dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
- if (op & QIB_RCVCTRL_TIDFLOW_DIS)
- dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
- if (op & QIB_RCVCTRL_TAILUPD_ENB)
- dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
- if (op & QIB_RCVCTRL_TAILUPD_DIS)
- dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
- if (op & QIB_RCVCTRL_PKEY_ENB)
- ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
- if (op & QIB_RCVCTRL_PKEY_DIS)
- ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
- if (ctxt < 0) {
- mask = (1ULL << dd->ctxtcnt) - 1;
- rcd = NULL;
- } else {
- mask = (1ULL << ctxt);
- rcd = dd->rcd[ctxt];
- }
- if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
- ppd->p_rcvctrl |=
- (mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
- if (!(dd->flags & QIB_NODMA_RTAIL)) {
- op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
- dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
- }
- /* Write these registers before the context is enabled. */
- qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
- rcd->rcvhdrqtailaddr_phys);
- qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
- rcd->rcvhdrq_phys);
- rcd->seq_cnt = 1;
- }
- if (op & QIB_RCVCTRL_CTXT_DIS)
- ppd->p_rcvctrl &=
- ~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
- if (op & QIB_RCVCTRL_BP_ENB)
- dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
- if (op & QIB_RCVCTRL_BP_DIS)
- dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
- if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
- dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
- if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
- dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
- /*
- * Decide which registers to write depending on the ops enabled.
- * Special case is "flush" (no bits set at all)
- * which needs to write both.
- */
- if (op == 0 || (op & RCVCTRL_COMMON_MODS))
- qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
- if (op == 0 || (op & RCVCTRL_PORT_MODS))
- qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
- if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
- /*
- * Init the context registers also; if we were
- * disabled, tail and head should both be zero
- * already from the enable, but since we don't
- * know, we have to do it explicitly.
- */
- val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
- qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
-
- /* be sure enabling write seen; hd/tl should be 0 */
- (void) qib_read_kreg32(dd, kr_scratch);
- val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
- dd->rcd[ctxt]->head = val;
- /* If kctxt, interrupt on next receive. */
- if (ctxt < dd->first_user_ctxt)
- val |= dd->rhdrhead_intr_off;
- qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
- } else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
- dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
- /* arm rcv interrupt */
- val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
- qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
- }
- if (op & QIB_RCVCTRL_CTXT_DIS) {
- unsigned f;
-
- /* Now that the context is disabled, clear these registers. */
- if (ctxt >= 0) {
- qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
- qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
- for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
- qib_write_ureg(dd, ur_rcvflowtable + f,
- TIDFLOW_ERRBITS, ctxt);
- } else {
- unsigned i;
-
- for (i = 0; i < dd->cfgctxts; i++) {
- qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
- i, 0);
- qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
- for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
- qib_write_ureg(dd, ur_rcvflowtable + f,
- TIDFLOW_ERRBITS, i);
- }
- }
- }
- spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
-}
-
-/*
- * Modify the SENDCTRL register in a chip-specific way. This
- * is a function where there are multiple such registers with
- * slightly different layouts.
- * The chip doesn't allow back-to-back sendctrl writes, so write
- * the scratch register after writing sendctrl.
- *
- * Which register is written depends on the operation.
- * Most operate on the common register, while
- * SEND_ENB and SEND_DIS operate on the per-port ones.
- * SEND_ENB is included in common because it can change SPCL_TRIG.
- */
-#define SENDCTRL_COMMON_MODS (\
- QIB_SENDCTRL_CLEAR | \
- QIB_SENDCTRL_AVAIL_DIS | \
- QIB_SENDCTRL_AVAIL_ENB | \
- QIB_SENDCTRL_AVAIL_BLIP | \
- QIB_SENDCTRL_DISARM | \
- QIB_SENDCTRL_DISARM_ALL | \
- QIB_SENDCTRL_SEND_ENB)
-
-#define SENDCTRL_PORT_MODS (\
- QIB_SENDCTRL_CLEAR | \
- QIB_SENDCTRL_SEND_ENB | \
- QIB_SENDCTRL_SEND_DIS | \
- QIB_SENDCTRL_FLUSH)
-
-static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 tmp_dd_sendctrl;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
-
- /* First the dd ones that are "sticky", saved in shadow */
- if (op & QIB_SENDCTRL_CLEAR)
- dd->sendctrl = 0;
- if (op & QIB_SENDCTRL_AVAIL_DIS)
- dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
- else if (op & QIB_SENDCTRL_AVAIL_ENB) {
- dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
- if (dd->flags & QIB_USE_SPCL_TRIG)
- dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
- }
-
- /* Then the ppd ones that are "sticky", saved in shadow */
- if (op & QIB_SENDCTRL_SEND_DIS)
- ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
- else if (op & QIB_SENDCTRL_SEND_ENB)
- ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
-
- if (op & QIB_SENDCTRL_DISARM_ALL) {
- u32 i, last;
-
- tmp_dd_sendctrl = dd->sendctrl;
- last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
- /*
- * Disarm any buffers that are not yet launched,
- * disabling updates until done.
- */
- tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
- for (i = 0; i < last; i++) {
- qib_write_kreg(dd, kr_sendctrl,
- tmp_dd_sendctrl |
- SYM_MASK(SendCtrl, Disarm) | i);
- qib_write_kreg(dd, kr_scratch, 0);
- }
- }
-
- if (op & QIB_SENDCTRL_FLUSH) {
- u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
-
- /*
- * Now drain all the fifos. The Abort bit should never be
- * needed, so for now, at least, we don't use it.
- */
- tmp_ppd_sendctrl |=
- SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
- SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
- SYM_MASK(SendCtrl_0, TxeBypassIbc);
- qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- tmp_dd_sendctrl = dd->sendctrl;
-
- if (op & QIB_SENDCTRL_DISARM)
- tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
- ((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
- SYM_LSB(SendCtrl, DisarmSendBuf));
- if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
- (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
- tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
-
- if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
- qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
- qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- if (op & QIB_SENDCTRL_AVAIL_BLIP) {
- qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
- qib_write_kreg(dd, kr_scratch, 0);
- }
-
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
-
- if (op & QIB_SENDCTRL_FLUSH) {
- u32 v;
- /*
- * ensure writes have hit the chip, then do a few
- * more reads to allow DMA of the pioavail registers
- * to occur, so the in-memory copy is in sync with
- * the chip. Not always safe to sleep.
- */
- v = qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_scratch, v);
- v = qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg(dd, kr_scratch, v);
- qib_read_kreg32(dd, kr_scratch);
- }
-}
-
-#define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
-#define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
-#define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
-
-/**
- * qib_portcntr_7322 - read a per-port chip counter
- * @ppd: the qlogic_ib pport
- * @reg: the counter to read (not a chip offset)
- */
-static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 ret = 0ULL;
- u16 creg;
- /* 0xffff for unimplemented or synthesized counters */
- static const u32 xlator[] = {
- [QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
- [QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
- [QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
- [QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
- [QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
- [QIBPORTCNTR_SENDSTALL] = crp_sendstall,
- [QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
- [QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
- [QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
- [QIBPORTCNTR_RCVEBP] = crp_rcvebp,
- [QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
- [QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
- [QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed for 7322 */
- [QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
- [QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
- [QIBPORTCNTR_ERRICRC] = crp_erricrc,
- [QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
- [QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
- [QIBPORTCNTR_BADFORMAT] = crp_badformat,
- [QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
- [QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
- [QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
- [QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
- [QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
- [QIBPORTCNTR_ERRLINK] = crp_errlink,
- [QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
- [QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
- [QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
- [QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
- [QIBPORTCNTR_ERRPKEY] = crp_errpkey,
- /*
- * the next 3 aren't really counters, but were implemented
- * as counters in older chips, so this code still accesses
- * them as though they were counters.
- */
- [QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
- [QIBPORTCNTR_PSSTART] = krp_psstart,
- [QIBPORTCNTR_PSSTAT] = krp_psstat,
- /* pseudo-counter, summed for all ports */
- [QIBPORTCNTR_KHDROVFL] = 0xffff,
- };
-
- if (reg >= ARRAY_SIZE(xlator)) {
- qib_devinfo(ppd->dd->pcidev,
- "Unimplemented portcounter %u\n", reg);
- goto done;
- }
- creg = xlator[reg] & _PORT_CNTR_IDXMASK;
-
- /* handle non-counters and special cases first */
- if (reg == QIBPORTCNTR_KHDROVFL) {
- int i;
-
- /* sum over all kernel contexts (skip if mini_init) */
- for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
- struct qib_ctxtdata *rcd = dd->rcd[i];
-
- if (!rcd || rcd->ppd != ppd)
- continue;
- ret += read_7322_creg32(dd, cr_base_egrovfl + i);
- }
- goto done;
- } else if (reg == QIBPORTCNTR_RXDROPPKT) {
- /*
- * Used as part of the synthesis of port_rcv_errors
- * in the verbs code for IBTA counters. Not needed for 7322,
- * because all the errors are already counted by other cntrs.
- */
- goto done;
- } else if (reg == QIBPORTCNTR_PSINTERVAL ||
- reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
- /* were counters in older chips, now per-port kernel regs */
- ret = qib_read_kreg_port(ppd, creg);
- goto done;
- }
-
- /*
- * Only fast increment counters are 64 bits; use 32 bit reads to
- * avoid two independent reads when on Opteron.
- */
- if (xlator[reg] & _PORT_64BIT_FLAG)
- ret = read_7322_creg_port(ppd, creg);
- else
- ret = read_7322_creg32_port(ppd, creg);
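- /*
- * The snap/delta adjustments below hide error counts that
- * accumulate while the DDR speed-negotiation workaround is in
- * progress (see the ibdeltainprog bookkeeping elsewhere in this
- * file).
- */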
- if (creg == crp_ibsymbolerr) {
- if (ppd->cpspec->ibdeltainprog)
- ret -= ret - ppd->cpspec->ibsymsnap;
- ret -= ppd->cpspec->ibsymdelta;
- } else if (creg == crp_iblinkerrrecov) {
- if (ppd->cpspec->ibdeltainprog)
- ret -= ret - ppd->cpspec->iblnkerrsnap;
- ret -= ppd->cpspec->iblnkerrdelta;
- } else if (creg == crp_errlink)
- ret -= ppd->cpspec->ibmalfdelta;
- else if (creg == crp_iblinkdown)
- ret += ppd->cpspec->iblnkdowndelta;
-done:
- return ret;
-}
-
-/*
- * Device counter names (not port-specific), one line per stat,
- * single string. Used by utilities like ipathstats to print the stats
- * in a way which works for different versions of drivers, without changing
- * the utility. Names need to be 12 chars or less (w/o newline), for proper
- * display by the utility.
- * Non-error counters are first.
- * The start of the "error" counters is indicated by a leading "E " on the
- * first "error" counter, and doesn't count in the label length.
- * The EgrOvfl list needs to be last so we can truncate it at the
- * configured context count for the device.
- * cntr7322indices contains the corresponding register indices.
- */
-static const char cntr7322names[] =
- "Interrupts\n"
- "HostBusStall\n"
- "E RxTIDFull\n"
- "RxTIDInvalid\n"
- "RxTIDFloDrop\n" /* 7322 only */
- "Ctxt0EgrOvfl\n"
- "Ctxt1EgrOvfl\n"
- "Ctxt2EgrOvfl\n"
- "Ctxt3EgrOvfl\n"
- "Ctxt4EgrOvfl\n"
- "Ctxt5EgrOvfl\n"
- "Ctxt6EgrOvfl\n"
- "Ctxt7EgrOvfl\n"
- "Ctxt8EgrOvfl\n"
- "Ctxt9EgrOvfl\n"
- "Ctx10EgrOvfl\n"
- "Ctx11EgrOvfl\n"
- "Ctx12EgrOvfl\n"
- "Ctx13EgrOvfl\n"
- "Ctx14EgrOvfl\n"
- "Ctx15EgrOvfl\n"
- "Ctx16EgrOvfl\n"
- "Ctx17EgrOvfl\n"
- ;
-
-static const u32 cntr7322indices[] = {
- cr_lbint | _PORT_64BIT_FLAG,
- cr_lbstall | _PORT_64BIT_FLAG,
- cr_tidfull,
- cr_tidinvalid,
- cr_rxtidflowdrop,
- cr_base_egrovfl + 0,
- cr_base_egrovfl + 1,
- cr_base_egrovfl + 2,
- cr_base_egrovfl + 3,
- cr_base_egrovfl + 4,
- cr_base_egrovfl + 5,
- cr_base_egrovfl + 6,
- cr_base_egrovfl + 7,
- cr_base_egrovfl + 8,
- cr_base_egrovfl + 9,
- cr_base_egrovfl + 10,
- cr_base_egrovfl + 11,
- cr_base_egrovfl + 12,
- cr_base_egrovfl + 13,
- cr_base_egrovfl + 14,
- cr_base_egrovfl + 15,
- cr_base_egrovfl + 16,
- cr_base_egrovfl + 17,
-};
-
-/*
- * same as cntr7322names and cntr7322indices, but for port-specific counters.
- * portcntr7322indices is somewhat complicated by some registers needing
- * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
- */
-static const char portcntr7322names[] =
- "TxPkt\n"
- "TxFlowPkt\n"
- "TxWords\n"
- "RxPkt\n"
- "RxFlowPkt\n"
- "RxWords\n"
- "TxFlowStall\n"
- "TxDmaDesc\n" /* 7220 and 7322-only */
- "E RxDlidFltr\n" /* 7220 and 7322-only */
- "IBStatusChng\n"
- "IBLinkDown\n"
- "IBLnkRecov\n"
- "IBRxLinkErr\n"
- "IBSymbolErr\n"
- "RxLLIErr\n"
- "RxBadFormat\n"
- "RxBadLen\n"
- "RxBufOvrfl\n"
- "RxEBP\n"
- "RxFlowCtlErr\n"
- "RxICRCerr\n"
- "RxLPCRCerr\n"
- "RxVCRCerr\n"
- "RxInvalLen\n"
- "RxInvalPKey\n"
- "RxPktDropped\n"
- "TxBadLength\n"
- "TxDropped\n"
- "TxInvalLen\n"
- "TxUnderrun\n"
- "TxUnsupVL\n"
- "RxLclPhyErr\n" /* 7220 and 7322-only from here down */
- "RxVL15Drop\n"
- "RxVlErr\n"
- "XcessBufOvfl\n"
- "RxQPBadCtxt\n" /* 7322-only from here down */
- "TXBadHeader\n"
- ;
-
-static const u32 portcntr7322indices[] = {
- QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
- crp_pktsendflow,
- QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
- QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
- crp_pktrcvflowctrl,
- QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
- QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
- crp_txsdmadesc | _PORT_64BIT_FLAG,
- crp_rxdlidfltr,
- crp_ibstatuschange,
- QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
- QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
- QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
- QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
- crp_rcvflowctrlviol,
- QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
- QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
- QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
- crp_txminmaxlenerr,
- crp_txdroppedpkt,
- crp_txlenerr,
- crp_txunderrun,
- crp_txunsupvl,
- QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
- QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
- QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
- QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
- crp_rxqpinvalidctxt,
- crp_txhdrerr,
-};
-
-/* do all the setup to make the counter reads efficient later */
-static void init_7322_cntrnames(struct qib_devdata *dd)
-{
- int i, j = 0;
- char *s;
-
- for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
- i++) {
- /* we always have at least one counter before the egrovfl */
- if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
- j = 1;
- s = strchr(s + 1, '\n');
- if (s && j)
- j++;
- }
- dd->cspec->ncntrs = i;
- if (!s)
- /* full list; size is without terminating null */
- dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
- else
- dd->cspec->cntrnamelen = 1 + s - cntr7322names;
- dd->cspec->cntrs = kmalloc_array(dd->cspec->ncntrs, sizeof(u64),
- GFP_KERNEL);
-
- for (i = 0, s = (char *)portcntr7322names; s; i++)
- s = strchr(s + 1, '\n');
- dd->cspec->nportcntrs = i - 1;
- dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
- for (i = 0; i < dd->num_pports; ++i) {
- dd->pport[i].cpspec->portcntrs =
- kmalloc_array(dd->cspec->nportcntrs, sizeof(u64),
- GFP_KERNEL);
- }
-}
-
-static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
- u64 **cntrp)
-{
- u32 ret;
-
- if (namep) {
- ret = dd->cspec->cntrnamelen;
- if (pos >= ret)
- ret = 0; /* final read after getting everything */
- else
- *namep = (char *) cntr7322names;
- } else {
- u64 *cntr = dd->cspec->cntrs;
- int i;
-
- ret = dd->cspec->ncntrs * sizeof(u64);
- if (!cntr || pos >= ret) {
- /* everything read, or couldn't get memory */
- ret = 0;
- goto done;
- }
- *cntrp = cntr;
- for (i = 0; i < dd->cspec->ncntrs; i++)
- if (cntr7322indices[i] & _PORT_64BIT_FLAG)
- *cntr++ = read_7322_creg(dd,
- cntr7322indices[i] &
- _PORT_CNTR_IDXMASK);
- else
- *cntr++ = read_7322_creg32(dd,
- cntr7322indices[i]);
- }
-done:
- return ret;
-}
-
-static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
- char **namep, u64 **cntrp)
-{
- u32 ret;
-
- if (namep) {
- ret = dd->cspec->portcntrnamelen;
- if (pos >= ret)
- ret = 0; /* final read after getting everything */
- else
- *namep = (char *)portcntr7322names;
- } else {
- struct qib_pportdata *ppd = &dd->pport[port];
- u64 *cntr = ppd->cpspec->portcntrs;
- int i;
-
- ret = dd->cspec->nportcntrs * sizeof(u64);
- if (!cntr || pos >= ret) {
- /* everything read, or couldn't get memory */
- ret = 0;
- goto done;
- }
- *cntrp = cntr;
- for (i = 0; i < dd->cspec->nportcntrs; i++) {
- if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
- *cntr++ = qib_portcntr_7322(ppd,
- portcntr7322indices[i] &
- _PORT_CNTR_IDXMASK);
- else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
- *cntr++ = read_7322_creg_port(ppd,
- portcntr7322indices[i] &
- _PORT_CNTR_IDXMASK);
- else
- *cntr++ = read_7322_creg32_port(ppd,
- portcntr7322indices[i]);
- }
- }
-done:
- return ret;
-}
-
-/**
- * qib_get_7322_faststats - get word counters from chip before they overflow
- * @t: contains a pointer to the qlogic_ib device qib_devdata
- *
- * VESTIGIAL: the IBA7322 has no "small fast counters", so the only
- * real purpose of this function is to maintain the notion of
- * "active time", which in turn is only logged into the eeprom,
- * which we don't have, yet, for 7322-based boards.
- *
- * called from add_timer
- */
-static void qib_get_7322_faststats(struct timer_list *t)
-{
- struct qib_devdata *dd = timer_container_of(dd, t, stats_timer);
- struct qib_pportdata *ppd;
- unsigned long flags;
- u64 traffic_wds;
- int pidx;
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
-
- /*
- * If the port isn't enabled or isn't operational, or if
- * diags is running (which can cause memory diags to fail),
- * skip this port this time.
- */
- if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
- || dd->diag_client)
- continue;
-
- /*
- * Maintain an activity timer, based on traffic
- * exceeding a threshold, so we need to check the word-counts
- * even if they are 64-bit.
- */
- traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
- qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
- spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
- traffic_wds -= ppd->dd->traffic_wds;
- ppd->dd->traffic_wds += traffic_wds;
- spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
- if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
- QIB_IB_QDR) &&
- (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
- QIBL_LINKACTIVE)) &&
- ppd->cpspec->qdr_dfe_time &&
- time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
- ppd->cpspec->qdr_dfe_on = 0;
-
- qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
- ppd->dd->cspec->r1 ?
- QDR_STATIC_ADAPT_INIT_R1 :
- QDR_STATIC_ADAPT_INIT);
- force_h1(ppd);
- }
- }
- mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
-}
-
-/*
- * If we were using MSIx, try to fallback to INTx.
- */
-static int qib_7322_intr_fallback(struct qib_devdata *dd)
-{
- if (!dd->cspec->num_msix_entries)
- return 0; /* already using INTx */
-
- qib_devinfo(dd->pcidev,
- "MSIx interrupt not detected, trying INTx interrupts\n");
- qib_7322_free_irq(dd);
- if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_INTX) < 0)
- qib_dev_err(dd, "Failed to enable INTx\n");
- qib_setup_7322_interrupt(dd, 0);
- return 1;
-}
-
-/*
- * Reset the XGXS (between serdes and IBC). Slightly less intrusive
- * than resetting the IBC or external link state, and useful in some
- * cases to cause some retraining. To do this right, we reset IBC
- * as well, then return to the previous state (which may still be in reset).
- * NOTE: some callers of this "know" this writes the current value
- * of cpspec->ibcctrl_a as part of its operation, so if that changes,
- * check all callers.
- */
-static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
-{
- u64 val;
- struct qib_devdata *dd = ppd->dd;
- const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
- SYM_MASK(IBPCSConfig_0, xcv_treset) |
- SYM_MASK(IBPCSConfig_0, tx_rx_reset);
-
- val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
- qib_write_kreg(dd, kr_hwerrmask,
- dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
- qib_write_kreg_port(ppd, krp_ibcctrl_a,
- ppd->cpspec->ibcctrl_a &
- ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
-
- qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
- qib_read_kreg32(dd, kr_scratch);
- qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
- qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- qib_write_kreg(dd, kr_hwerrclear,
- SYM_MASK(HwErrClear, statusValidNoEopClear));
- qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
-}
-
-/*
- * This code for non-IBTA-compliant IB speed negotiation is only known to
- * work for the SDR to DDR transition, and only between an HCA and a switch
- * with recent firmware. It is based on observed heuristics, rather than
- * actual knowledge of the non-compliant speed negotiation.
- * It has a number of hard-coded fields, since the hope is to rewrite this
- * when a spec is available on how the negotiation is intended to work.
- */
-static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
- u32 dcnt, u32 *data)
-{
- int i;
- u64 pbc;
- u32 __iomem *piobuf;
- u32 pnum, control, len;
- struct qib_devdata *dd = ppd->dd;
-
- i = 0;
- len = 7 + dcnt + 1; /* 7 dword header, dword data, icrc */
- control = qib_7322_setpbc_control(ppd, len, 0, 15);
- pbc = ((u64) control << 32) | len;
- while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
- if (i++ > 15)
- return;
- udelay(2);
- }
- /* disable header check on this packet, since it can't be valid */
- dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
- writeq(pbc, piobuf);
- qib_flush_wc();
- qib_pio_copy(piobuf + 2, hdr, 7);
- qib_pio_copy(piobuf + 9, data, dcnt);
- if (dd->flags & QIB_USE_SPCL_TRIG) {
- u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
-
- qib_flush_wc();
- __raw_writel(0xaebecede, piobuf + spcl_off);
- }
- qib_flush_wc();
- qib_sendbuf_done(dd, pnum);
- /* and re-enable hdr check */
- dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
-}
-
-/*
- * _start packet gets sent twice at start, _done gets sent twice at end
- */
-static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
-{
- struct qib_devdata *dd = ppd->dd;
- static u32 swapped;
- u32 dw, i, hcnt, dcnt, *data;
- static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
- static u32 madpayload_start[0x40] = {
- 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
- 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
- };
- static u32 madpayload_done[0x40] = {
- 0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
- 0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
- 0x40000001, 0x1388, 0x15e, /* rest 0's */
- };
-
- dcnt = ARRAY_SIZE(madpayload_start);
- hcnt = ARRAY_SIZE(hdr);
- if (!swapped) {
- /* for maintainability, do it at runtime */
- for (i = 0; i < hcnt; i++) {
- dw = (__force u32) cpu_to_be32(hdr[i]);
- hdr[i] = dw;
- }
- for (i = 0; i < dcnt; i++) {
- dw = (__force u32) cpu_to_be32(madpayload_start[i]);
- madpayload_start[i] = dw;
- dw = (__force u32) cpu_to_be32(madpayload_done[i]);
- madpayload_done[i] = dw;
- }
- swapped = 1;
- }
-
- data = which ? madpayload_done : madpayload_start;
-
- autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
- qib_read_kreg64(dd, kr_scratch);
- udelay(2);
- autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
- qib_read_kreg64(dd, kr_scratch);
- udelay(2);
-}
-
-/*
- * Do the absolute minimum to cause an IB speed change, and make it
- * ready, but don't actually trigger the change. The caller will
- * do that when ready (if the link is in the Polling training state, it
- * will happen immediately, otherwise when the link next goes down).
- *
- * This routine should only be used as part of the DDR autonegotiation
- * code for devices that are not compliant with IB 1.2 (or code that
- * fixes things up for same).
- *
- * When the link has gone down and autoneg is enabled, or autoneg has
- * failed and we give up until next time, we set both speeds, and
- * then we want IBTA enabled as well as "use max enabled speed".
- */
-static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
-{
- u64 newctrlb;
-
- newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
- IBA7322_IBC_IBTA_1_2_MASK |
- IBA7322_IBC_MAX_SPEED_MASK);
-
- if (speed & (speed - 1)) /* multiple speeds */
- newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
- IBA7322_IBC_IBTA_1_2_MASK |
- IBA7322_IBC_MAX_SPEED_MASK;
- else
- newctrlb |= speed == QIB_IB_QDR ?
- IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
- ((speed == QIB_IB_DDR ?
- IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
-
- if (newctrlb == ppd->cpspec->ibcctrl_b)
- return;
-
- ppd->cpspec->ibcctrl_b = newctrlb;
- qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
- qib_write_kreg(ppd->dd, kr_scratch, 0);
-}
-
-/*
- * This routine is only used when we are not talking to another
- * IB 1.2-compliant device that we think can do DDR.
- * (This includes all existing switch chips as of Oct 2007.)
- * 1.2-compliant devices go directly to DDR prior to reaching INIT
- */
-static void try_7322_autoneg(struct qib_pportdata *ppd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- qib_autoneg_7322_send(ppd, 0);
- set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
- qib_7322_mini_pcs_reset(ppd);
- /* 2 msec is minimum length of a poll cycle */
- queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
- msecs_to_jiffies(2));
-}
-
-/*
- * Handle the empirically determined mechanism for auto-negotiation
- * of DDR speed with switches.
- */
-static void autoneg_7322_work(struct work_struct *work)
-{
- struct qib_pportdata *ppd;
- u32 i;
- unsigned long flags;
-
- ppd = container_of(work, struct qib_chippport_specific,
- autoneg_work.work)->ppd;
-
- /*
- * Busy-wait for this first part; it should take at most a
- * few hundred usec, since we scheduled ourselves for 2 msec.
- */
- for (i = 0; i < 25; i++) {
- if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
- == IB_7322_LT_STATE_POLLQUIET) {
- qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
- break;
- }
- udelay(100);
- }
-
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
- goto done; /* we got there early or told to stop */
-
- /* we expect this to time out */
- if (wait_event_timeout(ppd->cpspec->autoneg_wait,
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
- msecs_to_jiffies(90)))
- goto done;
- qib_7322_mini_pcs_reset(ppd);
-
- /* we expect this to time out */
- if (wait_event_timeout(ppd->cpspec->autoneg_wait,
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
- msecs_to_jiffies(1700)))
- goto done;
- qib_7322_mini_pcs_reset(ppd);
-
- set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
-
- /*
- * Wait up to 250 msec for link to train and get to INIT at DDR;
- * this should terminate early.
- */
- wait_event_timeout(ppd->cpspec->autoneg_wait,
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
- msecs_to_jiffies(250));
-done:
- if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
- if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
- ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
- ppd->cpspec->autoneg_tries = 0;
- }
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
- }
-}
-
-/*
- * This routine is used to request that IPG be set in the QLogic switch.
- * Only called if r1.
- */
-static void try_7322_ipg(struct qib_pportdata *ppd)
-{
- struct qib_ibport *ibp = &ppd->ibport_data;
- struct ib_mad_send_buf *send_buf;
- struct ib_mad_agent *agent;
- struct ib_smp *smp;
- unsigned delay;
- int ret;
-
- agent = ibp->rvp.send_agent;
- if (!agent)
- goto retry;
-
- send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
- IB_MGMT_MAD_DATA, GFP_ATOMIC,
- IB_MGMT_BASE_VERSION);
- if (IS_ERR(send_buf))
- goto retry;
-
- if (!ibp->smi_ah) {
- struct ib_ah *ah;
-
- ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
- if (IS_ERR(ah))
- ret = PTR_ERR(ah);
- else {
- send_buf->ah = ah;
- ibp->smi_ah = ibah_to_rvtah(ah);
- ret = 0;
- }
- } else {
- send_buf->ah = &ibp->smi_ah->ibah;
- ret = 0;
- }
-
- smp = send_buf->mad;
- smp->base_version = IB_MGMT_BASE_VERSION;
- smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
- smp->class_version = 1;
- smp->method = IB_MGMT_METHOD_SEND;
- smp->hop_cnt = 1;
- smp->attr_id = QIB_VENDOR_IPG;
- smp->attr_mod = 0;
-
- if (!ret)
- ret = ib_post_send_mad(send_buf, NULL);
- if (ret)
- ib_free_send_mad(send_buf);
-retry:
- delay = 2 << ppd->cpspec->ipg_tries;
- queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
- msecs_to_jiffies(delay));
-}
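-
-/*
- * Illustrative note: the delay above backs off exponentially with
- * ipg_tries: 2 << 0 = 2 msec for the first attempt, then 4, 8, ...
- * up to 2 << 10 = 2048 msec before ipg_7322_work() stops rescheduling
- * after its tenth retry.
- */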
-
-/*
- * Timeout handler for setting IPG.
- * Only called if r1.
- */
-static void ipg_7322_work(struct work_struct *work)
-{
- struct qib_pportdata *ppd;
-
- ppd = container_of(work, struct qib_chippport_specific,
- ipg_work.work)->ppd;
- if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
- && ++ppd->cpspec->ipg_tries <= 10)
- try_7322_ipg(ppd);
-}
-
-static u32 qib_7322_iblink_state(u64 ibcs)
-{
- u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
-
- switch (state) {
- case IB_7322_L_STATE_INIT:
- state = IB_PORT_INIT;
- break;
- case IB_7322_L_STATE_ARM:
- state = IB_PORT_ARMED;
- break;
- case IB_7322_L_STATE_ACTIVE:
- case IB_7322_L_STATE_ACT_DEFER:
- state = IB_PORT_ACTIVE;
- break;
- default:
- fallthrough;
- case IB_7322_L_STATE_DOWN:
- state = IB_PORT_DOWN;
- break;
- }
- return state;
-}
-
-/* returns the IBTA port state, rather than the IBC link training state */
-static u8 qib_7322_phys_portstate(u64 ibcs)
-{
- u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
- return qib_7322_physportstate[state];
-}
-
-static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
-{
- int ret = 0, symadj = 0;
- unsigned long flags;
- int mult;
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
-
- /* Update our picture of width and speed from chip */
- if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
- ppd->link_speed_active = QIB_IB_QDR;
- mult = 4;
- } else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
- ppd->link_speed_active = QIB_IB_DDR;
- mult = 2;
- } else {
- ppd->link_speed_active = QIB_IB_SDR;
- mult = 1;
- }
- if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
- ppd->link_width_active = IB_WIDTH_4X;
- mult *= 4;
- } else
- ppd->link_width_active = IB_WIDTH_1X;
- ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
-
- if (!ibup) {
- u64 clr;
-
- /* Link went down. */
- /* do IPG MAD again after linkdown, even if last time failed */
- ppd->cpspec->ipg_tries = 0;
- clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
- (SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
- SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
- if (clr)
- qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
- if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
- QIBL_IB_AUTONEG_INPROG)))
- set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
- struct qib_qsfp_data *qd =
- &ppd->cpspec->qsfp_data;
- /* unlock the Tx settings, speed may change */
- qib_write_kreg_port(ppd, krp_tx_deemph_override,
- SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- reset_tx_deemphasis_override));
- qib_cancel_sends(ppd);
- /* on link down, ensure sane pcs state */
- qib_7322_mini_pcs_reset(ppd);
- /* schedule the qsfp refresh, which should turn the link off */
- if (ppd->dd->flags & QIB_HAS_QSFP) {
- qd->t_insert = jiffies;
- queue_work(ib_wq, &qd->work);
- }
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- if (__qib_sdma_running(ppd))
- __qib_sdma_process_event(ppd,
- qib_sdma_event_e70_go_idle);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- }
- clr = read_7322_creg32_port(ppd, crp_iblinkdown);
- if (clr == ppd->cpspec->iblnkdownsnap)
- ppd->cpspec->iblnkdowndelta++;
- } else {
- if (qib_compat_ddr_negotiate &&
- !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
- QIBL_IB_AUTONEG_INPROG)) &&
- ppd->link_speed_active == QIB_IB_SDR &&
- (ppd->link_speed_enabled & QIB_IB_DDR)
- && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
- /* we are SDR, and auto-negotiation enabled */
- ++ppd->cpspec->autoneg_tries;
- if (!ppd->cpspec->ibdeltainprog) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymdelta +=
- read_7322_creg32_port(ppd,
- crp_ibsymbolerr) -
- ppd->cpspec->ibsymsnap;
- ppd->cpspec->iblnkerrdelta +=
- read_7322_creg32_port(ppd,
- crp_iblinkerrrecov) -
- ppd->cpspec->iblnkerrsnap;
- }
- try_7322_autoneg(ppd);
- ret = 1; /* no other IB status change processing */
- } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
- ppd->link_speed_active == QIB_IB_SDR) {
- qib_autoneg_7322_send(ppd, 1);
- set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
- qib_7322_mini_pcs_reset(ppd);
- udelay(2);
- ret = 1; /* no other IB status change processing */
- } else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
- (ppd->link_speed_active & QIB_IB_DDR)) {
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
- QIBL_IB_AUTONEG_FAILED);
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- ppd->cpspec->autoneg_tries = 0;
- /* re-enable SDR, for next link down */
- set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
- wake_up(&ppd->cpspec->autoneg_wait);
- symadj = 1;
- } else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
- /*
- * Clear autoneg failure flag, and do setup
- * so we'll try next time link goes down and
- * back to INIT (possibly connected to a
- * different device).
- */
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
- symadj = 1;
- }
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
- symadj = 1;
- if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
- try_7322_ipg(ppd);
- if (!ppd->cpspec->recovery_init)
- setup_7322_link_recovery(ppd, 0);
- ppd->cpspec->qdr_dfe_time = jiffies +
- msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
- }
- ppd->cpspec->ibmalfusesnap = 0;
- ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
- crp_errlink);
- }
- if (symadj) {
- ppd->cpspec->iblnkdownsnap =
- read_7322_creg32_port(ppd, crp_iblinkdown);
- if (ppd->cpspec->ibdeltainprog) {
- ppd->cpspec->ibdeltainprog = 0;
- ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
- crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
- ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
- crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
- }
- } else if (!ibup && qib_compat_ddr_negotiate &&
- !ppd->cpspec->ibdeltainprog &&
- !(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
- ppd->cpspec->ibdeltainprog = 1;
- ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
- crp_ibsymbolerr);
- ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
- crp_iblinkerrrecov);
- }
-
- if (!ret)
- qib_setup_7322_setextled(ppd, ibup);
- return ret;
-}
-
-/*
- * Does read/modify/write to appropriate registers to
- * set output and direction bits selected by mask.
- * these are in their canonical positions (e.g. lsb of
- * dir will end up in D48 of extctrl on existing chips).
- * returns contents of GP Inputs.
- */
-static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
-{
- u64 read_val, new_out;
- unsigned long flags;
-
- if (mask) {
- /* some bits being written, lock access to GPIO */
- dir &= mask;
- out &= mask;
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
- dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
- new_out = (dd->cspec->gpio_out & ~mask) | out;
-
- qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
- qib_write_kreg(dd, kr_gpio_out, new_out);
- dd->cspec->gpio_out = new_out;
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
- }
- /*
- * It is unlikely that a read at this time would get valid
- * data on a pin whose direction line was set in the same
- * call to this function. We include the read here because
- * that allows us to potentially combine a change on one pin with
- * a read on another, and because the old code did something like
- * this.
- */
- read_val = qib_read_kreg64(dd, kr_extstatus);
- return SYM_FIELD(read_val, EXTStatus, GPIOIn);
-}
-
-/* Enable writes to config EEPROM, if possible. Returns previous state */
-static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
-{
- int prev_wen;
- u32 mask;
-
- mask = 1 << QIB_EEPROM_WEN_NUM;
- prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
- gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
-
- return prev_wen & 1;
-}
-
-/*
- * Read fundamental info we need to use the chip. These are
- * the registers that describe chip capabilities, and are
- * saved in shadow registers.
- */
-static void get_7322_chip_params(struct qib_devdata *dd)
-{
- u64 val;
- u32 piobufs;
- int mtu;
-
- dd->palign = qib_read_kreg32(dd, kr_pagealign);
-
- dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
-
- dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
- dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
- dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
- dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
- dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
-
- val = qib_read_kreg64(dd, kr_sendpiobufcnt);
- dd->piobcnt2k = val & ~0U;
- dd->piobcnt4k = val >> 32;
- val = qib_read_kreg64(dd, kr_sendpiosize);
- dd->piosize2k = val & ~0U;
- dd->piosize4k = val >> 32;
-
- mtu = ib_mtu_enum_to_int(qib_ibmtu);
- if (mtu == -1)
- mtu = QIB_DEFAULT_MTU;
- dd->pport[0].ibmtu = (u32)mtu;
- dd->pport[1].ibmtu = (u32)mtu;
-
- /* these may be adjusted in init_chip_wc_pat() */
- dd->pio2kbase = (u32 __iomem *)
- ((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
- dd->pio4kbase = (u32 __iomem *)
- ((char __iomem *) dd->kregbase +
- (dd->piobufbase >> 32));
- /*
- * 4K buffers take 2 pages; we use roundup just to be
- * paranoid; we calculate it once here, rather than on
- * every buf allocation
- */
- dd->align4k = ALIGN(dd->piosize4k, dd->palign);
-
- piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
-
- dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
- (sizeof(u64) * BITS_PER_BYTE / 2);
-}
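-
-/*
- * Worked example with hypothetical counts (not chip data): each u64
- * pioavail register carries 2 bits per send buffer, so one register
- * covers sizeof(u64) * BITS_PER_BYTE / 2 = 32 buffers. With, say,
- * piobcnt2k = 128, piobcnt4k = 32 and NUM_VL15_BUFS = 2, piobufs is
- * 162 and pioavregs = ALIGN(162, 32) / 32 = 192 / 32 = 6.
- */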
-
-/*
- * The chip base addresses in cspec and cpspec have to be set
- * after possible init_chip_wc_pat(), rather than in
- * get_7322_chip_params(), so split out as separate function
- */
-static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
-{
- u32 cregbase;
-
- cregbase = qib_read_kreg32(dd, kr_counterregbase);
-
- dd->cspec->cregbase = (u64 __iomem *)(cregbase +
- (char __iomem *)dd->kregbase);
-
- dd->egrtidbase = (u64 __iomem *)
- ((char __iomem *) dd->kregbase + dd->rcvegrbase);
-
- /* port registers are defined as relative to base of chip */
- dd->pport[0].cpspec->kpregbase =
- (u64 __iomem *)((char __iomem *)dd->kregbase);
- dd->pport[1].cpspec->kpregbase =
- (u64 __iomem *)(dd->palign +
- (char __iomem *)dd->kregbase);
- dd->pport[0].cpspec->cpregbase =
- (u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
- kr_counterregbase) + (char __iomem *)dd->kregbase);
- dd->pport[1].cpspec->cpregbase =
- (u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
- kr_counterregbase) + (char __iomem *)dd->kregbase);
-}
-
-/*
- * This is a fairly special-purpose observer, so we only support
- * the port-specific parts of SendCtrl
- */
-
-#define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) | \
- SYM_MASK(SendCtrl_0, SDmaEnable) | \
- SYM_MASK(SendCtrl_0, SDmaIntEnable) | \
- SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
- SYM_MASK(SendCtrl_0, SDmaHalt) | \
- SYM_MASK(SendCtrl_0, IBVLArbiterEn) | \
- SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
-
-static int sendctrl_hook(struct qib_devdata *dd,
- const struct diag_observer *op, u32 offs,
- u64 *data, u64 mask, int only_32)
-{
- unsigned long flags;
- unsigned idx;
- unsigned pidx;
- struct qib_pportdata *ppd = NULL;
- u64 local_data, all_bits;
-
- /*
- * The fixed correspondence between Physical ports and pports is
- * severed. We need to hunt for the ppd that corresponds
- * to the offset we got. And we have to do that without admitting
- * we know the stride, apparently.
- */
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- u64 __iomem *psptr;
- u32 psoffs;
-
- ppd = dd->pport + pidx;
- if (!ppd->cpspec->kpregbase)
- continue;
-
- psptr = ppd->cpspec->kpregbase + krp_sendctrl;
- psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
- if (psoffs == offs)
- break;
- }
-
- /* If pport is not being managed by driver, just avoid shadows. */
- if (pidx >= dd->num_pports)
- ppd = NULL;
-
- /* In any case, "idx" is flat index in kreg space */
- idx = offs / sizeof(u64);
-
- all_bits = ~0ULL;
- if (only_32)
- all_bits >>= 32;
-
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- if (!ppd || (mask & all_bits) != all_bits) {
- /*
- * At least some mask bits are zero, so we need
- * to read. The judgement call is whether from
- * reg or shadow. First-cut: read reg, and complain
- * if any bits which should be shadowed are different
- * from their shadowed value.
- */
- if (only_32)
- local_data = (u64)qib_read_kreg32(dd, idx);
- else
- local_data = qib_read_kreg64(dd, idx);
- *data = (local_data & ~mask) | (*data & mask);
- }
- if (mask) {
- /*
- * At least some mask bits are one, so we need
- * to write, but only shadow some bits.
- */
- u64 sval, tval; /* Shadowed, transient */
-
- /*
- * New shadow val is bits we don't want to touch,
- * ORed with bits we do, that are intended for shadow.
- */
- if (ppd) {
- sval = ppd->p_sendctrl & ~mask;
- sval |= *data & SENDCTRL_SHADOWED & mask;
- ppd->p_sendctrl = sval;
- } else
- sval = *data & SENDCTRL_SHADOWED & mask;
- tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
- qib_write_kreg(dd, idx, tval);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- }
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- return only_32 ? 4 : 8;
-}
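-
-/*
- * A minimal sketch (not part of the driver; shadowed_write_val() is a
- * hypothetical helper) of the shadow/transient split in sendctrl_hook()
- * above: "shadowed" is the set of bits whose last-written value is
- * tracked in software (SENDCTRL_SHADOWED there), and the return value
- * is what actually goes to the register.
- */
-static inline u64 shadowed_write_val(u64 *shadow, u64 data, u64 mask,
- u64 shadowed)
-{
- /* keep untouched shadow bits; take new shadowed bits from data */
- u64 sval = (*shadow & ~mask) | (data & shadowed & mask);
- /* the transient value also carries data's non-shadowed bits */
- u64 tval = sval | (data & ~shadowed & mask);
-
- *shadow = sval;
- return tval;
-}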
-
-static const struct diag_observer sendctrl_0_observer = {
- sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
- KREG_IDX(SendCtrl_0) * sizeof(u64)
-};
-
-static const struct diag_observer sendctrl_1_observer = {
- sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
- KREG_IDX(SendCtrl_1) * sizeof(u64)
-};
-
-static ushort sdma_fetch_prio = 8;
-module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
-MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
-
-/* Besides logging QSFP events, we set appropriate TxDDS values */
-static void init_txdds_table(struct qib_pportdata *ppd, int override);
-
-static void qsfp_7322_event(struct work_struct *work)
-{
- struct qib_qsfp_data *qd;
- struct qib_pportdata *ppd;
- unsigned long pwrup;
- unsigned long flags;
- int ret;
- u32 le2;
-
- qd = container_of(work, struct qib_qsfp_data, work);
- ppd = qd->ppd;
- pwrup = qd->t_insert +
- msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
-
- /* Delay for 20 msecs to allow ModPrs resistor to setup */
- mdelay(QSFP_MODPRS_LAG_MSEC);
-
- if (!qib_qsfp_mod_present(ppd)) {
- ppd->cpspec->qsfp_data.modpresent = 0;
- /* Set the physical link to disabled */
- qib_set_ib_7322_lstate(ppd, 0,
- QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_LINKV;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- } else {
- /*
- * Some QSFPs not only do not respond until the full power-up
- * time, but may behave badly if we try. So hold off responding
- * to insertion.
- */
- while (1) {
- if (time_is_before_jiffies(pwrup))
- break;
- msleep(20);
- }
-
- ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
-
- /*
- * Need to change LE2 back to defaults if we couldn't
- * read the cable type (to handle cable swaps), so do this
- * even on failure to read cable information. We don't
- * get here for QME, so IS_QME check not needed here.
- */
- if (!ret && !ppd->dd->cspec->r1) {
- if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
- le2 = LE2_QME;
- else if (qd->cache.atten[1] >= qib_long_atten &&
- QSFP_IS_CU(qd->cache.tech))
- le2 = LE2_5m;
- else
- le2 = LE2_DEFAULT;
- } else
- le2 = LE2_DEFAULT;
- ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
- /*
- * We always change parameters, since we can choose
- * values for cables without eeproms, and the cable may have
- * changed from a cable with full or partial eeprom content
- * to one with partial or no content.
- */
- init_txdds_table(ppd, 0);
- /* The physical link is being re-enabled only when the
- * previous state was DISABLED and the VALID bit is not
- * set. This should only happen when the cable has been
- * physically pulled. */
- if (!ppd->cpspec->qsfp_data.modpresent &&
- (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
- ppd->cpspec->qsfp_data.modpresent = 1;
- qib_set_ib_7322_lstate(ppd, 0,
- QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_LINKV;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- }
- }
-}
-
-/*
- * There is little we can do but complain to the user if QSFP
- * initialization fails.
- */
-static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
-{
- unsigned long flags;
- struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
- struct qib_devdata *dd = ppd->dd;
- u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
-
- mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
- qd->ppd = ppd;
- qib_qsfp_init(qd, qsfp_7322_event);
- spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
- dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
- dd->cspec->gpio_mask |= mod_prs_bit;
- qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
- qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
- spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
-}
-
-/*
- * called at device initialization time, and also if the txselect
- * module parameter is changed. This is used for cables that don't
- * have valid QSFP EEPROMs (not present, or attenuation is zero).
- * We initialize to the default, then if there is a specific
- * unit,port match, we use that (and set it immediately, for the
- * current speed, if the link is at INIT or better).
- * String format is "default# unit#,port#=# ... u,p=#", separators must
- * be a SPACE character. A newline terminates. The u,p=# tuples may
- * optionally have "u,p=#,#", where the final # is the H1 value
- * The last specific match is used (actually, all are used, but last
- * one is the one that winds up set); if none at all, fall back on default.
- */
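-/*
- * Hypothetical example of the format (values made up): the string
- * "0 1,1=5 1,2=7,11\n" leaves the default at table entry 0, points
- * unit 1 port 1 at entry 5, and points unit 1 port 2 at entry 7 while
- * also forcing that port's H1 value to 11.
- */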
-static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
-{
- char *nxt, *str;
- u32 pidx, unit, port, deflt, h1;
- unsigned long val;
- int any = 0, seth1;
- int txdds_size;
-
- str = txselect_list;
-
- /* default number is validated in setup_txselect() */
- deflt = simple_strtoul(str, &nxt, 0);
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
- dd->pport[pidx].cpspec->no_eep = deflt;
-
- txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
- if (IS_QME(dd) || IS_QMH(dd))
- txdds_size += TXDDS_MFG_SZ;
-
- while (*nxt && nxt[1]) {
- str = ++nxt;
- unit = simple_strtoul(str, &nxt, 0);
- if (nxt == str || !*nxt || *nxt != ',') {
- while (*nxt && *nxt++ != ' ') /* skip to next, if any */
- ;
- continue;
- }
- str = ++nxt;
- port = simple_strtoul(str, &nxt, 0);
- if (nxt == str || *nxt != '=') {
- while (*nxt && *nxt++ != ' ') /* skip to next, if any */
- ;
- continue;
- }
- str = ++nxt;
- val = simple_strtoul(str, &nxt, 0);
- if (nxt == str) {
- while (*nxt && *nxt++ != ' ') /* skip to next, if any */
- ;
- continue;
- }
- if (val >= txdds_size)
- continue;
- seth1 = 0;
- h1 = 0; /* gcc thinks it might be used uninitted */
- if (*nxt == ',' && nxt[1]) {
- str = ++nxt;
- h1 = (u32)simple_strtoul(str, &nxt, 0);
- if (nxt == str)
- while (*nxt && *nxt++ != ' ') /* skip */
- ;
- else
- seth1 = 1;
- }
- for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
- ++pidx) {
- struct qib_pportdata *ppd = &dd->pport[pidx];
-
- if (ppd->port != port || !ppd->link_speed_supported)
- continue;
- ppd->cpspec->no_eep = val;
- if (seth1)
- ppd->cpspec->h1_val = h1;
- /* now change the IBC and serdes, overriding generic */
- init_txdds_table(ppd, 1);
- /* Re-enable the physical state machine on mezz boards
- * now that the correct settings have been set.
- * QSFP boards are handled by the QSFP event handler */
- if (IS_QMH(dd) || IS_QME(dd))
- qib_set_ib_7322_lstate(ppd, 0,
- QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
- any++;
- }
- if (*nxt == '\n')
- break; /* done */
- }
- if (change && !any) {
- /* no specific setting, use the default.
- * Change the IBC and serdes, but since it's
- * general, don't override specific settings.
- */
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
- if (dd->pport[pidx].link_speed_supported)
- init_txdds_table(&dd->pport[pidx], 0);
- }
-}
-
-/* handle the txselect parameter changing */
-static int setup_txselect(const char *str, const struct kernel_param *kp)
-{
- struct qib_devdata *dd;
- unsigned long index, val;
- char *n;
-
- if (strlen(str) >= ARRAY_SIZE(txselect_list)) {
- pr_info("txselect_values string too long\n");
- return -ENOSPC;
- }
- val = simple_strtoul(str, &n, 0);
- if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
- TXDDS_MFG_SZ)) {
- pr_info("txselect_values must start with a number < %d\n",
- TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
- return -EINVAL;
- }
- strscpy(txselect_list, str, sizeof(txselect_list));
-
- xa_for_each(&qib_dev_table, index, dd)
- if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
- set_no_qsfp_atten(dd, 1);
- return 0;
-}
-
-/*
- * Write the final few registers that depend on some of the
- * init setup. Done late in init, just before bringing up
- * the serdes.
- */
-static int qib_late_7322_initreg(struct qib_devdata *dd)
-{
- int ret = 0, n;
- u64 val;
-
- qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
- qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
- qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
- qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
- val = qib_read_kreg64(dd, kr_sendpioavailaddr);
- if (val != dd->pioavailregs_phys) {
- qib_dev_err(dd,
- "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
- (unsigned long) dd->pioavailregs_phys,
- (unsigned long long) val);
- ret = -EINVAL;
- }
-
- n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
- qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
- /* driver sends get pkey, lid, etc. checking also, to catch bugs */
- qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
-
- qib_register_observer(dd, &sendctrl_0_observer);
- qib_register_observer(dd, &sendctrl_1_observer);
-
- dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
- qib_write_kreg(dd, kr_control, dd->control);
- /*
- * Set SendDmaFetchPriority and init Tx params, including
- * QSFP handler on boards that have QSFP.
- * First set our default attenuation entry for cables that
- * don't have valid attenuation.
- */
- set_no_qsfp_atten(dd, 0);
- for (n = 0; n < dd->num_pports; ++n) {
- struct qib_pportdata *ppd = dd->pport + n;
-
- qib_write_kreg_port(ppd, krp_senddmaprioritythld,
- sdma_fetch_prio & 0xf);
- /* Initialize qsfp if present on board. */
- if (dd->flags & QIB_HAS_QSFP)
- qib_init_7322_qsfp(ppd);
- }
- dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
- qib_write_kreg(dd, kr_control, dd->control);
-
- return ret;
-}
-
-/* per IB port errors. */
-#define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
- MASK_ACROSS(8, 15))
-#define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
-#define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
- MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
- MASK_ACROSS(0, 11))
-
-/*
- * Write the initialization per-port registers that need to be done at
- * driver load and after reset completes (i.e., that aren't done as part
- * of other init procedures called from qib_init.c).
- * Some of these should be redundant on reset, but play safe.
- */
-static void write_7322_init_portregs(struct qib_pportdata *ppd)
-{
- u64 val;
- int i;
-
- if (!ppd->link_speed_supported) {
- /* no buffer credits for this port */
- for (i = 1; i < 8; i++)
- qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
- qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
- qib_write_kreg(ppd->dd, kr_scratch, 0);
- return;
- }
-
- /*
- * Set the number of supported virtual lanes in IBC,
- * for flow control packet handling on unsupported VLs
- */
- val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
- val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
- val |= (u64)(ppd->vls_supported - 1) <<
- SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
- qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
-
- qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
-
- /* enable tx header checking */
- qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
- IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
- IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
-
- qib_write_kreg_port(ppd, krp_ncmodectrl,
- SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
-
- /*
- * Unconditionally clear the bufmask bits. If SDMA is
- * enabled, we'll set them appropriately later.
- */
- qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
- qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
- qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
- if (ppd->dd->cspec->r1)
- ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
-}
-
-/*
- * Write the initialization per-device registers that need to be done at
- * driver load and after reset completes (i.e., that aren't done as part
- * of other init procedures called from qib_init.c). Also write per-port
- * registers that are affected by overall device config, such as QP mapping
- * Some of these should be redundant on reset, but play safe.
- */
-static void write_7322_initregs(struct qib_devdata *dd)
-{
- struct qib_pportdata *ppd;
- int i, pidx;
- u64 val;
-
- /* Set Multicast QPs received by port 2 to map to context one. */
- qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- unsigned n, regno;
- unsigned long flags;
-
- if (dd->n_krcv_queues < 2 ||
- !dd->pport[pidx].link_speed_supported)
- continue;
-
- ppd = &dd->pport[pidx];
-
- /* be paranoid against later code motion, etc. */
- spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
- ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
- spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
-
- /* Initialize QP to context mapping */
- regno = krp_rcvqpmaptable;
- val = 0;
- if (dd->num_pports > 1)
- n = dd->first_user_ctxt / dd->num_pports;
- else
- n = dd->first_user_ctxt - 1;
- for (i = 0; i < 32; ) {
- unsigned ctxt;
-
- if (dd->num_pports > 1)
- ctxt = (i % n) * dd->num_pports + pidx;
- else if (i % n)
- ctxt = (i % n) + 1;
- else
- ctxt = ppd->hw_pidx;
- val |= ctxt << (5 * (i % 6));
- i++;
- if (i % 6 == 0) {
- qib_write_kreg_port(ppd, regno, val);
- val = 0;
- regno++;
- }
- }
- qib_write_kreg_port(ppd, regno, val);
- }
-
- /*
- * Set up interrupt mitigation for kernel contexts, but
- * not user contexts (user contexts use interrupts when
- * stalled waiting for any packet, so we want those interrupts
- * right away).
- */
- for (i = 0; i < dd->first_user_ctxt; i++) {
- dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
- qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
- }
-
- /*
- * Initialize the (disabled) rcvflow tables. Application code
- * will set up each flow as it uses the flow.
- * Doesn't clear any of the error bits that might be set.
- */
- val = TIDFLOW_ERRBITS; /* these are W1C */
- for (i = 0; i < dd->cfgctxts; i++) {
- int flow;
-
- for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
- qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
- }
-
- /*
- * Dual-port cards init to dual-port recovery, single-port cards to
- * the one port. Dual-port cards may later adjust to 1 port,
- * and then back to dual port if both ports are connected.
- */
- if (dd->num_pports)
- setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
-}
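-
-/*
- * Worked example for the QP-to-context packing above (hypothetical
- * numbers): each krp_rcvqpmaptable register holds six 5-bit context
- * numbers, hence "val |= ctxt << (5 * (i % 6))" and a register write
- * after every sixth entry. On a dual-port card with first_user_ctxt
- * = 6, n = 3, so port 0 (pidx 0) cycles through kernel contexts
- * 0, 2, 4, 0, 2, 4, ... across the 32 map slots.
- */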
-
-static int qib_init_7322_variables(struct qib_devdata *dd)
-{
- struct qib_pportdata *ppd;
- unsigned features, pidx, sbufcnt;
- int ret, mtu;
- u32 sbufs, updthresh;
- resource_size_t vl15off;
-
- /* pport structs are contiguous, allocated after devdata */
- ppd = (struct qib_pportdata *)(dd + 1);
- dd->pport = ppd;
- ppd[0].dd = dd;
- ppd[1].dd = dd;
-
- dd->cspec = (struct qib_chip_specific *)(ppd + 2);
-
- ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
- ppd[1].cpspec = &ppd[0].cpspec[1];
- ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
- ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
-
- spin_lock_init(&dd->cspec->rcvmod_lock);
- spin_lock_init(&dd->cspec->gpio_lock);
-
- /* we haven't yet set QIB_PRESENT, so use read directly */
- dd->revision = readq(&dd->kregbase[kr_revision]);
-
- if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
- qib_dev_err(dd,
- "Revision register read failure, giving up initialization\n");
- ret = -ENODEV;
- goto bail;
- }
- dd->flags |= QIB_PRESENT; /* now register routines work */
-
- dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
- dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
- dd->cspec->r1 = dd->minrev == 1;
-
- get_7322_chip_params(dd);
- features = qib_7322_boardname(dd);
-
- /* now that piobcnt2k and 4k set, we can allocate these */
- sbufcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
-
- dd->cspec->sendchkenable = bitmap_zalloc(sbufcnt, GFP_KERNEL);
- dd->cspec->sendgrhchk = bitmap_zalloc(sbufcnt, GFP_KERNEL);
- dd->cspec->sendibchk = bitmap_zalloc(sbufcnt, GFP_KERNEL);
- if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
- !dd->cspec->sendibchk) {
- ret = -ENOMEM;
- goto bail;
- }
-
- ppd = dd->pport;
-
- /*
- * GPIO bits for TWSI data and clock,
- * used for serial EEPROM.
- */
- dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
- dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
- dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
-
- dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
- QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
- QIB_HAS_THRESH_UPDATE |
- (sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
- dd->flags |= qib_special_trigger ?
- QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
-
- /*
- * Setup initial values. These may change when PAT is enabled, but
- * we need these to do initial chip register accesses.
- */
- qib_7322_set_baseaddrs(dd);
-
- mtu = ib_mtu_enum_to_int(qib_ibmtu);
- if (mtu == -1)
- mtu = QIB_DEFAULT_MTU;
-
- dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
- /* all hwerrors become interrupts, unless special purposed */
- dd->cspec->hwerrmask = ~0ULL;
- /* link_recovery setup causes these errors, so ignore them,
- * other than clearing them when they occur */
- dd->cspec->hwerrmask &=
- ~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
- SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
- HWE_MASK(LATriggered));
-
- for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
- struct qib_chippport_specific *cp = ppd->cpspec;
-
- ppd->link_speed_supported = features & PORT_SPD_CAP;
- features >>= PORT_SPD_CAP_SHIFT;
- if (!ppd->link_speed_supported) {
- /* single port mode (7340, or configured) */
- dd->skip_kctxt_mask |= 1 << pidx;
- if (pidx == 0) {
- /* Make sure port is disabled. */
- qib_write_kreg_port(ppd, krp_rcvctrl, 0);
- qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
- ppd[0] = ppd[1];
- dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
- IBSerdesPClkNotDetectMask_0)
- | SYM_MASK(HwErrMask,
- SDmaMemReadErrMask_0));
- dd->cspec->int_enable_mask &= ~(
- SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
- SYM_MASK(IntMask, SDmaIdleIntMask_0) |
- SYM_MASK(IntMask, SDmaProgressIntMask_0) |
- SYM_MASK(IntMask, SDmaIntMask_0) |
- SYM_MASK(IntMask, ErrIntMask_0) |
- SYM_MASK(IntMask, SendDoneIntMask_0));
- } else {
- /* Make sure port is disabled. */
- qib_write_kreg_port(ppd, krp_rcvctrl, 0);
- qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
- dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
- IBSerdesPClkNotDetectMask_1)
- | SYM_MASK(HwErrMask,
- SDmaMemReadErrMask_1));
- dd->cspec->int_enable_mask &= ~(
- SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
- SYM_MASK(IntMask, SDmaIdleIntMask_1) |
- SYM_MASK(IntMask, SDmaProgressIntMask_1) |
- SYM_MASK(IntMask, SDmaIntMask_1) |
- SYM_MASK(IntMask, ErrIntMask_1) |
- SYM_MASK(IntMask, SendDoneIntMask_1));
- }
- continue;
- }
-
- dd->num_pports++;
- ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
- if (ret) {
- dd->num_pports--;
- goto bail;
- }
-
- ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
- ppd->link_width_enabled = IB_WIDTH_4X;
- ppd->link_speed_enabled = ppd->link_speed_supported;
- /*
- * Set the initial values to a reasonable default; they will be set
- * for real when the link is up.
- */
- ppd->link_width_active = IB_WIDTH_4X;
- ppd->link_speed_active = QIB_IB_SDR;
- ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
- switch (qib_num_cfg_vls) {
- case 1:
- ppd->vls_supported = IB_VL_VL0;
- break;
- case 2:
- ppd->vls_supported = IB_VL_VL0_1;
- break;
- default:
- qib_devinfo(dd->pcidev,
- "Invalid num_vls %u, using 4 VLs\n",
- qib_num_cfg_vls);
- qib_num_cfg_vls = 4;
- fallthrough;
- case 4:
- ppd->vls_supported = IB_VL_VL0_3;
- break;
- case 8:
- if (mtu <= 2048)
- ppd->vls_supported = IB_VL_VL0_7;
- else {
- qib_devinfo(dd->pcidev,
- "Invalid num_vls %u for MTU %d , using 4 VLs\n",
- qib_num_cfg_vls, mtu);
- ppd->vls_supported = IB_VL_VL0_3;
- qib_num_cfg_vls = 4;
- }
- break;
- }
- ppd->vls_operational = ppd->vls_supported;
-
- init_waitqueue_head(&cp->autoneg_wait);
- INIT_DELAYED_WORK(&cp->autoneg_work,
- autoneg_7322_work);
- if (ppd->dd->cspec->r1)
- INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
-
- /*
- * For Mez and similar cards, no qsfp info, so do
- * the "cable info" setup here. Can be overridden
- * in adapter-specific routines.
- */
- if (!(dd->flags & QIB_HAS_QSFP)) {
- if (!IS_QMH(dd) && !IS_QME(dd))
- qib_devinfo(dd->pcidev,
- "IB%u:%u: Unknown mezzanine card type\n",
- dd->unit, ppd->port);
- cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
- /*
- * Choose center value as default tx serdes setting
- * until changed through module parameter.
- */
- ppd->cpspec->no_eep = IS_QMH(dd) ?
- TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
- } else
- cp->h1_val = H1_FORCE_VAL;
-
- /* Avoid writes to chip for mini_init */
- if (!qib_mini_init)
- write_7322_init_portregs(ppd);
-
- timer_setup(&cp->chase_timer, reenable_chase, 0);
-
- ppd++;
- }
-
- dd->rcvhdrentsize = qib_rcvhdrentsize ?
- qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
- dd->rcvhdrsize = qib_rcvhdrsize ?
- qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
- dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
-
- /* we always allocate at least 2048 bytes for eager buffers */
- dd->rcvegrbufsize = max(mtu, 2048);
- dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
-
- qib_7322_tidtemplate(dd);
-
- /*
- * We can request a receive interrupt for 1 or
- * more packets from current offset.
- */
- dd->rhdrhead_intr_off =
- (u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
-
- /* setup the stats timer; the add_timer is done at end of init */
- timer_setup(&dd->stats_timer, qib_get_7322_faststats, 0);
-
- dd->ureg_align = 0x10000; /* 64KB alignment */
-
- dd->piosize2kmax_dwords = dd->piosize2k >> 2;
-
- qib_7322_config_ctxts(dd);
- qib_set_ctxtcnt(dd);
-
- /*
- * We do not set WC on the VL15 buffers to avoid
- * a rare problem with unaligned writes from
- * interrupt-flushed store buffers, so we need
- * to map those separately here. We can't solve
- * this for the rarely used mtrr case.
- */
- ret = init_chip_wc_pat(dd, 0);
- if (ret)
- goto bail;
-
- /* vl15 buffers start just after the 4k buffers */
- vl15off = dd->physaddr + (dd->piobufbase >> 32) +
- dd->piobcnt4k * dd->align4k;
- dd->piovl15base = ioremap(vl15off,
- NUM_VL15_BUFS * dd->align4k);
- if (!dd->piovl15base) {
- ret = -ENOMEM;
- goto bail;
- }
-
- qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
-
- ret = 0;
- if (qib_mini_init)
- goto bail;
- if (!dd->num_pports) {
- qib_dev_err(dd, "No ports enabled, giving up initialization\n");
- goto bail; /* no error, so can still figure out why err */
- }
-
- write_7322_initregs(dd);
- ret = qib_create_ctxts(dd);
- init_7322_cntrnames(dd);
-
- updthresh = 8U; /* update threshold */
-
- /* Use all of the 4KB buffers for kernel SDMA, zero if !SDMA.
- * For other kernel use (sending SMI, MAD, and ACKs), reserve
- * the update threshold amount or 3 buffers, whichever is
- * greater; if SDMA is not enabled, all the 4k bufs go to the
- * kernel instead.
- * If the reservation were less than the update threshold, we
- * could wait a long time for an update. Coded this way because
- * we sometimes change the update threshold for various reasons,
- * and we want this to remain robust.
- */
- if (dd->flags & QIB_HAS_SEND_DMA) {
- dd->cspec->sdmabufcnt = dd->piobcnt4k;
- sbufs = updthresh > 3 ? updthresh : 3;
- } else {
- dd->cspec->sdmabufcnt = 0;
- sbufs = dd->piobcnt4k;
- }
- dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
- dd->cspec->sdmabufcnt;
- dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
- dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
- dd->last_pio = dd->cspec->lastbuf_for_pio;
- dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
- dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
-
- /*
- * If we have 16 user contexts, we will have 7 sbufs
- * per context, so reduce the update threshold to match. We
- * want to update before we actually run out, so at low pbufs/ctxt
- * we give ourselves some margin.
- */
- if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
- updthresh = dd->pbufsctxt - 2;
- dd->cspec->updthresh_dflt = updthresh;
- dd->cspec->updthresh = updthresh;
-
- /* before full enable, no interrupts, no locking needed */
- dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
- << SYM_LSB(SendCtrl, AvailUpdThld)) |
- SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
-
- dd->psxmitwait_supported = 1;
- dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
-bail:
- if (!dd->ctxtcnt)
- dd->ctxtcnt = 1; /* for other initialization code */
-
- return ret;
-}
-
-static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
- u32 *pbufnum)
-{
- u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
- struct qib_devdata *dd = ppd->dd;
-
- /* last is same for 2k and 4k, because we use 4k if all 2k busy */
- if (pbc & PBC_7322_VL15_SEND) {
- first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
- last = first;
- } else {
- if ((plen + 1) > dd->piosize2kmax_dwords)
- first = dd->piobcnt2k;
- else
- first = 0;
- last = dd->cspec->lastbuf_for_pio;
- }
- return qib_getsendbuf_range(dd, pbufnum, first, last);
-}
-
-static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
- u32 start)
-{
- qib_write_kreg_port(ppd, krp_psinterval, intv);
- qib_write_kreg_port(ppd, krp_psstart, start);
-}
-
-/*
- * Must be called with sdma_lock held, or before init finished.
- */
-static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
-{
- qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
-}
-
-/*
- * sdma_lock should be acquired before calling this routine
- */
-static void dump_sdma_7322_state(struct qib_pportdata *ppd)
-{
- u64 reg, reg1, reg2;
-
- reg = qib_read_kreg_port(ppd, krp_senddmastatus);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmastatus: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_sendctrl);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA sendctrl: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmabase);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmabase: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
- reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
- reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmabufmask 0:%llx 1:%llx 2:%llx\n",
- reg, reg1, reg2);
-
- /* get bufuse bits, clear them, and print them again if non-zero */
- reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
- qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
- reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
- qib_write_kreg_port(ppd, krp_senddmabuf_use1, reg1);
- reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
- qib_write_kreg_port(ppd, krp_senddmabuf_use2, reg2);
- /* 0 and 1 should always be zero, so print as short form */
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA current senddmabuf_use 0:%llx 1:%llx 2:%llx\n",
- reg, reg1, reg2);
- reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
- reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
- reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
- /* 0 and 1 should always be zero, so print as short form */
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA cleared senddmabuf_use 0:%llx 1:%llx 2:%llx\n",
- reg, reg1, reg2);
-
- reg = qib_read_kreg_port(ppd, krp_senddmatail);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmatail: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmahead);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmahead: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmaheadaddr: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmalengen);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmalengen: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmadesccnt: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmaidlecnt: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmapriorityhld: 0x%016llx\n", reg);
-
- reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA senddmareloadcnt: 0x%016llx\n", reg);
-
- dump_sdma_state(ppd);
-}
-
-static struct sdma_set_state_action sdma_7322_action_table[] = {
- [qib_sdma_state_s00_hw_down] = {
- .go_s99_running_tofalse = 1,
- .op_enable = 0,
- .op_intenable = 0,
- .op_halt = 0,
- .op_drain = 0,
- },
- [qib_sdma_state_s10_hw_start_up_wait] = {
- .op_enable = 0,
- .op_intenable = 1,
- .op_halt = 1,
- .op_drain = 0,
- },
- [qib_sdma_state_s20_idle] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 1,
- .op_drain = 0,
- },
- [qib_sdma_state_s30_sw_clean_up_wait] = {
- .op_enable = 0,
- .op_intenable = 1,
- .op_halt = 1,
- .op_drain = 0,
- },
- [qib_sdma_state_s40_hw_clean_up_wait] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 1,
- .op_drain = 0,
- },
- [qib_sdma_state_s50_hw_halt_wait] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 1,
- .op_drain = 1,
- },
- [qib_sdma_state_s99_running] = {
- .op_enable = 1,
- .op_intenable = 1,
- .op_halt = 0,
- .op_drain = 0,
- .go_s99_running_totrue = 1,
- },
-};
-
-static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
-{
- ppd->sdma_state.set_state_action = sdma_7322_action_table;
-}
-
-static int init_sdma_7322_regs(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- unsigned lastbuf, erstbuf;
- u64 senddmabufmask[3] = { 0 };
- int n;
-
- qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
- qib_sdma_7322_setlengen(ppd);
- qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
- qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
- qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
- qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
-
- if (dd->num_pports)
- n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
- else
- n = dd->cspec->sdmabufcnt; /* failsafe for init */
- erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
- ((dd->num_pports == 1 || ppd->port == 2) ? n :
- dd->cspec->sdmabufcnt);
- lastbuf = erstbuf + n;
-
- ppd->sdma_state.first_sendbuf = erstbuf;
- ppd->sdma_state.last_sendbuf = lastbuf;
- for (; erstbuf < lastbuf; ++erstbuf) {
- unsigned word = erstbuf / BITS_PER_LONG;
- unsigned bit = erstbuf & (BITS_PER_LONG - 1);
-
- senddmabufmask[word] |= 1ULL << bit;
- }
- qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
- qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
- qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
- return 0;
-}
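-
-/*
- * Worked example with hypothetical counts: with piobcnt2k + piobcnt4k
- * = 160, sdmabufcnt = 32 and two ports, n = 16; port 1 gets erstbuf =
- * 160 - 32 = 128 (buffers 128..143) and port 2 gets erstbuf = 160 -
- * 16 = 144 (buffers 144..159). The loop above then sets one
- * senddmabufmask bit per owned buffer.
- */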
-
-/* sdma_lock must be held */
-static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- int sane;
- int use_dmahead;
- u16 swhead;
- u16 swtail;
- u16 cnt;
- u16 hwhead;
-
- use_dmahead = __qib_sdma_running(ppd) &&
- (dd->flags & QIB_HAS_SDMA_TIMEOUT);
-retry:
- hwhead = use_dmahead ?
- (u16) le64_to_cpu(*ppd->sdma_head_dma) :
- (u16) qib_read_kreg_port(ppd, krp_senddmahead);
-
- swhead = ppd->sdma_descq_head;
- swtail = ppd->sdma_descq_tail;
- cnt = ppd->sdma_descq_cnt;
-
- if (swhead < swtail)
- /* not wrapped */
- sane = (hwhead >= swhead) && (hwhead <= swtail);
- else if (swhead > swtail)
- /* wrapped around */
- sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
- (hwhead <= swtail);
- else
- /* empty */
- sane = (hwhead == swhead);
-
- if (unlikely(!sane)) {
- if (use_dmahead) {
- /* try one more time, directly from the register */
- use_dmahead = 0;
- goto retry;
- }
- /* proceed as if no progress */
- hwhead = swhead;
- }
-
- return hwhead;
-}
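-
-/*
- * Worked example for the sanity check above (hypothetical numbers):
- * with a ring of cnt = 256, swhead = 200 and swtail = 50 the ring is
- * wrapped, so a hardware head is sane only in 200..255 or 0..50;
- * e.g. hwhead = 120 fails both arms and forces the direct register
- * re-read (or is treated as "no progress").
- */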
-
-static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
-{
- u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
-
- return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
- (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
- !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
- !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
-}
-
-/*
- * Compute the amount of delay before sending the next packet if the
- * port's send rate differs from the static rate set for the QP.
- * The delay affects the next packet and the amount of the delay is
- * based on the length of this packet.
- */
-static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
- u8 srate, u8 vl)
-{
- u8 snd_mult = ppd->delay_mult;
- u8 rcv_mult = ib_rate_to_delay[srate];
- u32 ret;
-
- ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
-
- /* Indicate VL15, else set the VL in the control word */
- if (vl == 15)
- ret |= PBC_7322_VL15_SEND_CTRL;
- else
- ret |= vl << PBC_VL_NUM_LSB;
- ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
-
- return ret;
-}
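-
-/*
- * Worked example (hypothetical values): when the receiver's static
- * rate is slower than the port (rcv_mult > snd_mult), the delay field
- * is ((plen + 1) >> 1) * snd_mult; for plen = 257 dwords and snd_mult
- * = 1 that is 129. A faster-or-equal receiver yields 0, and only the
- * VL and port-select bits are packed into the PBC word.
- */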
-
-/*
- * Enable the per-port VL15 send buffers for use.
- * They follow the rest of the buffers, without a config parameter.
- * This was in initregs, but that is done before the shadow
- * is set up, and this has to be done after the shadow is
- * set up.
- */
-static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
-{
- unsigned vl15bufs;
-
- vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
- qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
- TXCHK_CHG_TYPE_KERN, NULL);
-}
-
-static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
-{
- if (rcd->ctxt < NUM_IB_PORTS) {
- if (rcd->dd->num_pports > 1) {
- rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
- rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
- } else {
- rcd->rcvegrcnt = KCTXT0_EGRCNT;
- rcd->rcvegr_tid_base = 0;
- }
- } else {
- rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
- rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
- (rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
- }
-}
-
-#define QTXSLEEPS 5000
-static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
- u32 len, u32 which, struct qib_ctxtdata *rcd)
-{
- int i;
- const int last = start + len - 1;
- const int lastr = last / BITS_PER_LONG;
- u32 sleeps = 0;
- int wait = rcd != NULL;
- unsigned long flags;
-
- while (wait) {
- unsigned long shadow = 0;
- int cstart, previ = -1;
-
- /*
- * when flipping from kernel to user, we can't change
- * the checking type if the buffer is allocated to the
- * driver. It's OK the other direction, because it's
- * from close, and we have just disarm'ed all the
- * buffers. All the kernel to kernel changes are also
- * OK.
- */
- for (cstart = start; cstart <= last; cstart++) {
- i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
- / BITS_PER_LONG;
- if (i != previ) {
- shadow = (unsigned long)
- le64_to_cpu(dd->pioavailregs_dma[i]);
- previ = i;
- }
- if (test_bit(((2 * cstart) +
- QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
- % BITS_PER_LONG, &shadow))
- break;
- }
-
- if (cstart > last)
- break;
-
- if (sleeps == QTXSLEEPS)
- break;
- /* make sure we see an updated copy next time around */
- sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
- sleeps++;
- msleep(20);
- }
-
- switch (which) {
- case TXCHK_CHG_TYPE_DIS1:
- /*
- * disable checking on a range; used by diags; just
- * one buffer, but still written generically
- */
- for (i = start; i <= last; i++)
- clear_bit(i, dd->cspec->sendchkenable);
- break;
-
- case TXCHK_CHG_TYPE_ENAB1:
- /*
- * (re)enable checking on a range; used by diags; just
- * one buffer, but still written generically; read
- * scratch to be sure buffer actually triggered, not
- * just flushed from processor.
- */
- qib_read_kreg32(dd, kr_scratch);
- for (i = start; i <= last; i++)
- set_bit(i, dd->cspec->sendchkenable);
- break;
-
- case TXCHK_CHG_TYPE_KERN:
- /* usable by kernel */
- for (i = start; i <= last; i++) {
- set_bit(i, dd->cspec->sendibchk);
- clear_bit(i, dd->cspec->sendgrhchk);
- }
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- /* see if we need to raise avail update threshold */
- for (i = dd->first_user_ctxt;
- dd->cspec->updthresh != dd->cspec->updthresh_dflt
- && i < dd->cfgctxts; i++)
- if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
- ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
- < dd->cspec->updthresh_dflt)
- break;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- if (i == dd->cfgctxts) {
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- dd->cspec->updthresh = dd->cspec->updthresh_dflt;
- dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
- dd->sendctrl |= (dd->cspec->updthresh &
- SYM_RMASK(SendCtrl, AvailUpdThld)) <<
- SYM_LSB(SendCtrl, AvailUpdThld);
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
- }
- break;
-
- case TXCHK_CHG_TYPE_USER:
- /* for user process */
- for (i = start; i <= last; i++) {
- clear_bit(i, dd->cspec->sendibchk);
- set_bit(i, dd->cspec->sendgrhchk);
- }
- spin_lock_irqsave(&dd->sendctrl_lock, flags);
- if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
- / rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
- dd->cspec->updthresh = (rcd->piocnt /
- rcd->subctxt_cnt) - 1;
- dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
- dd->sendctrl |= (dd->cspec->updthresh &
- SYM_RMASK(SendCtrl, AvailUpdThld))
- << SYM_LSB(SendCtrl, AvailUpdThld);
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
- } else
- spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
- break;
-
- default:
- break;
- }
-
- for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
- qib_write_kreg(dd, kr_sendcheckmask + i,
- dd->cspec->sendchkenable[i]);
-
- for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
- qib_write_kreg(dd, kr_sendgrhcheckmask + i,
- dd->cspec->sendgrhchk[i]);
- qib_write_kreg(dd, kr_sendibpktmask + i,
- dd->cspec->sendibchk[i]);
- }
-
- /*
- * Be sure whatever we did was seen by the chip and acted upon,
- * before we return. Mostly important for which >= 2.
- */
- qib_read_kreg32(dd, kr_scratch);
-}
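-
-/*
- * Worked example for the busy-bit probe above, assuming
- * QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT is 1 (2 status bits per buffer,
- * busy in the odd position): buffer 70 maps to bit 2 * 70 + 1 = 141
- * of the pioavail shadow, i.e. word 141 / 64 = 2, bit 141 % 64 = 13
- * with 64-bit BITS_PER_LONG.
- */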
-
-
-/* useful for trigger analyzers, etc. */
-static void writescratch(struct qib_devdata *dd, u32 val)
-{
- qib_write_kreg(dd, kr_scratch, val);
-}
-
-/* Dummy for now, use chip regs soon */
-static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
-{
- return -ENXIO;
-}
-
-/**
- * qib_init_iba7322_funcs - set up the chip-specific function pointers
- * @pdev: the pci_dev for qlogic_ib device
- * @ent: pci_device_id struct for this dev
- *
- * Also allocates, inits, and returns the devdata struct for this
- * device instance
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- */
-struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct qib_devdata *dd;
- int ret, i;
- u32 tabsize, actual_cnt = 0;
-
- dd = qib_alloc_devdata(pdev,
- NUM_IB_PORTS * sizeof(struct qib_pportdata) +
- sizeof(struct qib_chip_specific) +
- NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
- if (IS_ERR(dd))
- goto bail;
-
- dd->f_bringup_serdes = qib_7322_bringup_serdes;
- dd->f_cleanup = qib_setup_7322_cleanup;
- dd->f_clear_tids = qib_7322_clear_tids;
- dd->f_free_irq = qib_7322_free_irq;
- dd->f_get_base_info = qib_7322_get_base_info;
- dd->f_get_msgheader = qib_7322_get_msgheader;
- dd->f_getsendbuf = qib_7322_getsendbuf;
- dd->f_gpio_mod = gpio_7322_mod;
- dd->f_eeprom_wen = qib_7322_eeprom_wen;
- dd->f_hdrqempty = qib_7322_hdrqempty;
- dd->f_ib_updown = qib_7322_ib_updown;
- dd->f_init_ctxt = qib_7322_init_ctxt;
- dd->f_initvl15_bufs = qib_7322_initvl15_bufs;
- dd->f_intr_fallback = qib_7322_intr_fallback;
- dd->f_late_initreg = qib_late_7322_initreg;
- dd->f_setpbc_control = qib_7322_setpbc_control;
- dd->f_portcntr = qib_portcntr_7322;
- dd->f_put_tid = qib_7322_put_tid;
- dd->f_quiet_serdes = qib_7322_mini_quiet_serdes;
- dd->f_rcvctrl = rcvctrl_7322_mod;
- dd->f_read_cntrs = qib_read_7322cntrs;
- dd->f_read_portcntrs = qib_read_7322portcntrs;
- dd->f_reset = qib_do_7322_reset;
- dd->f_init_sdma_regs = init_sdma_7322_regs;
- dd->f_sdma_busy = qib_sdma_7322_busy;
- dd->f_sdma_gethead = qib_sdma_7322_gethead;
- dd->f_sdma_sendctrl = qib_7322_sdma_sendctrl;
- dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
- dd->f_sdma_update_tail = qib_sdma_update_7322_tail;
- dd->f_sendctrl = sendctrl_7322_mod;
- dd->f_set_armlaunch = qib_set_7322_armlaunch;
- dd->f_set_cntr_sample = qib_set_cntr_7322_sample;
- dd->f_iblink_state = qib_7322_iblink_state;
- dd->f_ibphys_portstate = qib_7322_phys_portstate;
- dd->f_get_ib_cfg = qib_7322_get_ib_cfg;
- dd->f_set_ib_cfg = qib_7322_set_ib_cfg;
- dd->f_set_ib_loopback = qib_7322_set_loopback;
- dd->f_get_ib_table = qib_7322_get_ib_table;
- dd->f_set_ib_table = qib_7322_set_ib_table;
- dd->f_set_intr_state = qib_7322_set_intr_state;
- dd->f_setextled = qib_setup_7322_setextled;
- dd->f_txchk_change = qib_7322_txchk_change;
- dd->f_update_usrhead = qib_update_7322_usrhead;
- dd->f_wantpiobuf_intr = qib_wantpiobuf_7322_intr;
- dd->f_xgxs_reset = qib_7322_mini_pcs_reset;
- dd->f_sdma_hw_clean_up = qib_7322_sdma_hw_clean_up;
- dd->f_sdma_hw_start_up = qib_7322_sdma_hw_start_up;
- dd->f_sdma_init_early = qib_7322_sdma_init_early;
- dd->f_writescratch = writescratch;
- dd->f_tempsense_rd = qib_7322_tempsense_rd;
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dd->f_notify_dca = qib_7322_notify_dca;
-#endif
- /*
- * Do remaining PCIe setup and save PCIe values in dd.
- * Any error printing is already done by the init code.
- * On return, we have the chip mapped, but chip registers
- * are not set up until start of qib_init_7322_variables.
- */
- ret = qib_pcie_ddinit(dd, pdev, ent);
- if (ret < 0)
- goto bail_free;
-
- /* initialize chip-specific variables */
- ret = qib_init_7322_variables(dd);
- if (ret)
- goto bail_cleanup;
-
- if (qib_mini_init || !dd->num_pports)
- goto bail;
-
- /*
- * Determine number of vectors we want; depends on port count
- * and number of configured kernel receive queues actually used.
- * Should also depend on whether sdma is enabled or not, but
- * that's such a rare testing case it's not worth worrying about.
- */
- tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
- for (i = 0; i < tabsize; i++)
- if ((i < ARRAY_SIZE(irq_table) &&
- irq_table[i].port <= dd->num_pports) ||
- (i >= ARRAY_SIZE(irq_table) &&
- dd->rcd[i - ARRAY_SIZE(irq_table)]))
- actual_cnt++;
- /* reduce by ctxt's < 2 */
- if (qib_krcvq01_no_msi)
- actual_cnt -= dd->num_pports;
-
- tabsize = actual_cnt;
- dd->cspec->msix_entries = kcalloc(tabsize,
- sizeof(struct qib_msix_entry),
- GFP_KERNEL);
- if (!dd->cspec->msix_entries)
- tabsize = 0;
-
- if (qib_pcie_params(dd, 8, &tabsize))
- qib_dev_err(dd,
- "Failed to setup PCIe or interrupts; continuing anyway\n");
- /* may be less than we wanted, if not enough available */
- dd->cspec->num_msix_entries = tabsize;
-
- /* setup interrupt handler */
- qib_setup_7322_interrupt(dd, 1);
-
- /* clear diagctrl register, in case diags were running and crashed */
- qib_write_kreg(dd, kr_hwdiagctrl, 0);
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- if (!dca_add_requester(&pdev->dev)) {
- qib_devinfo(dd->pcidev, "DCA enabled\n");
- dd->flags |= QIB_DCA_ENABLED;
- qib_setup_dca(dd);
- }
-#endif
- goto bail;
-
-bail_cleanup:
- qib_pcie_ddcleanup(dd);
-bail_free:
- qib_free_devdata(dd);
- dd = ERR_PTR(ret);
-bail:
- return dd;
-}
-
-/*
- * Set the table entry at the specified index from the specified table.
- * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
- * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
- * 'ridx' below addresses the correct entry, while its 4 LSBs select the
- * corresponding entry (one of TXDDS_TABLE_SZ) within the selected table.
- */
-#define DDS_ENT_AMP_LSB 14
-#define DDS_ENT_MAIN_LSB 9
-#define DDS_ENT_POST_LSB 5
-#define DDS_ENT_PRE_XTRA_LSB 3
-#define DDS_ENT_PRE_LSB 0
-
-/*
- * Set one entry in the TxDDS table for spec'd port
- * ridx picks one of the entries, while tp points
- * to the appropriate table entry.
- */
-static void set_txdds(struct qib_pportdata *ppd, int ridx,
- const struct txdds_ent *tp)
-{
- struct qib_devdata *dd = ppd->dd;
- u32 pack_ent;
- int regidx;
-
- /* Get correct offset in chip-space, and in source table */
- regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
- /*
- * We do not use qib_write_kreg_port() because it was intended
- * only for registers in the lower "port specific" pages.
- * So do index calculation by hand.
- */
- if (ppd->hw_pidx)
- regidx += (dd->palign / sizeof(u64));
-
- pack_ent = tp->amp << DDS_ENT_AMP_LSB;
- pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
- pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
- pack_ent |= tp->post << DDS_ENT_POST_LSB;
- qib_write_kreg(dd, regidx, pack_ent);
- /* Prevent back-to-back writes by hitting scratch */
- qib_write_kreg(ppd->dd, kr_scratch, 0);
-}
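-
-/*
- * Illustrative sketch (an editor's addition, not part of the original
- * driver): how set_txdds() packs one txdds_ent into the 16-bit value
- * written to the DDS map register; field widths are implied by the
- * adjacent DDS_ENT_*_LSB definitions. For example, the QDR "2 dB"
- * entry { amp 0, pre 1, main 0, post 7 } packs to 0x00e1.
- */
-static inline u32 pack_txdds_ent_example(const struct txdds_ent *tp)
-{
-	return (tp->amp << DDS_ENT_AMP_LSB) |
-	       (tp->main << DDS_ENT_MAIN_LSB) |
-	       (tp->post << DDS_ENT_POST_LSB) |
-	       (tp->pre << DDS_ENT_PRE_LSB);
-}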
-
-static const struct vendor_txdds_ent vendor_txdds[] = {
- { /* Amphenol 1m 30awg NoEq */
- { 0x41, 0x50, 0x48 }, "584470002 ",
- { 10, 0, 0, 5 }, { 10, 0, 0, 9 }, { 7, 1, 0, 13 },
- },
- { /* Amphenol 3m 28awg NoEq */
- { 0x41, 0x50, 0x48 }, "584470004 ",
- { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 7, 15 },
- },
- { /* Finisar 3m OM2 Optical */
- { 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
- { 0, 0, 0, 3 }, { 0, 0, 0, 4 }, { 0, 0, 0, 13 },
- },
- { /* Finisar 30m OM2 Optical */
- { 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
- { 0, 0, 0, 1 }, { 0, 0, 0, 5 }, { 0, 0, 0, 11 },
- },
- { /* Finisar Default OM2 Optical */
- { 0x00, 0x90, 0x65 }, NULL,
- { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 0, 0, 12 },
- },
- { /* Gore 1m 30awg NoEq */
- { 0x00, 0x21, 0x77 }, "QSN3300-1 ",
- { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 0, 15 },
- },
- { /* Gore 2m 30awg NoEq */
- { 0x00, 0x21, 0x77 }, "QSN3300-2 ",
- { 0, 0, 0, 8 }, { 0, 0, 0, 10 }, { 0, 1, 7, 15 },
- },
- { /* Gore 1m 28awg NoEq */
- { 0x00, 0x21, 0x77 }, "QSN3800-1 ",
- { 0, 0, 0, 6 }, { 0, 0, 0, 8 }, { 0, 1, 0, 15 },
- },
- { /* Gore 3m 28awg NoEq */
- { 0x00, 0x21, 0x77 }, "QSN3800-3 ",
- { 0, 0, 0, 9 }, { 0, 0, 0, 13 }, { 0, 1, 7, 15 },
- },
- { /* Gore 5m 24awg Eq */
- { 0x00, 0x21, 0x77 }, "QSN7000-5 ",
- { 0, 0, 0, 7 }, { 0, 0, 0, 9 }, { 0, 1, 3, 15 },
- },
- { /* Gore 7m 24awg Eq */
- { 0x00, 0x21, 0x77 }, "QSN7000-7 ",
- { 0, 0, 0, 9 }, { 0, 0, 0, 11 }, { 0, 2, 6, 15 },
- },
- { /* Gore 5m 26awg Eq */
- { 0x00, 0x21, 0x77 }, "QSN7600-5 ",
- { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 0, 1, 9, 13 },
- },
- { /* Gore 7m 26awg Eq */
- { 0x00, 0x21, 0x77 }, "QSN7600-7 ",
- { 0, 0, 0, 8 }, { 0, 0, 0, 11 }, { 10, 1, 8, 15 },
- },
- { /* Intersil 12m 24awg Active */
- { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
- { 0, 0, 0, 2 }, { 0, 0, 0, 5 }, { 0, 3, 0, 9 },
- },
- { /* Intersil 10m 28awg Active */
- { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
- { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 2, 0, 2 },
- },
- { /* Intersil 7m 30awg Active */
- { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
- { 0, 0, 0, 6 }, { 0, 0, 0, 4 }, { 0, 1, 0, 3 },
- },
- { /* Intersil 5m 32awg Active */
- { 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
- { 0, 0, 0, 6 }, { 0, 0, 0, 6 }, { 0, 2, 0, 8 },
- },
- { /* Intersil Default Active */
- { 0x00, 0x30, 0xB4 }, NULL,
- { 0, 0, 0, 6 }, { 0, 0, 0, 5 }, { 0, 2, 0, 5 },
- },
- { /* Luxtera 20m Active Optical */
- { 0x00, 0x25, 0x63 }, NULL,
- { 0, 0, 0, 5 }, { 0, 0, 0, 8 }, { 0, 2, 0, 12 },
- },
- { /* Molex 1M Cu loopback */
- { 0x00, 0x09, 0x3A }, "74763-0025 ",
- { 2, 2, 6, 15 }, { 2, 2, 6, 15 }, { 2, 2, 6, 15 },
- },
- { /* Molex 2m 28awg NoEq */
- { 0x00, 0x09, 0x3A }, "74757-2201 ",
- { 0, 0, 0, 6 }, { 0, 0, 0, 9 }, { 0, 1, 1, 15 },
- },
-};
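-
-/*
- * Matching note (an editor's addition): the entries above are keyed by
- * the 3-byte QSFP vendor OUI plus an optional part number; a NULL
- * partnum acts as a vendor-wide default (the Finisar, Intersil and
- * Luxtera fallback rows), so specific part numbers must precede the
- * default for find_best_ent()'s linear scan to prefer them.
- */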
-
-static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
- /* amp, pre, main, post */
- { 2, 2, 15, 6 }, /* Loopback */
- { 0, 0, 0, 1 }, /* 2 dB */
- { 0, 0, 0, 2 }, /* 3 dB */
- { 0, 0, 0, 3 }, /* 4 dB */
- { 0, 0, 0, 4 }, /* 5 dB */
- { 0, 0, 0, 5 }, /* 6 dB */
- { 0, 0, 0, 6 }, /* 7 dB */
- { 0, 0, 0, 7 }, /* 8 dB */
- { 0, 0, 0, 8 }, /* 9 dB */
- { 0, 0, 0, 9 }, /* 10 dB */
- { 0, 0, 0, 10 }, /* 11 dB */
- { 0, 0, 0, 11 }, /* 12 dB */
- { 0, 0, 0, 12 }, /* 13 dB */
- { 0, 0, 0, 13 }, /* 14 dB */
- { 0, 0, 0, 14 }, /* 15 dB */
- { 0, 0, 0, 15 }, /* 16 dB */
-};
-
-static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
- /* amp, pre, main, post */
- { 2, 2, 15, 6 }, /* Loopback */
- { 0, 0, 0, 8 }, /* 2 dB */
- { 0, 0, 0, 8 }, /* 3 dB */
- { 0, 0, 0, 9 }, /* 4 dB */
- { 0, 0, 0, 9 }, /* 5 dB */
- { 0, 0, 0, 10 }, /* 6 dB */
- { 0, 0, 0, 10 }, /* 7 dB */
- { 0, 0, 0, 11 }, /* 8 dB */
- { 0, 0, 0, 11 }, /* 9 dB */
- { 0, 0, 0, 12 }, /* 10 dB */
- { 0, 0, 0, 12 }, /* 11 dB */
- { 0, 0, 0, 13 }, /* 12 dB */
- { 0, 0, 0, 13 }, /* 13 dB */
- { 0, 0, 0, 14 }, /* 14 dB */
- { 0, 0, 0, 14 }, /* 15 dB */
- { 0, 0, 0, 15 }, /* 16 dB */
-};
-
-static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
- /* amp, pre, main, post */
- { 2, 2, 15, 6 }, /* Loopback */
- { 0, 1, 0, 7 }, /* 2 dB (also QMH7342) */
- { 0, 1, 0, 9 }, /* 3 dB (also QMH7342) */
- { 0, 1, 0, 11 }, /* 4 dB */
- { 0, 1, 0, 13 }, /* 5 dB */
- { 0, 1, 0, 15 }, /* 6 dB */
- { 0, 1, 3, 15 }, /* 7 dB */
- { 0, 1, 7, 15 }, /* 8 dB */
- { 0, 1, 7, 15 }, /* 9 dB */
- { 0, 1, 8, 15 }, /* 10 dB */
- { 0, 1, 9, 15 }, /* 11 dB */
- { 0, 1, 10, 15 }, /* 12 dB */
- { 0, 2, 6, 15 }, /* 13 dB */
- { 0, 2, 7, 15 }, /* 14 dB */
- { 0, 2, 8, 15 }, /* 15 dB */
- { 0, 2, 9, 15 }, /* 16 dB */
-};
-
-/*
- * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
- * These are mostly used for mez cards going through connectors
- * and backplane traces, but can be used to add other "unusual"
- * table values as well.
- */
-static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
- /* amp, pre, main, post */
- { 0, 0, 0, 1 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 1 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 2 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 2 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 3 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 4 }, /* QMH7342 backplane settings */
- { 0, 1, 4, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 3, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 12 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 14 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 2, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 7 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 6 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 8 }, /* QME7342 backplane settings 1.1 */
-};
-
-static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
- /* amp, pre, main, post */
- { 0, 0, 0, 7 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 7 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 8 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 8 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 9 }, /* QMH7342 backplane settings */
- { 0, 0, 0, 10 }, /* QMH7342 backplane settings */
- { 0, 1, 4, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 3, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 12 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 14 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 2, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 7 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 6 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 8 }, /* QME7342 backplane settings 1.1 */
-};
-
-static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
- /* amp, pre, main, post */
- { 0, 1, 0, 4 }, /* QMH7342 backplane settings */
- { 0, 1, 0, 5 }, /* QMH7342 backplane settings */
- { 0, 1, 0, 6 }, /* QMH7342 backplane settings */
- { 0, 1, 0, 8 }, /* QMH7342 backplane settings */
- { 0, 1, 0, 10 }, /* QMH7342 backplane settings */
- { 0, 1, 0, 12 }, /* QMH7342 backplane settings */
- { 0, 1, 4, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 3, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 12 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 14 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 2, 15 }, /* QME7342 backplane settings 1.0 */
- { 0, 1, 0, 11 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 7 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 9 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 6 }, /* QME7342 backplane settings 1.1 */
- { 0, 1, 0, 8 }, /* QME7342 backplane settings 1.1 */
-};
-
-static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
- /* amp, pre, main, post */
- { 0, 0, 0, 0 }, /* QME7342 mfg settings */
- { 0, 0, 0, 6 }, /* QME7342 P2 mfg settings */
-};
-
-static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
- unsigned atten)
-{
- /*
- * The attenuation table starts at 2dB for entry 1,
- * with entry 0 being the loopback entry.
- */
- if (atten <= 2)
- atten = 1;
- else if (atten > TXDDS_TABLE_SZ)
- atten = TXDDS_TABLE_SZ - 1;
- else
- atten--;
- return txdds + atten;
-}
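-
-/*
- * Worked mapping (an editor's addition): a requested attenuation of
- * N dB selects table index N - 1, clamped to the 2..16 dB entries
- * (index 0 is the loopback entry), e.g.:
- *
- *   atten  0..2 -> index  1  ("2 dB")
- *   atten  7    -> index  6  ("7 dB")
- *   atten 20    -> index 15  ("16 dB")
- *
- * assuming TXDDS_TABLE_SZ == 16, as the table comments suggest.
- */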
-
-/*
- * if override is set, the module parameter txselect has a value
- * for this specific port, so use it, rather than our normal mechanism.
- */
-static void find_best_ent(struct qib_pportdata *ppd,
- const struct txdds_ent **sdr_dds,
- const struct txdds_ent **ddr_dds,
- const struct txdds_ent **qdr_dds, int override)
-{
- struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
- int idx;
-
- /* Search table of known cables */
- for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
- const struct vendor_txdds_ent *v = vendor_txdds + idx;
-
- if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
- (!v->partnum ||
- !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
- *sdr_dds = &v->sdr;
- *ddr_dds = &v->ddr;
- *qdr_dds = &v->qdr;
- return;
- }
- }
-
-	/*
-	 * Active cables don't have attenuation so we only set SERDES
-	 * settings to account for the attenuation of the board traces.
-	 */
- if (!override && QSFP_IS_ACTIVE(qd->tech)) {
- *sdr_dds = txdds_sdr + ppd->dd->board_atten;
- *ddr_dds = txdds_ddr + ppd->dd->board_atten;
- *qdr_dds = txdds_qdr + ppd->dd->board_atten;
- return;
- }
-
- if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
- qd->atten[1])) {
- *sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
- *ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
- *qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
- return;
- } else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
- /*
- * If we have no (or incomplete) data from the cable
- * EEPROM, or no QSFP, or override is set, use the
-		 * module parameter value to index into the attenuation
- * table.
- */
- idx = ppd->cpspec->no_eep;
- *sdr_dds = &txdds_sdr[idx];
- *ddr_dds = &txdds_ddr[idx];
- *qdr_dds = &txdds_qdr[idx];
- } else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
- /* similar to above, but index into the "extra" table. */
- idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
- *sdr_dds = &txdds_extra_sdr[idx];
- *ddr_dds = &txdds_extra_ddr[idx];
- *qdr_dds = &txdds_extra_qdr[idx];
- } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
- ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
- TXDDS_MFG_SZ)) {
- idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
- pr_info("IB%u:%u use idx %u into txdds_mfg\n",
- ppd->dd->unit, ppd->port, idx);
- *sdr_dds = &txdds_extra_mfg[idx];
- *ddr_dds = &txdds_extra_mfg[idx];
- *qdr_dds = &txdds_extra_mfg[idx];
- } else {
- /* this shouldn't happen, it's range checked */
- *sdr_dds = txdds_sdr + qib_long_atten;
- *ddr_dds = txdds_ddr + qib_long_atten;
- *qdr_dds = txdds_qdr + qib_long_atten;
- }
-}
-
-static void init_txdds_table(struct qib_pportdata *ppd, int override)
-{
- const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
- struct txdds_ent *dds;
- int idx;
- int single_ent = 0;
-
- find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
-
- /* for mez cards or override, use the selected value for all entries */
- if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
- single_ent = 1;
-
- /* Fill in the first entry with the best entry found. */
- set_txdds(ppd, 0, sdr_dds);
- set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
- set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
- if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
- QIBL_LINKACTIVE)) {
- dds = (struct txdds_ent *)(ppd->link_speed_active ==
- QIB_IB_QDR ? qdr_dds :
- (ppd->link_speed_active ==
- QIB_IB_DDR ? ddr_dds : sdr_dds));
- write_tx_serdes_param(ppd, dds);
- }
-
- /* Fill in the remaining entries with the default table values. */
- for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
- set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
- set_txdds(ppd, idx + TXDDS_TABLE_SZ,
- single_ent ? ddr_dds : txdds_ddr + idx);
- set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
- single_ent ? qdr_dds : txdds_qdr + idx);
- }
-}
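-
-/*
- * Layout sketch (an editor's addition), assuming TXDDS_TABLE_SZ == 16:
- * the per-port DDS map programmed above holds entries 0..15 for SDR,
- * 16..31 for DDR and 32..47 for QDR. With single_ent set (no QSFP, or
- * a txselect override), all sixteen entries of each speed's block are
- * filled with the single "best" value for that speed.
- */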
-
-#define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
-#define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
-#define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
-#define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
-#define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
-#define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
-#define AHB_TRANS_TRIES 10
-
-/*
- * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
- * 5=subsystem, which is why most calls have "chan + (chan >> 1)"
- * for the channel argument.
- */
-static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
- u32 data, u32 mask)
-{
- u32 rd_data, wr_data, sz_mask;
- u64 trans, acc, prev_acc;
- u32 ret = 0xBAD0BAD;
- int tries;
-
- prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
- /* From this point on, make sure we return access */
- acc = (quad << 1) | 1;
- qib_write_kreg(dd, KR_AHB_ACC, acc);
-
- for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
- trans = qib_read_kreg64(dd, KR_AHB_TRANS);
- if (trans & AHB_TRANS_RDY)
- break;
- }
- if (tries >= AHB_TRANS_TRIES) {
- qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
- goto bail;
- }
-
- /* If mask is not all 1s, we need to read, but different SerDes
- * entities have different sizes
- */
- sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
- wr_data = data & mask & sz_mask;
- if ((~mask & sz_mask) != 0) {
- trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
- qib_write_kreg(dd, KR_AHB_TRANS, trans);
-
- for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
- trans = qib_read_kreg64(dd, KR_AHB_TRANS);
- if (trans & AHB_TRANS_RDY)
- break;
- }
- if (tries >= AHB_TRANS_TRIES) {
- qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
- AHB_TRANS_TRIES);
- goto bail;
- }
- /* Re-read in case host split reads and read data first */
- trans = qib_read_kreg64(dd, KR_AHB_TRANS);
- rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
- wr_data |= (rd_data & ~mask & sz_mask);
- }
-
- /* If mask is not zero, we need to write. */
- if (mask & sz_mask) {
- trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
- trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
- trans |= AHB_WR;
- qib_write_kreg(dd, KR_AHB_TRANS, trans);
-
- for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
- trans = qib_read_kreg64(dd, KR_AHB_TRANS);
- if (trans & AHB_TRANS_RDY)
- break;
- }
- if (tries >= AHB_TRANS_TRIES) {
- qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
- AHB_TRANS_TRIES);
- goto bail;
- }
- }
- ret = wr_data;
-bail:
- qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
- return ret;
-}
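-
-/*
- * Usage sketch (an editor's addition): ahb_mod() is a read-modify-write
- * primitive on the SerDes AHB space, steered by mask:
- *
- *   ahb_mod(dd, quad, chan, addr, 0, 0);            read only
- *   ahb_mod(dd, quad, chan, addr, 1 << 6, 1 << 6);  set bit 6 only
- *
- * It returns the value written (the value read, for mask == 0), or
- * 0xBAD0BAD on a timeout waiting for ahb_rdy.
- */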
-
-static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
- unsigned mask)
-{
- struct qib_devdata *dd = ppd->dd;
- int chan;
-
- for (chan = 0; chan < SERDES_CHANS; ++chan) {
- ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
- data, mask);
- ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
- 0, 0);
- }
-}
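-
-/*
- * Addressing sketch (an editor's addition): with SERDES_CHANS == 4,
- * "chan + (chan >> 1)" maps chan 0,1,2,3 to quad-args 0,1,3,4, skipping
- * the pll slot at 2 (5, the subsystem, is addressed explicitly). The
- * second ahb_mod() call with data == mask == 0 is a pure read-back of
- * the register just written.
- */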
-
-static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
-{
- u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
- u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
-
- if (enable && !state) {
- pr_info("IB%u:%u Turning LOS on\n",
- ppd->dd->unit, ppd->port);
- data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
- } else if (!enable && state) {
- pr_info("IB%u:%u Turning LOS off\n",
- ppd->dd->unit, ppd->port);
- data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
- }
- qib_write_kreg_port(ppd, krp_serdesctrl, data);
-}
-
-static int serdes_7322_init(struct qib_pportdata *ppd)
-{
- int ret = 0;
-
- if (ppd->dd->cspec->r1)
- ret = serdes_7322_init_old(ppd);
- else
- ret = serdes_7322_init_new(ppd);
- return ret;
-}
-
-static int serdes_7322_init_old(struct qib_pportdata *ppd)
-{
- u32 le_val;
-
- /*
- * Initialize the Tx DDS tables. Also done every QSFP event,
- * for adapters with QSFP
- */
- init_txdds_table(ppd, 0);
-
- /* ensure no tx overrides from earlier driver loads */
- qib_write_kreg_port(ppd, krp_tx_deemph_override,
- SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- reset_tx_deemphasis_override));
-
- /* Patch some SerDes defaults to "Better for IB" */
- /* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
- ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
-
- /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
- ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
- /* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
- ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
-
- /* May be overridden in qsfp_7322_event */
- le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
- ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
-
-	/* enable LE1 adaptation for all but QME, where it is disabled */
- le_val = IS_QME(ppd->dd) ? 0 : 1;
- ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
-
- /* Clear cmode-override, may be set from older driver */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
-
- /* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
- ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
-
- /* setup LoS params; these are subsystem, so chan == 5 */
- /* LoS filter threshold_count on, ch 0-3, set to 8 */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
-
- /* LoS filter threshold_count off, ch 0-3, set to 4 */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
-
- /* LoS filter select enabled */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
-
- /* LoS target data: SDR=4, DDR=2, QDR=1 */
- ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
- ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
- ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
-
- serdes_7322_los_enable(ppd, 1);
-
-	/* rxbistena; set to 0 to avoid effects of it switching later */
- ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
-
- /* Configure 4 DFE taps, and only they adapt */
- ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
-
- /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
- le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
- ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
-
- /*
- * Set receive adaptation mode. SDR and DDR adaptation are
- * always on, and QDR is initially enabled; later disabled.
- */
- qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
- qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
- qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
- ppd->dd->cspec->r1 ?
- QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
- ppd->cpspec->qdr_dfe_on = 1;
-
- /* FLoop LOS gate: PPM filter enabled */
- ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
-
- /* rx offset center enabled */
- ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
-
- if (!ppd->dd->cspec->r1) {
- ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
- ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
- }
-
- /* Set the frequency loop bandwidth to 15 */
- ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
-
- return 0;
-}
-
-static int serdes_7322_init_new(struct qib_pportdata *ppd)
-{
- unsigned long tend;
- u32 le_val, rxcaldone;
- int chan, chan_done = (1 << SERDES_CHANS) - 1;
-
- /* Clear cmode-override, may be set from older driver */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
-
- /* ensure no tx overrides from earlier driver loads */
- qib_write_kreg_port(ppd, krp_tx_deemph_override,
- SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- reset_tx_deemphasis_override));
-
- /* START OF LSI SUGGESTED SERDES BRINGUP */
- /* Reset - Calibration Setup */
-	/* Stop DFE adaptation */
- ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
- /* Disable LE1 */
- ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
- /* Disable autoadapt for LE1 */
- ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
- /* Disable LE2 */
- ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
- /* Disable VGA */
- ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
- /* Disable AFE Offset Cancel */
- ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
- /* Disable Timing Loop */
- ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
- /* Disable Frequency Loop */
- ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
- /* Disable Baseline Wander Correction */
- ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
- /* Disable RX Calibration */
- ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
- /* Disable RX Offset Calibration */
- ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
- /* Select BB CDR */
- ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
- /* CDR Step Size */
- ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
- /* Enable phase Calibration */
- ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
- /* DFE Bandwidth [2:14-12] */
- ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
- /* DFE Config (4 taps only) */
- ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
- /* Gain Loop Bandwidth */
- if (!ppd->dd->cspec->r1) {
- ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
- ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
- } else {
- ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
- }
- /* Baseline Wander Correction Gain [13:4-0] (leave as default) */
- /* Baseline Wander Correction Gain [3:7-5] (leave as default) */
- /* Data Rate Select [5:7-6] (leave as default) */
- /* RX Parallel Word Width [3:10-8] (leave as default) */
-
-	/* RX RESET */
- /* Single- or Multi-channel reset */
- /* RX Analog reset */
- /* RX Digital reset */
- ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
- msleep(20);
- /* RX Analog reset */
- ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
- msleep(20);
- /* RX Digital reset */
- ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
- msleep(20);
-
- /* setup LoS params; these are subsystem, so chan == 5 */
- /* LoS filter threshold_count on, ch 0-3, set to 8 */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
-
- /* LoS filter threshold_count off, ch 0-3, set to 4 */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
-
- /* LoS filter select enabled */
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
-
- /* LoS target data: SDR=4, DDR=2, QDR=1 */
- ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
- ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
- ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
-
- /* Turn on LOS on initial SERDES init */
- serdes_7322_los_enable(ppd, 1);
- /* FLoop LOS gate: PPM filter enabled */
- ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
-
- /* RX LATCH CALIBRATION */
- /* Enable Eyefinder Phase Calibration latch */
- ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
- /* Enable RX Offset Calibration latch */
- ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
- msleep(20);
- /* Start Calibration */
- ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
- tend = jiffies + msecs_to_jiffies(500);
- while (chan_done && !time_is_before_jiffies(tend)) {
- msleep(20);
- for (chan = 0; chan < SERDES_CHANS; ++chan) {
- rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
- (chan + (chan >> 1)),
- 25, 0, 0);
- if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
- (~chan_done & (1 << chan)) == 0)
- chan_done &= ~(1 << chan);
- }
- }
- if (chan_done) {
- pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n",
- IBSD(ppd->hw_pidx), chan_done);
- } else {
- for (chan = 0; chan < SERDES_CHANS; ++chan) {
- rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
- (chan + (chan >> 1)),
- 25, 0, 0);
- if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
- pr_info("Serdes %d chan %d calibration failed\n",
- IBSD(ppd->hw_pidx), chan);
- }
- }
-
- /* Turn off Calibration */
- ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
- msleep(20);
-
- /* BRING RX UP */
- /* Set LE2 value (May be overridden in qsfp_7322_event) */
- le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
- ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
- /* Set LE2 Loop bandwidth */
- ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
- /* Enable LE2 */
- ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
- msleep(20);
- /* Enable H0 only */
- ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
- /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
- le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
- ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
- /* Enable VGA */
- ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
- msleep(20);
- /* Set Frequency Loop Bandwidth */
- ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
- /* Enable Frequency Loop */
- ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
- /* Set Timing Loop Bandwidth */
- ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
- /* Enable Timing Loop */
- ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
- msleep(50);
- /* Enable DFE
- * Set receive adaptation mode. SDR and DDR adaptation are
- * always on, and QDR is initially enabled; later disabled.
- */
- qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
- qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
- qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
- ppd->dd->cspec->r1 ?
- QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
- ppd->cpspec->qdr_dfe_on = 1;
- /* Disable LE1 */
- ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
- /* Disable auto adapt for LE1 */
- ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
- msleep(20);
- /* Enable AFE Offset Cancel */
- ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
- /* Enable Baseline Wander Correction */
- ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
- /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
- ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
- /* VGA output common mode */
- ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
-
- /*
- * Initialize the Tx DDS tables. Also done every QSFP event,
- * for adapters with QSFP
- */
- init_txdds_table(ppd, 0);
-
- return 0;
-}
-
-/* start adjust QMH serdes parameters */
-
-static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
-{
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- 9, code << 9, 0x3f << 9);
-}
-
-static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
- int enable, u32 tapenable)
-{
- if (enable)
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- 1, 3 << 10, 0x1f << 10);
- else
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- 1, 0, 0x1f << 10);
-}
-
-/* Set clock to 1, 0, 1, 0 */
-static void clock_man(struct qib_pportdata *ppd, int chan)
-{
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- 4, 0x4000, 0x4000);
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- 4, 0, 0x4000);
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- 4, 0x4000, 0x4000);
- ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
- 4, 0, 0x4000);
-}
-
-/*
- * Write the current Tx serdes pre, post, main, amp settings into the
- * serdes. The caller must pass the settings appropriate for the current
- * speed, or not care if they are correct for the current speed.
- */
-static void write_tx_serdes_param(struct qib_pportdata *ppd,
- struct txdds_ent *txdds)
-{
- u64 deemph;
-
- deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
- /* field names for amp, main, post, pre, respectively */
- deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
- SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
- SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
- SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
-
- deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- tx_override_deemphasis_select);
- deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txampcntl_d2a);
- deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txc0_ena);
- deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txcp1_ena);
- deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
- txcn1_ena);
- qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
-}
-
-/*
- * Set the parameters for mez cards on link bounce, so they are
- * always exactly what was requested. Similar logic to init_txdds_table()
- * but does just the serdes.
- */
-static void adj_tx_serdes(struct qib_pportdata *ppd)
-{
- const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
- struct txdds_ent *dds;
-
- find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
- dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
- qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
- ddr_dds : sdr_dds));
- write_tx_serdes_param(ppd, dds);
-}
-
-/* set QDR forced value for H1, if needed */
-static void force_h1(struct qib_pportdata *ppd)
-{
- int chan;
-
- ppd->cpspec->qdr_reforce = 0;
- if (!ppd->dd->cspec->r1)
- return;
-
- for (chan = 0; chan < SERDES_CHANS; chan++) {
- set_man_mode_h1(ppd, chan, 1, 0);
- set_man_code(ppd, chan, ppd->cpspec->h1_val);
- clock_man(ppd, chan);
- set_man_mode_h1(ppd, chan, 0, 0);
- }
-}
-
-#define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
-#define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
-
-#define R_OPCODE_LSB 3
-#define R_OP_NOP 0
-#define R_OP_SHIFT 2
-#define R_OP_UPDATE 3
-#define R_TDI_LSB 2
-#define R_TDO_LSB 1
-#define R_RDY 1
-
-static int qib_r_grab(struct qib_devdata *dd)
-{
- u64 val = SJA_EN;
-
- qib_write_kreg(dd, kr_r_access, val);
- qib_read_kreg32(dd, kr_scratch);
- return 0;
-}
-
-/* qib_r_wait_for_rdy() not only waits for the ready bit, it
- * returns the current state of R_TDO
- */
-static int qib_r_wait_for_rdy(struct qib_devdata *dd)
-{
- u64 val;
- int timeout;
-
- for (timeout = 0; timeout < 100 ; ++timeout) {
- val = qib_read_kreg32(dd, kr_r_access);
- if (val & R_RDY)
- return (val >> R_TDO_LSB) & 1;
- }
- return -1;
-}
-
-static int qib_r_shift(struct qib_devdata *dd, int bisten,
- int len, u8 *inp, u8 *outp)
-{
- u64 valbase, val;
- int ret, pos;
-
- valbase = SJA_EN | (bisten << BISTEN_LSB) |
- (R_OP_SHIFT << R_OPCODE_LSB);
- ret = qib_r_wait_for_rdy(dd);
- if (ret < 0)
- goto bail;
- for (pos = 0; pos < len; ++pos) {
- val = valbase;
- if (outp) {
- outp[pos >> 3] &= ~(1 << (pos & 7));
- outp[pos >> 3] |= (ret << (pos & 7));
- }
- if (inp) {
- int tdi = inp[pos >> 3] >> (pos & 7);
-
- val |= ((tdi & 1) << R_TDI_LSB);
- }
- qib_write_kreg(dd, kr_r_access, val);
- qib_read_kreg32(dd, kr_scratch);
- ret = qib_r_wait_for_rdy(dd);
- if (ret < 0)
- break;
- }
- /* Restore to NOP between operations. */
- val = SJA_EN | (bisten << BISTEN_LSB);
- qib_write_kreg(dd, kr_r_access, val);
- qib_read_kreg32(dd, kr_scratch);
- ret = qib_r_wait_for_rdy(dd);
-
- if (ret >= 0)
- ret = pos;
-bail:
- return ret;
-}
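-
-/*
- * Bit-packing sketch (an editor's addition): the scan chains are
- * shifted LSB-first, with stream bit 'pos' stored at byte pos >> 3,
- * bit pos & 7; e.g. stream bit 10 lives in bit 2 of byte 1. When outp
- * is non-NULL, each captured TDO bit is the one sampled before the
- * corresponding TDI bit is shifted in.
- */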
-
-static int qib_r_update(struct qib_devdata *dd, int bisten)
-{
- u64 val;
- int ret;
-
- val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
- ret = qib_r_wait_for_rdy(dd);
- if (ret >= 0) {
- qib_write_kreg(dd, kr_r_access, val);
- qib_read_kreg32(dd, kr_scratch);
- }
- return ret;
-}
-
-#define BISTEN_PORT_SEL 15
-#define LEN_PORT_SEL 625
-#define BISTEN_AT 17
-#define LEN_AT 156
-#define BISTEN_ETM 16
-#define LEN_ETM 632
-
-#define BIT2BYTE(x) (((x) + BITS_PER_BYTE - 1) / BITS_PER_BYTE)
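-
-/*
- * Worked values (an editor's addition): BIT2BYTE rounds up to whole
- * bytes, so LEN_AT 156 -> 20 bytes, LEN_ETM 632 -> 79 bytes and
- * LEN_PORT_SEL 625 -> 79 bytes, matching the array sizes below.
- */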
-
-/* these are common for all IB port use cases. */
-static u8 reset_at[BIT2BYTE(LEN_AT)] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
-};
-static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
- 0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
- 0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
- 0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
- 0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
-};
-static u8 at[BIT2BYTE(LEN_AT)] = {
- 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
-};
-
-/* used for IB1 or IB2, only one in use */
-static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
- 0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
- 0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
-};
-
-/* used when both IB1 and IB2 are in use */
-static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
- 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
- 0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
- 0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
- 0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
- 0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
-};
-
-/* used when only IB1 is in use */
-static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
- 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
- 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
- 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
- 0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
- 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
- 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
- 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
- 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-/* used when only IB2 is in use */
-static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
- 0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
- 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
- 0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
- 0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
- 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
- 0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
- 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
- 0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
-};
-
-/* used when both IB1 and IB2 are in use */
-static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
- 0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
- 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
- 0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
- 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
- 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
- 0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
- 0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
- 0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
-};
-
-/*
- * Do setup to properly handle IB link recovery; if 'both' is set, we
- * are initializing to cover both ports; otherwise we are initializing
- * to cover a single port card, or the port has reached INIT and we may
- * need to switch coverage types.
- */
-static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
-{
- u8 *portsel, *etm;
- struct qib_devdata *dd = ppd->dd;
-
- if (!ppd->dd->cspec->r1)
- return;
- if (!both) {
- dd->cspec->recovery_ports_initted++;
- ppd->cpspec->recovery_init = 1;
- }
- if (!both && dd->cspec->recovery_ports_initted == 1) {
- portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
- etm = atetm_1port;
- } else {
- portsel = portsel_2port;
- etm = atetm_2port;
- }
-
- if (qib_r_grab(dd) < 0 ||
- qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
- qib_r_update(dd, BISTEN_ETM) < 0 ||
- qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
- qib_r_update(dd, BISTEN_AT) < 0 ||
- qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
- portsel, NULL) < 0 ||
- qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
- qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
- qib_r_update(dd, BISTEN_AT) < 0 ||
- qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
- qib_r_update(dd, BISTEN_ETM) < 0)
- qib_dev_err(dd, "Failed IB link recovery setup\n");
-}
-
-static void check_7322_rxe_status(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- u64 fmask;
-
- if (dd->cspec->recovery_ports_initted != 1)
- return; /* rest doesn't apply to dualport */
- qib_write_kreg(dd, kr_control, dd->control |
- SYM_MASK(Control, FreezeMode));
- (void)qib_read_kreg64(dd, kr_scratch);
- udelay(3); /* ibcreset asserted 400ns, be sure that's over */
- fmask = qib_read_kreg64(dd, kr_act_fmask);
- if (!fmask) {
- /*
- * require a powercycle before we'll work again, and make
- * sure we get no more interrupts, and don't turn off
- * freeze.
- */
- ppd->dd->cspec->stay_in_freeze = 1;
- qib_7322_set_intr_state(ppd->dd, 0);
- qib_write_kreg(dd, kr_fmask, 0ULL);
- qib_dev_err(dd, "HCA unusable until powercycled\n");
- return; /* eventually reset */
- }
-
- qib_write_kreg(ppd->dd, kr_hwerrclear,
- SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
-
- /* don't do the full clear_freeze(), not needed for this */
- qib_write_kreg(dd, kr_control, dd->control);
- qib_read_kreg32(dd, kr_scratch);
- /* take IBC out of reset */
- if (ppd->link_speed_supported) {
- ppd->cpspec->ibcctrl_a &=
- ~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
- qib_write_kreg_port(ppd, krp_ibcctrl_a,
- ppd->cpspec->ibcctrl_a);
- qib_read_kreg32(dd, kr_scratch);
- if (ppd->lflags & QIBL_IB_LINK_DISABLED)
- qib_set_ib_7322_lstate(ppd, 0,
- QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
- }
-}
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
deleted file mode 100644
index 1c45814f5646..000000000000
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ /dev/null
@@ -1,1782 +0,0 @@
-/*
- * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/printk.h>
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-#include <linux/dca.h>
-#endif
-#include <rdma/rdma_vt.h>
-
-#include "qib.h"
-#include "qib_common.h"
-#include "qib_mad.h"
-#ifdef CONFIG_DEBUG_FS
-#include "qib_debugfs.h"
-#include "qib_verbs.h"
-#endif
-
-#undef pr_fmt
-#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
-
-/*
- * Minimum buffers we want to have per context, beyond those used by
- * the driver itself.
- */
-#define QIB_MIN_USER_CTXT_BUFCNT 7
-
-#define QLOGIC_IB_R_SOFTWARE_MASK 0xFF
-#define QLOGIC_IB_R_SOFTWARE_SHIFT 24
-#define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62)
-
-/*
- * Number of ctxts we are configured to use (to allow for more pio
- * buffers per ctxt, etc.) Zero means use chip value.
- */
-ushort qib_cfgctxts;
-module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO);
-MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use");
-
-unsigned qib_numa_aware;
-module_param_named(numa_aware, qib_numa_aware, uint, S_IRUGO);
-MODULE_PARM_DESC(numa_aware,
- "0 -> PSM allocation close to HCA, 1 -> PSM allocation local to process");
-
-/*
- * If set, do not write to any regs if avoidable; a hack to allow
- * checking for deranged default register values.
- */
-ushort qib_mini_init;
-module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO);
-MODULE_PARM_DESC(mini_init, "If set, do minimal diag init");
-
-unsigned qib_n_krcv_queues;
-module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO);
-MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");
-
-unsigned qib_cc_table_size;
-module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO);
-MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984");
-
-static void verify_interrupt(struct timer_list *);
-
-DEFINE_XARRAY_FLAGS(qib_dev_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
-u32 qib_cpulist_count;
-unsigned long *qib_cpulist;
-
-/* set number of contexts we'll actually use */
-void qib_set_ctxtcnt(struct qib_devdata *dd)
-{
- if (!qib_cfgctxts) {
- dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
- if (dd->cfgctxts > dd->ctxtcnt)
- dd->cfgctxts = dd->ctxtcnt;
- } else if (qib_cfgctxts < dd->num_pports)
- dd->cfgctxts = dd->ctxtcnt;
- else if (qib_cfgctxts <= dd->ctxtcnt)
- dd->cfgctxts = qib_cfgctxts;
- else
- dd->cfgctxts = dd->ctxtcnt;
- dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
- dd->cfgctxts - dd->first_user_ctxt;
-}
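-
-/*
- * Worked example (an editor's addition, illustrative numbers): on a
- * chip with ctxtcnt 18, first_user_ctxt 2 and 8 online CPUs, the
- * default (cfgctxts == 0) gives cfgctxts = min(2 + 8, 18) = 10 and
- * freectxts = 8; a module value below num_pports falls back to the
- * full chip context count.
- */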
-
-/*
- * Common code for creating the receive context array.
- */
-int qib_create_ctxts(struct qib_devdata *dd)
-{
- unsigned i;
- int local_node_id = pcibus_to_node(dd->pcidev->bus);
-
- if (local_node_id < 0)
- local_node_id = numa_node_id();
- dd->assigned_node_id = local_node_id;
-
- /*
- * Allocate full ctxtcnt array, rather than just cfgctxts, because
- * cleanup iterates across all possible ctxts.
- */
- dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
- if (!dd->rcd)
- return -ENOMEM;
-
- /* create (one or more) kctxt */
- for (i = 0; i < dd->first_user_ctxt; ++i) {
- struct qib_pportdata *ppd;
- struct qib_ctxtdata *rcd;
-
- if (dd->skip_kctxt_mask & (1 << i))
- continue;
-
- ppd = dd->pport + (i % dd->num_pports);
-
- rcd = qib_create_ctxtdata(ppd, i, dd->assigned_node_id);
- if (!rcd) {
- qib_dev_err(dd,
- "Unable to allocate ctxtdata for Kernel ctxt, failing\n");
- kfree(dd->rcd);
- dd->rcd = NULL;
- return -ENOMEM;
- }
- rcd->pkeys[0] = QIB_DEFAULT_P_KEY;
- rcd->seq_cnt = 1;
- }
- return 0;
-}
-
-/*
- * Common code for user and kernel context setup.
- */
-struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt,
- int node_id)
-{
- struct qib_devdata *dd = ppd->dd;
- struct qib_ctxtdata *rcd;
-
- rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, node_id);
- if (rcd) {
- INIT_LIST_HEAD(&rcd->qp_wait_list);
- rcd->node_id = node_id;
- rcd->ppd = ppd;
- rcd->dd = dd;
- rcd->cnt = 1;
- rcd->ctxt = ctxt;
- dd->rcd[ctxt] = rcd;
-#ifdef CONFIG_DEBUG_FS
- if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */
- rcd->opstats = kzalloc_node(sizeof(*rcd->opstats),
- GFP_KERNEL, node_id);
- if (!rcd->opstats) {
- kfree(rcd);
- qib_dev_err(dd,
- "Unable to allocate per ctxt stats buffer\n");
- return NULL;
- }
- }
-#endif
- dd->f_init_ctxt(rcd);
-
- /*
- * To avoid wasting a lot of memory, we allocate 32KB chunks
- * of physically contiguous memory, advance through it until
- * used up and then allocate more. Of course, we need
- * memory to store those extra pointers, now. 32KB seems to
- * be the most that is "safe" under memory pressure
- * (creating large files and then copying them over
- * NFS while doing lots of MPI jobs). The OOM killer can
-		 * get invoked, even though we say we can sleep, and this can
-		 * cause significant system problems....
- */
- rcd->rcvegrbuf_size = 0x8000;
- rcd->rcvegrbufs_perchunk =
- rcd->rcvegrbuf_size / dd->rcvegrbufsize;
- rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
- rcd->rcvegrbufs_perchunk - 1) /
- rcd->rcvegrbufs_perchunk;
- rcd->rcvegrbufs_perchunk_shift =
- ilog2(rcd->rcvegrbufs_perchunk);
- }
- return rcd;
-}
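-
-/*
- * Chunking sketch (an editor's addition, illustrative numbers): with
- * 32KB (0x8000) chunks and a 4KB eager buffer size,
- * rcvegrbufs_perchunk == 8 (shift 3), so a context with
- * rcvegrcnt == 2048 needs 2048 / 8 == 256 physically contiguous
- * 32KB allocations.
- */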
-
-/*
- * Common code for initializing the physical port structure.
- */
-int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
- u8 hw_pidx, u8 port)
-{
- int size;
-
- ppd->dd = dd;
- ppd->hw_pidx = hw_pidx;
- ppd->port = port; /* IB port number, not index */
-
- spin_lock_init(&ppd->sdma_lock);
- spin_lock_init(&ppd->lflags_lock);
- spin_lock_init(&ppd->cc_shadow_lock);
- init_waitqueue_head(&ppd->state_wait);
-
- timer_setup(&ppd->symerr_clear_timer, qib_clear_symerror_on_linkup, 0);
-
- ppd->qib_wq = NULL;
- ppd->ibport_data.pmastats =
- alloc_percpu(struct qib_pma_counters);
- if (!ppd->ibport_data.pmastats)
- return -ENOMEM;
- ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
- ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
- ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
- if (!(ppd->ibport_data.rvp.rc_acks) ||
- !(ppd->ibport_data.rvp.rc_qacks) ||
- !(ppd->ibport_data.rvp.rc_delayed_comp))
- return -ENOMEM;
-
- if (qib_cc_table_size < IB_CCT_MIN_ENTRIES)
- goto bail;
-
- ppd->cc_supported_table_entries = min(max_t(int, qib_cc_table_size,
- IB_CCT_MIN_ENTRIES), IB_CCT_ENTRIES*IB_CC_TABLE_CAP_DEFAULT);
-
- ppd->cc_max_table_entries =
- ppd->cc_supported_table_entries/IB_CCT_ENTRIES;
-
- size = IB_CC_TABLE_CAP_DEFAULT * sizeof(struct ib_cc_table_entry)
- * IB_CCT_ENTRIES;
- ppd->ccti_entries = kzalloc(size, GFP_KERNEL);
- if (!ppd->ccti_entries)
- goto bail;
-
- size = IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry);
- ppd->congestion_entries = kzalloc(size, GFP_KERNEL);
- if (!ppd->congestion_entries)
- goto bail_1;
-
- size = sizeof(struct cc_table_shadow);
- ppd->ccti_entries_shadow = kzalloc(size, GFP_KERNEL);
- if (!ppd->ccti_entries_shadow)
- goto bail_2;
-
- size = sizeof(struct ib_cc_congestion_setting_attr);
- ppd->congestion_entries_shadow = kzalloc(size, GFP_KERNEL);
- if (!ppd->congestion_entries_shadow)
- goto bail_3;
-
- return 0;
-
-bail_3:
- kfree(ppd->ccti_entries_shadow);
- ppd->ccti_entries_shadow = NULL;
-bail_2:
- kfree(ppd->congestion_entries);
- ppd->congestion_entries = NULL;
-bail_1:
- kfree(ppd->ccti_entries);
- ppd->ccti_entries = NULL;
-bail:
- /* User is intentionally disabling the congestion control agent */
- if (!qib_cc_table_size)
- return 0;
-
- if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) {
- qib_cc_table_size = 0;
- qib_dev_err(dd,
- "Congestion Control table size %d less than minimum %d for port %d\n",
- qib_cc_table_size, IB_CCT_MIN_ENTRIES, port);
- }
-
- qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n",
- port);
- return 0;
-}
-
-static int init_pioavailregs(struct qib_devdata *dd)
-{
- int ret, pidx;
- u64 *status_page;
-
- dd->pioavailregs_dma = dma_alloc_coherent(
- &dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
- GFP_KERNEL);
- if (!dd->pioavailregs_dma) {
- qib_dev_err(dd,
- "failed to allocate PIOavail reg area in memory\n");
- ret = -ENOMEM;
- goto done;
- }
-
- /*
- * We really want L2 cache aligned, but for current CPUs of
- * interest, they are the same.
- */
- status_page = (u64 *)
- ((char *) dd->pioavailregs_dma +
- ((2 * L1_CACHE_BYTES +
- dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
- /* device status comes first, for backwards compatibility */
- dd->devstatusp = status_page;
- *status_page++ = 0;
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- dd->pport[pidx].statusp = status_page;
- *status_page++ = 0;
- }
-
- /*
- * Setup buffer to hold freeze and other messages, accessible to
- * apps, following statusp. This is per-unit, not per port.
- */
- dd->freezemsg = (char *) status_page;
- *dd->freezemsg = 0;
- /* length of msg buffer is "whatever is left" */
- ret = (char *) status_page - (char *) dd->pioavailregs_dma;
- dd->freezelen = PAGE_SIZE - ret;
-
- ret = 0;
-
-done:
- return ret;
-}
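-
-/*
- * Page layout sketch (an editor's addition) for the DMA page set up
- * above: the PIOavail shadow registers come first, then, cache-line
- * padded, one u64 of device status, one u64 of status per port, and
- * the rest of the PAGE_SIZE page holds the freeze message text.
- */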
-
-/**
- * init_shadow_tids - allocate the shadow TID array
- * @dd: the qlogic_ib device
- *
- * allocate the shadow TID array, so we can qib_munlock previous
- * entries. It may make more sense to move the pageshadow to the
- * ctxt data structure, so we only allocate memory for ctxts actually
- * in use, since we are at 8k per ctxt now.
- * We don't want failures here to prevent use of the driver/chip,
- * so no return value.
- */
-static void init_shadow_tids(struct qib_devdata *dd)
-{
- struct page **pages;
- dma_addr_t *addrs;
-
- pages = vzalloc(array_size(sizeof(struct page *),
- dd->cfgctxts * dd->rcvtidcnt));
- if (!pages)
- goto bail;
-
- addrs = vzalloc(array_size(sizeof(dma_addr_t),
- dd->cfgctxts * dd->rcvtidcnt));
- if (!addrs)
- goto bail_free;
-
- dd->pageshadow = pages;
- dd->physshadow = addrs;
- return;
-
-bail_free:
- vfree(pages);
-bail:
- dd->pageshadow = NULL;
-}
-
-/*
- * Do initialization for device that is only needed on
- * first detect, not on resets.
- */
-static int loadtime_init(struct qib_devdata *dd)
-{
- int ret = 0;
-
- if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
- QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
- qib_dev_err(dd,
- "Driver only handles version %d, chip swversion is %d (%llx), failing\n",
- QIB_CHIP_SWVERSION,
- (int)(dd->revision >>
- QLOGIC_IB_R_SOFTWARE_SHIFT) &
- QLOGIC_IB_R_SOFTWARE_MASK,
- (unsigned long long) dd->revision);
- ret = -ENOSYS;
- goto done;
- }
-
- if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
- qib_devinfo(dd->pcidev, "%s", dd->boardversion);
-
- spin_lock_init(&dd->pioavail_lock);
- spin_lock_init(&dd->sendctrl_lock);
- spin_lock_init(&dd->uctxt_lock);
- spin_lock_init(&dd->qib_diag_trans_lock);
- spin_lock_init(&dd->eep_st_lock);
- mutex_init(&dd->eep_lock);
-
- if (qib_mini_init)
- goto done;
-
- ret = init_pioavailregs(dd);
- init_shadow_tids(dd);
-
- qib_get_eeprom_info(dd);
-
- /* setup time (don't start yet) to verify we got interrupt */
- timer_setup(&dd->intrchk_timer, verify_interrupt, 0);
-done:
- return ret;
-}
-
-/**
- * init_after_reset - re-initialize after a reset
- * @dd: the qlogic_ib device
- *
- * sanity check at least some of the values after reset, and
- * ensure no receive or transmit (explicitly, in case reset
- * failed)
- */
-static int init_after_reset(struct qib_devdata *dd)
-{
- int i;
-
- /*
- * Ensure chip does no sends or receives, tail updates, or
- * pioavail updates while we re-initialize. This is mostly
- * for the driver data structures, not chip registers.
- */
- for (i = 0; i < dd->num_pports; ++i) {
- /*
- * ctxt == -1 means "all contexts". Only really safe for
- * _dis_abling things, as here.
- */
- dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
- QIB_RCVCTRL_INTRAVAIL_DIS |
- QIB_RCVCTRL_TAILUPD_DIS, -1);
- /* Redundant across ports for some, but no big deal. */
- dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS |
- QIB_SENDCTRL_AVAIL_DIS);
- }
-
- return 0;
-}
-
-static void enable_chip(struct qib_devdata *dd)
-{
- u64 rcvmask;
- int i;
-
- /*
- * Enable PIO send, and update of PIOavail regs to memory.
- */
- for (i = 0; i < dd->num_pports; ++i)
- dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB |
- QIB_SENDCTRL_AVAIL_ENB);
- /*
- * Enable kernel ctxts' receive and receive interrupt.
- * Other ctxts done as user opens and inits them.
- */
- rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB;
- rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ?
- QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB;
- for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
- struct qib_ctxtdata *rcd = dd->rcd[i];
-
- if (rcd)
- dd->f_rcvctrl(rcd->ppd, rcvmask, i);
- }
-}
-
-static void verify_interrupt(struct timer_list *t)
-{
- struct qib_devdata *dd = timer_container_of(dd, t, intrchk_timer);
- u64 int_counter;
-
- if (!dd)
- return; /* being torn down */
-
- /*
-	 * If we have not seen any interrupts, try the interrupt fallback;
-	 * if that is unavailable, let the user know and don't check again.
- */
- int_counter = qib_int_counter(dd) - dd->z_int_counter;
- if (int_counter == 0) {
- if (!dd->f_intr_fallback(dd))
- dev_err(&dd->pcidev->dev,
- "No interrupts detected, not usable.\n");
- else /* re-arm the timer to see if fallback works */
- mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
- }
-}
-
-static void init_piobuf_state(struct qib_devdata *dd)
-{
- int i, pidx;
- u32 uctxts;
-
- /*
- * Ensure all buffers are free, and fifos empty. Buffers
- * are common, so only do once for port 0.
- *
- * After enable and qib_chg_pioavailkernel so we can safely
- * enable pioavail updates and PIOENABLE. After this, packets
- * are ready and able to go out.
- */
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL);
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
- dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH);
-
- /*
- * If not all sendbufs are used, add the one to each of the lower
- * numbered contexts. pbufsctxt and lastctxt_piobuf are
- * calculated in chip-specific code because it may cause some
- * chip-specific adjustments to be made.
- */
- uctxts = dd->cfgctxts - dd->first_user_ctxt;
- dd->ctxts_extrabuf = dd->pbufsctxt ?
- dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0;
-
- /*
- * Set up the shadow copies of the piobufavail registers,
- * which we compare against the chip registers for now, and
- * the in memory DMA'ed copies of the registers.
- * By now pioavail updates to memory should have occurred, so
- * copy them into our working/shadow registers; this is in
- * case something went wrong with abort, but mostly to get the
- * initial values of the generation bit correct.
- */
- for (i = 0; i < dd->pioavregs; i++) {
- __le64 tmp;
-
- tmp = dd->pioavailregs_dma[i];
- /*
- * Don't need to worry about pioavailkernel here
- * because we will call qib_chg_pioavailkernel() later
- * in initialization, to busy out buffers as needed.
- */
- dd->pioavailshadow[i] = le64_to_cpu(tmp);
- }
- while (i < ARRAY_SIZE(dd->pioavailshadow))
- dd->pioavailshadow[i++] = 0; /* for debugging sanity */
-
- /* after pioavailshadow is setup */
- qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k,
- TXCHK_CHG_TYPE_KERN, NULL);
- dd->f_initvl15_bufs(dd);
-}
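The extra-buffer arithmetic above is plain remainder math: whatever lastctxt_piobuf doesn't divide evenly across the user contexts is handed out one buffer at a time to the lower-numbered ones. A compilable sketch with invented counts (in the driver, pbufsctxt itself comes from chip-specific code; the division here is an assumption for illustration):

#include <stdio.h>

int main(void)
{
	unsigned lastctxt_piobuf = 110;	/* PIO bufs left for user ctxts */
	unsigned uctxts = 4;		/* cfgctxts - first_user_ctxt */
	unsigned pbufsctxt = lastctxt_piobuf / uctxts;		/* 27 each */
	unsigned extra = lastctxt_piobuf - pbufsctxt * uctxts;	/* 2 spare */

	for (unsigned c = 0; c < uctxts; c++)
		printf("user ctxt %u: %u bufs\n", c, pbufsctxt + (c < extra));
	return 0;
}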
-
-/**
- * qib_create_workqueues - create per port workqueues
- * @dd: the qlogic_ib device
- */
-static int qib_create_workqueues(struct qib_devdata *dd)
-{
- int pidx;
- struct qib_pportdata *ppd;
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (!ppd->qib_wq) {
- ppd->qib_wq = alloc_ordered_workqueue("qib%d_%d",
- WQ_MEM_RECLAIM,
- dd->unit, pidx);
- if (!ppd->qib_wq)
- goto wq_error;
- }
- }
- return 0;
-wq_error:
- pr_err("create_singlethread_workqueue failed for port %d\n",
- pidx + 1);
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (ppd->qib_wq) {
- destroy_workqueue(ppd->qib_wq);
- ppd->qib_wq = NULL;
- }
- }
- return -ENOMEM;
-}
-
-static void qib_free_pportdata(struct qib_pportdata *ppd)
-{
- free_percpu(ppd->ibport_data.pmastats);
- free_percpu(ppd->ibport_data.rvp.rc_acks);
- free_percpu(ppd->ibport_data.rvp.rc_qacks);
- free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
- ppd->ibport_data.pmastats = NULL;
-}
-
-/**
- * qib_init - do the actual initialization sequence on the chip
- * @dd: the qlogic_ib device
- * @reinit: reinitializing, so don't allocate new memory
- *
- * Do the actual initialization sequence on the chip. This is done
- * both from the init routine called from the PCI infrastructure, and
- * when we reset the chip, or detect that it was reset internally,
- * or it's administratively re-enabled.
- *
- * Memory allocation here and in called routines is only done in
- * the first case (reinit == 0). We have to be careful, because even
- * without memory allocation, we need to re-write all the chip registers
- * TIDs, etc. after the reset or enable has completed.
- */
-int qib_init(struct qib_devdata *dd, int reinit)
-{
- int ret = 0, pidx, lastfail = 0;
- u32 portok = 0;
- unsigned i;
- struct qib_ctxtdata *rcd;
- struct qib_pportdata *ppd;
- unsigned long flags;
-
- /* Set linkstate to unknown, so we can watch for a transition. */
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED |
- QIBL_LINKDOWN | QIBL_LINKINIT |
- QIBL_LINKV);
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- }
-
- if (reinit)
- ret = init_after_reset(dd);
- else
- ret = loadtime_init(dd);
- if (ret)
- goto done;
-
- /* Bypass most chip-init, to get to device creation */
- if (qib_mini_init)
- return 0;
-
- ret = dd->f_late_initreg(dd);
- if (ret)
- goto done;
-
- /* dd->rcd can be NULL if early init failed */
- for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
- /*
- * Set up the (kernel) rcvhdr queue and egr TIDs. If doing
- * re-init, the simplest way to handle this is to free
- * existing, and re-allocate.
- * Need to re-create rest of ctxt 0 ctxtdata as well.
- */
- rcd = dd->rcd[i];
- if (!rcd)
- continue;
-
- lastfail = qib_create_rcvhdrq(dd, rcd);
- if (!lastfail)
- lastfail = qib_setup_eagerbufs(rcd);
- if (lastfail)
- qib_dev_err(dd,
- "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
- }
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- int mtu;
-
- if (lastfail)
- ret = lastfail;
- ppd = dd->pport + pidx;
- mtu = ib_mtu_enum_to_int(qib_ibmtu);
- if (mtu == -1) {
- mtu = QIB_DEFAULT_MTU;
- qib_ibmtu = 0; /* don't leave invalid value */
- }
- /* set max we can ever have for this driver load */
- ppd->init_ibmaxlen = min(mtu > 2048 ?
- dd->piosize4k : dd->piosize2k,
- dd->rcvegrbufsize +
- (dd->rcvhdrentsize << 2));
- /*
- * Have to initialize ibmaxlen, but this will normally
- * change immediately in qib_set_mtu().
- */
- ppd->ibmaxlen = ppd->init_ibmaxlen;
- qib_set_mtu(ppd, mtu);
-
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_LINK_DISABLED;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
-
- lastfail = dd->f_bringup_serdes(ppd);
- if (lastfail) {
- qib_devinfo(dd->pcidev,
- "Failed to bringup IB port %u\n", ppd->port);
- lastfail = -ENETDOWN;
- continue;
- }
-
- portok++;
- }
-
- if (!portok) {
- /* none of the ports initialized */
- if (!ret && lastfail)
- ret = lastfail;
- else if (!ret)
- ret = -ENETDOWN;
- /* but continue on, so we can debug cause */
- }
-
- enable_chip(dd);
-
- init_piobuf_state(dd);
-
-done:
- if (!ret) {
- /* chip is OK for user apps; mark it as initialized */
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- /*
- * Set status even if port serdes is not initialized
- * so that diags will work.
- */
- *ppd->statusp |= QIB_STATUS_CHIP_PRESENT |
- QIB_STATUS_INITTED;
- if (!ppd->link_speed_enabled)
- continue;
- if (dd->flags & QIB_HAS_SEND_DMA)
- ret = qib_setup_sdma(ppd);
- timer_setup(&ppd->hol_timer, qib_hol_event, 0);
- ppd->hol_state = QIB_HOL_UP;
- }
-
- /* now we can enable all interrupts from the chip */
- dd->f_set_intr_state(dd, 1);
-
- /*
- * Setup to verify we get an interrupt, and fallback
- * to an alternate if necessary and possible.
- */
- mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
- /* start stats retrieval timer */
- mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
- }
-
- /* if ret is non-zero, we probably should do some cleanup here... */
- return ret;
-}
-
-/*
- * These next two routines are placeholders in case we don't have per-arch
- * code for controlling write combining. If explicit control of write
- * combining is not available, performance will probably be awful.
- */
-
-int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd)
-{
- return -EOPNOTSUPP;
-}
-
-void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
-{
-}
-
-struct qib_devdata *qib_lookup(int unit)
-{
- return xa_load(&qib_dev_table, unit);
-}
-
-/*
- * Stop the timers during unit shutdown, or after an error late
- * in initialization.
- */
-static void qib_stop_timers(struct qib_devdata *dd)
-{
- struct qib_pportdata *ppd;
- int pidx;
-
- if (dd->stats_timer.function)
- timer_delete_sync(&dd->stats_timer);
- if (dd->intrchk_timer.function)
- timer_delete_sync(&dd->intrchk_timer);
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- if (ppd->hol_timer.function)
- timer_delete_sync(&ppd->hol_timer);
- if (ppd->led_override_timer.function) {
- timer_delete_sync(&ppd->led_override_timer);
- atomic_set(&ppd->led_override_timer_active, 0);
- }
- if (ppd->symerr_clear_timer.function)
- timer_delete_sync(&ppd->symerr_clear_timer);
- }
-}
-
-/**
- * qib_shutdown_device - shut down a device
- * @dd: the qlogic_ib device
- *
- * This is called to make the device quiet when we are about to
- * unload the driver, and also when the device is administratively
- * disabled. It does not free any data structures.
- * Everything it does has to be setup again by qib_init(dd, 1)
- */
-static void qib_shutdown_device(struct qib_devdata *dd)
-{
- struct qib_pportdata *ppd;
- unsigned pidx;
-
- if (dd->flags & QIB_SHUTDOWN)
- return;
- dd->flags |= QIB_SHUTDOWN;
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
-
- spin_lock_irq(&ppd->lflags_lock);
- ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT |
- QIBL_LINKARMED | QIBL_LINKACTIVE |
- QIBL_LINKV);
- spin_unlock_irq(&ppd->lflags_lock);
- *ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY);
- }
- dd->flags &= ~QIB_INITTED;
-
- /* mask interrupts, but not errors */
- dd->f_set_intr_state(dd, 0);
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
- QIB_RCVCTRL_CTXT_DIS |
- QIB_RCVCTRL_INTRAVAIL_DIS |
- QIB_RCVCTRL_PKEY_ENB, -1);
- /*
- * Gracefully stop all sends allowing any in progress to
- * trickle out first.
- */
- dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
- }
-
- /*
- * Wait long enough for anything that's going to trickle out to
- * have actually done so.
- */
- udelay(20);
-
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- ppd = dd->pport + pidx;
- dd->f_setextled(ppd, 0); /* make sure LEDs are off */
-
- if (dd->flags & QIB_HAS_SEND_DMA)
- qib_teardown_sdma(ppd);
-
- dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS |
- QIB_SENDCTRL_SEND_DIS);
- /*
- * Clear SerdesEnable.
- * We can't count on interrupts since we are stopping.
- */
- dd->f_quiet_serdes(ppd);
-
- if (ppd->qib_wq) {
- destroy_workqueue(ppd->qib_wq);
- ppd->qib_wq = NULL;
- }
- qib_free_pportdata(ppd);
- }
-
-}
-
-/**
- * qib_free_ctxtdata - free a context's allocated data
- * @dd: the qlogic_ib device
- * @rcd: the ctxtdata structure
- *
- * free up any allocated data for a context
- * This should not touch anything that would affect a simultaneous
- * re-allocation of context data, because it is called after qib_mutex
- * is released (and can be called from reinit as well).
- * It should never change any chip state, or global driver state.
- */
-void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
-{
- if (!rcd)
- return;
-
- if (rcd->rcvhdrq) {
- dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
- rcd->rcvhdrq, rcd->rcvhdrq_phys);
- rcd->rcvhdrq = NULL;
- if (rcd->rcvhdrtail_kvaddr) {
- dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
- rcd->rcvhdrtail_kvaddr,
- rcd->rcvhdrqtailaddr_phys);
- rcd->rcvhdrtail_kvaddr = NULL;
- }
- }
- if (rcd->rcvegrbuf) {
- unsigned e;
-
- for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
- void *base = rcd->rcvegrbuf[e];
- size_t size = rcd->rcvegrbuf_size;
-
- dma_free_coherent(&dd->pcidev->dev, size,
- base, rcd->rcvegrbuf_phys[e]);
- }
- kfree(rcd->rcvegrbuf);
- rcd->rcvegrbuf = NULL;
- kfree(rcd->rcvegrbuf_phys);
- rcd->rcvegrbuf_phys = NULL;
- rcd->rcvegrbuf_chunks = 0;
- }
-
- kfree(rcd->tid_pg_list);
- vfree(rcd->user_event_mask);
- vfree(rcd->subctxt_uregbase);
- vfree(rcd->subctxt_rcvegrbuf);
- vfree(rcd->subctxt_rcvhdr_base);
-#ifdef CONFIG_DEBUG_FS
- kfree(rcd->opstats);
- rcd->opstats = NULL;
-#endif
- kfree(rcd);
-}
-
-/*
- * Perform a PIO buffer bandwidth write test, to verify proper system
- * configuration. Even when all the setup calls work, occasionally
- * BIOS or other issues can prevent write combining from working, or
- * can cause other bandwidth problems to the chip.
- *
- * This test simply writes the same buffer over and over again, and
- * measures close to the peak bandwidth to the chip (not testing
- * data bandwidth to the wire). On chips that use an address-based
- * trigger to send packets to the wire, this is easy. On chips that
- * use a count to trigger, we want to make sure that the packet doesn't
- * go out on the wire, or trigger flow control checks.
- */
-static void qib_verify_pioperf(struct qib_devdata *dd)
-{
- u32 pbnum, cnt, lcnt;
- u32 __iomem *piobuf;
- u32 *addr;
- u64 msecs, emsecs;
-
- piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum);
- if (!piobuf) {
- qib_devinfo(dd->pcidev,
- "No PIObufs for checking perf, skipping\n");
- return;
- }
-
- /*
- * Enough to give us a reasonable test, less than piobuf size, and
- * likely a multiple of the store buffer length.
- */
- cnt = 1024;
-
- addr = vmalloc(cnt);
- if (!addr)
- goto done;
-
- preempt_disable(); /* we want reasonably accurate elapsed time */
- msecs = 1 + jiffies_to_msecs(jiffies);
- for (lcnt = 0; lcnt < 10000U; lcnt++) {
- /* wait until we cross msec boundary */
- if (jiffies_to_msecs(jiffies) >= msecs)
- break;
- udelay(1);
- }
-
- dd->f_set_armlaunch(dd, 0);
-
- /*
- * length 0, no dwords actually sent
- */
- writeq(0, piobuf);
- qib_flush_wc();
-
- /*
- * This is only roughly accurate, since even with preempt we
- * still take interrupts that could take a while. Running for
- * >= 5 msec seems to get us "close enough" to accurate values.
- */
- msecs = jiffies_to_msecs(jiffies);
- for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
- qib_pio_copy(piobuf + 64, addr, cnt >> 2);
- emsecs = jiffies_to_msecs(jiffies) - msecs;
- }
-
- /* 1 GiB/sec, slightly over IB SDR line rate */
- if (lcnt < (emsecs * 1024U))
- qib_dev_err(dd,
- "Performance problem: bandwidth to PIO buffers is only %u MiB/sec\n",
- lcnt / (u32) emsecs);
-
- preempt_enable();
-
- vfree(addr);
-
-done:
- /* disarm piobuf, so it's available again */
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum));
- qib_sendbuf_done(dd, pbnum);
- dd->f_set_armlaunch(dd, 1);
-}
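Stripped of the PIO specifics, the test above is: copy a 1 KiB buffer in a loop for at least 5 ms, then require roughly 1024 copies per millisecond (about 1 GiB/sec, slightly over SDR line rate). A userspace approximation against ordinary memory, using the same pass/fail arithmetic (memcpy standing in for qib_pio_copy):

#include <stdio.h>
#include <string.h>
#include <time.h>

#define CNT 1024	/* bytes per copy, as in the driver's test */

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

int main(void)
{
	static char src[CNT], dst[CNT];
	long long start = now_ms(), emsecs = 0;
	unsigned long lcnt;

	for (lcnt = 0; emsecs <= 5; lcnt++) {
		memcpy(dst, src, CNT);	/* stands in for the PIO copy */
		emsecs = now_ms() - start;
	}
	/* 1024 KiB-copies per msec is ~1 GiB/sec */
	if (lcnt < (unsigned long)emsecs * 1024)
		printf("slow: only %lld MiB/sec\n", (long long)lcnt / emsecs);
	else
		printf("ok: %lu copies in %lld ms\n", lcnt, emsecs);
	return 0;
}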
-
-void qib_free_devdata(struct qib_devdata *dd)
-{
- unsigned long flags;
-
- xa_lock_irqsave(&qib_dev_table, flags);
- __xa_erase(&qib_dev_table, dd->unit);
- xa_unlock_irqrestore(&qib_dev_table, flags);
-
-#ifdef CONFIG_DEBUG_FS
- qib_dbg_ibdev_exit(&dd->verbs_dev);
-#endif
- free_percpu(dd->int_counter);
- rvt_dealloc_device(&dd->verbs_dev.rdi);
-}
-
-u64 qib_int_counter(struct qib_devdata *dd)
-{
- int cpu;
- u64 int_counter = 0;
-
- for_each_possible_cpu(cpu)
- int_counter += *per_cpu_ptr(dd->int_counter, cpu);
- return int_counter;
-}
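The summation above is the standard per-CPU counter idiom: writers bump a CPU-local slot with no atomics, and a reader totals every slot. A flat-array sketch of the read side (the kernel's alloc_percpu()/per_cpu_ptr() machinery replaced by an ordinary array):

#include <stdio.h>

#define NR_CPUS 4

static unsigned long long int_counter[NR_CPUS];	/* one slot per CPU */

static unsigned long long total_ints(void)	/* qib_int_counter() shape */
{
	unsigned long long sum = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += int_counter[cpu];
	return sum;
}

int main(void)
{
	int_counter[0] = 10;	/* pretend these CPUs took interrupts */
	int_counter[2] = 5;
	printf("%llu interrupts total\n", total_ints());	/* 15 */
	return 0;
}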
-
-u64 qib_sps_ints(void)
-{
- unsigned long index, flags;
- struct qib_devdata *dd;
- u64 sps_ints = 0;
-
- xa_lock_irqsave(&qib_dev_table, flags);
- xa_for_each(&qib_dev_table, index, dd) {
- sps_ints += qib_int_counter(dd);
- }
- xa_unlock_irqrestore(&qib_dev_table, flags);
- return sps_ints;
-}
-
-/*
- * Allocate our primary per-unit data structure. Must be done via verbs
- * allocator, because the verbs cleanup process both does cleanup and
- * free of the data structure.
- * "extra" is for chip-specific data.
- */
-struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
-{
- struct qib_devdata *dd;
- int ret, nports;
-
- /* "extra" is the per-port data size times the number of ports */
- nports = extra / sizeof(struct qib_pportdata);
- dd = (struct qib_devdata *)rvt_alloc_device(sizeof(*dd) + extra,
- nports);
- if (!dd)
- return ERR_PTR(-ENOMEM);
-
- ret = xa_alloc_irq(&qib_dev_table, &dd->unit, dd, xa_limit_32b,
- GFP_KERNEL);
- if (ret < 0) {
- qib_early_err(&pdev->dev,
- "Could not allocate unit ID: error %d\n", -ret);
- goto bail;
- }
- rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s%d", "qib", dd->unit);
-
- dd->int_counter = alloc_percpu(u64);
- if (!dd->int_counter) {
- ret = -ENOMEM;
- qib_early_err(&pdev->dev,
- "Could not allocate per-cpu int_counter\n");
- goto bail;
- }
-
- if (!qib_cpulist_count) {
- u32 count = num_online_cpus();
-
- qib_cpulist = bitmap_zalloc(count, GFP_KERNEL);
- if (qib_cpulist)
- qib_cpulist_count = count;
- }
-#ifdef CONFIG_DEBUG_FS
- qib_dbg_ibdev_init(&dd->verbs_dev);
-#endif
- return dd;
-bail:
- if (!list_empty(&dd->list))
- list_del_init(&dd->list);
- rvt_dealloc_device(&dd->verbs_dev.rdi);
- return ERR_PTR(ret);
-}
-
-/*
- * Called from freeze mode handlers, and from PCI error
- * reporting code. Should be paranoid about state of
- * system and data structures.
- */
-void qib_disable_after_error(struct qib_devdata *dd)
-{
- if (dd->flags & QIB_INITTED) {
- u32 pidx;
-
- dd->flags &= ~QIB_INITTED;
- if (dd->pport)
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- struct qib_pportdata *ppd;
-
- ppd = dd->pport + pidx;
- if (dd->flags & QIB_PRESENT) {
- qib_set_linkstate(ppd,
- QIB_IB_LINKDOWN_DISABLE);
- dd->f_setextled(ppd, 0);
- }
- *ppd->statusp &= ~QIB_STATUS_IB_READY;
- }
- }
-
- /*
- * Mark as having had an error for driver, and also
- * for /sys and status word mapped to user programs.
- * This marks unit as not usable, until reset.
- */
- if (dd->devstatusp)
- *dd->devstatusp |= QIB_STATUS_HWERROR;
-}
-
-static void qib_remove_one(struct pci_dev *);
-static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
-static void qib_shutdown_one(struct pci_dev *);
-
-#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
-#define PFX QIB_DRV_NAME ": "
-
-static const struct pci_device_id qib_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
- { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
- { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, qib_pci_tbl);
-
-static struct pci_driver qib_driver = {
- .name = QIB_DRV_NAME,
- .probe = qib_init_one,
- .remove = qib_remove_one,
- .shutdown = qib_shutdown_one,
- .id_table = qib_pci_tbl,
- .err_handler = &qib_pci_err_handler,
-};
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
-
-static int qib_notify_dca(struct notifier_block *, unsigned long, void *);
-static struct notifier_block dca_notifier = {
- .notifier_call = qib_notify_dca,
- .next = NULL,
- .priority = 0
-};
-
-static int qib_notify_dca_device(struct device *device, void *data)
-{
- struct qib_devdata *dd = dev_get_drvdata(device);
- unsigned long event = *(unsigned long *)data;
-
- return dd->f_notify_dca(dd, event);
-}
-
-static int qib_notify_dca(struct notifier_block *nb, unsigned long event,
- void *p)
-{
- int rval;
-
- rval = driver_for_each_device(&qib_driver.driver, NULL,
- &event, qib_notify_dca_device);
- return rval ? NOTIFY_BAD : NOTIFY_DONE;
-}
-
-#endif
-
-/*
- * Do all the generic driver unit- and chip-independent memory
- * allocation and initialization.
- */
-static int __init qib_ib_init(void)
-{
- int ret;
-
- ret = qib_dev_init();
- if (ret)
- goto bail;
-
- /*
- * These must be called before the driver is registered with
- * the PCI subsystem.
- */
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dca_register_notify(&dca_notifier);
-#endif
-#ifdef CONFIG_DEBUG_FS
- qib_dbg_init();
-#endif
- ret = pci_register_driver(&qib_driver);
- if (ret < 0) {
- pr_err("Unable to register driver: error %d\n", -ret);
- goto bail_dev;
- }
-
- /* not fatal if it doesn't work */
- if (qib_init_qibfs())
- pr_err("Unable to register ipathfs\n");
- goto bail; /* all OK */
-
-bail_dev:
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dca_unregister_notify(&dca_notifier);
-#endif
-#ifdef CONFIG_DEBUG_FS
- qib_dbg_exit();
-#endif
- qib_dev_cleanup();
-bail:
- return ret;
-}
-
-module_init(qib_ib_init);
-
-/*
- * Do the non-unit driver cleanup, memory free, etc. at unload.
- */
-static void __exit qib_ib_cleanup(void)
-{
- int ret;
-
- ret = qib_exit_qibfs();
- if (ret)
- pr_err("Unable to cleanup counter filesystem: error %d\n",
-        -ret);
-
-#ifdef CONFIG_INFINIBAND_QIB_DCA
- dca_unregister_notify(&dca_notifier);
-#endif
- pci_unregister_driver(&qib_driver);
-#ifdef CONFIG_DEBUG_FS
- qib_dbg_exit();
-#endif
-
- qib_cpulist_count = 0;
- bitmap_free(qib_cpulist);
-
- WARN_ON(!xa_empty(&qib_dev_table));
- qib_dev_cleanup();
-}
-
-module_exit(qib_ib_cleanup);
-
-/* this can only be called after a successful initialization */
-static void cleanup_device_data(struct qib_devdata *dd)
-{
- int ctxt;
- int pidx;
- struct qib_ctxtdata **tmp;
- unsigned long flags;
-
- /* users can't do anything more with chip */
- for (pidx = 0; pidx < dd->num_pports; ++pidx) {
- if (dd->pport[pidx].statusp)
- *dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;
-
- spin_lock(&dd->pport[pidx].cc_shadow_lock);
-
- kfree(dd->pport[pidx].congestion_entries);
- dd->pport[pidx].congestion_entries = NULL;
- kfree(dd->pport[pidx].ccti_entries);
- dd->pport[pidx].ccti_entries = NULL;
- kfree(dd->pport[pidx].ccti_entries_shadow);
- dd->pport[pidx].ccti_entries_shadow = NULL;
- kfree(dd->pport[pidx].congestion_entries_shadow);
- dd->pport[pidx].congestion_entries_shadow = NULL;
-
- spin_unlock(&dd->pport[pidx].cc_shadow_lock);
- }
-
- qib_disable_wc(dd);
-
- if (dd->pioavailregs_dma) {
- dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
- (void *) dd->pioavailregs_dma,
- dd->pioavailregs_phys);
- dd->pioavailregs_dma = NULL;
- }
-
- if (dd->pageshadow) {
- struct page **tmpp = dd->pageshadow;
- dma_addr_t *tmpd = dd->physshadow;
- int i;
-
- for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
- int ctxt_tidbase = ctxt * dd->rcvtidcnt;
- int maxtid = ctxt_tidbase + dd->rcvtidcnt;
-
- for (i = ctxt_tidbase; i < maxtid; i++) {
- if (!tmpp[i])
- continue;
- dma_unmap_page(&dd->pcidev->dev, tmpd[i],
- PAGE_SIZE, DMA_FROM_DEVICE);
- qib_release_user_pages(&tmpp[i], 1);
- tmpp[i] = NULL;
- }
- }
-
- dd->pageshadow = NULL;
- vfree(tmpp);
- dd->physshadow = NULL;
- vfree(tmpd);
- }
-
- /*
- * Free any resources still in use (usually just kernel contexts)
- * at unload; we loop up to ctxtcnt, because that's what we allocate.
- * We acquire lock to be really paranoid that rcd isn't being
- * accessed from some interrupt-related code (that should not happen,
- * but best to be sure).
- */
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- tmp = dd->rcd;
- dd->rcd = NULL;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
- struct qib_ctxtdata *rcd = tmp[ctxt];
-
- tmp[ctxt] = NULL; /* debugging paranoia */
- qib_free_ctxtdata(dd, rcd);
- }
- kfree(tmp);
-}
-
-/*
- * Clean up on unit shutdown, or error during unit load after
- * successful initialization.
- */
-static void qib_postinit_cleanup(struct qib_devdata *dd)
-{
- /*
- * Clean up chip-specific stuff.
- * We check for NULL here, because it's outside
- * the kregbase check, and we need to call it
- * after the free_irq. Thus it's possible that
- * the function pointers were never initialized.
- */
- if (dd->f_cleanup)
- dd->f_cleanup(dd);
-
- qib_pcie_ddcleanup(dd);
-
- cleanup_device_data(dd);
-
- qib_free_devdata(dd);
-}
-
-static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int ret, j, pidx, initfail;
- struct qib_devdata *dd = NULL;
-
- ret = qib_pcie_init(pdev, ent);
- if (ret)
- goto bail;
-
- /*
- * Do device-specific initialization, function table setup, dd
- * allocation, etc.
- */
- switch (ent->device) {
- case PCI_DEVICE_ID_QLOGIC_IB_6120:
-#ifdef CONFIG_PCI_MSI
- dd = qib_init_iba6120_funcs(pdev, ent);
-#else
- qib_early_err(&pdev->dev,
- "Intel PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
- ent->device);
- dd = ERR_PTR(-ENODEV);
-#endif
- break;
-
- case PCI_DEVICE_ID_QLOGIC_IB_7220:
- dd = qib_init_iba7220_funcs(pdev, ent);
- break;
-
- case PCI_DEVICE_ID_QLOGIC_IB_7322:
- dd = qib_init_iba7322_funcs(pdev, ent);
- break;
-
- default:
- qib_early_err(&pdev->dev,
- "Failing on unknown Intel deviceid 0x%x\n",
- ent->device);
- ret = -ENODEV;
- }
-
- if (IS_ERR(dd))
- ret = PTR_ERR(dd);
- if (ret)
- goto bail; /* error already printed */
-
- ret = qib_create_workqueues(dd);
- if (ret)
- goto bail;
-
- /* do the generic initialization */
- initfail = qib_init(dd, 0);
-
- ret = qib_register_ib_device(dd);
-
- /*
- * Now ready for use. This should be cleared whenever we
- * detect a reset, or initiate one. On earlier failure,
- * we still create devices, so diags, etc. can be used
- * to determine the cause of the problem.
- */
- if (!qib_mini_init && !initfail && !ret)
- dd->flags |= QIB_INITTED;
-
- j = qib_device_create(dd);
- if (j)
- qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
- j = qibfs_add(dd);
- if (j)
- qib_dev_err(dd, "Failed filesystem setup for counters: %d\n",
- -j);
-
- if (qib_mini_init || initfail || ret) {
- qib_stop_timers(dd);
- flush_workqueue(ib_wq);
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
- dd->f_quiet_serdes(dd->pport + pidx);
- if (qib_mini_init)
- goto bail;
- if (!j) {
- (void) qibfs_remove(dd);
- qib_device_remove(dd);
- }
- if (!ret)
- qib_unregister_ib_device(dd);
- qib_postinit_cleanup(dd);
- if (initfail)
- ret = initfail;
- goto bail;
- }
-
- ret = qib_enable_wc(dd);
- if (ret) {
- qib_dev_err(dd,
- "Write combining not enabled (err %d): performance may be poor\n",
- -ret);
- ret = 0;
- }
-
- qib_verify_pioperf(dd);
-bail:
- return ret;
-}
-
-static void qib_remove_one(struct pci_dev *pdev)
-{
- struct qib_devdata *dd = pci_get_drvdata(pdev);
- int ret;
-
- /* unregister from IB core */
- qib_unregister_ib_device(dd);
-
- /*
- * Disable the IB link, disable interrupts on the device,
- * clear dma engines, etc.
- */
- if (!qib_mini_init)
- qib_shutdown_device(dd);
-
- qib_stop_timers(dd);
-
- /* wait until all of our (qsfp) queue_work() calls complete */
- flush_workqueue(ib_wq);
-
- ret = qibfs_remove(dd);
- if (ret)
- qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n",
- -ret);
-
- qib_device_remove(dd);
-
- qib_postinit_cleanup(dd);
-}
-
-static void qib_shutdown_one(struct pci_dev *pdev)
-{
- struct qib_devdata *dd = pci_get_drvdata(pdev);
-
- qib_shutdown_device(dd);
-}
-
-/**
- * qib_create_rcvhdrq - create a receive header queue
- * @dd: the qlogic_ib device
- * @rcd: the context data
- *
- * This must be contiguous memory (from an I/O perspective), and must be
- * DMA'able (which means for some systems, it will go through an IOMMU,
- * or be forced into a low address range).
- */
-int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
-{
- unsigned amt;
- int old_node_id;
-
- if (!rcd->rcvhdrq) {
- dma_addr_t phys_hdrqtail;
-
- amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
- sizeof(u32), PAGE_SIZE);
-
- old_node_id = dev_to_node(&dd->pcidev->dev);
- set_dev_node(&dd->pcidev->dev, rcd->node_id);
- rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
- &rcd->rcvhdrq_phys, GFP_KERNEL);
- set_dev_node(&dd->pcidev->dev, old_node_id);
-
- if (!rcd->rcvhdrq) {
- qib_dev_err(dd,
- "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
- amt, rcd->ctxt);
- goto bail;
- }
-
- if (rcd->ctxt >= dd->first_user_ctxt) {
- rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
- if (!rcd->user_event_mask)
- goto bail_free_hdrq;
- }
-
- if (!(dd->flags & QIB_NODMA_RTAIL)) {
- set_dev_node(&dd->pcidev->dev, rcd->node_id);
- rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
- &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
- GFP_KERNEL);
- set_dev_node(&dd->pcidev->dev, old_node_id);
- if (!rcd->rcvhdrtail_kvaddr)
- goto bail_free;
- rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
- }
-
- rcd->rcvhdrq_size = amt;
- }
-
- /* clear for security and sanity on each use */
- memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
- if (rcd->rcvhdrtail_kvaddr)
- memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE);
- return 0;
-
-bail_free:
- qib_dev_err(dd,
- "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
- rcd->ctxt);
- vfree(rcd->user_event_mask);
- rcd->user_event_mask = NULL;
-bail_free_hdrq:
- dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
- rcd->rcvhdrq_phys);
- rcd->rcvhdrq = NULL;
-bail:
- return -ENOMEM;
-}
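The `amt` computation above is the only sizing in the function: entries, times entry size in 32-bit words, times 4 bytes, rounded up to a whole page for dma_alloc_coherent(). A worked example with plausible (chip-dependent, invented) values:

#include <stdio.h>

#define PAGE_SZ 4096u
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned rcvhdrcnt = 512;	/* queue entries */
	unsigned rcvhdrentsize = 32;	/* 32-bit words per entry */
	unsigned amt = ALIGN_UP(rcvhdrcnt * rcvhdrentsize * 4u, PAGE_SZ);

	printf("rcvhdrq: %u bytes (%u pages)\n", amt, amt / PAGE_SZ);
	return 0;
}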
-
-/**
- * qib_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
- * @rcd: the context we are setting up.
- *
- * Allocate the eager TID buffers and program them into the chip.
- * They are no longer completely contiguous; we do multiple allocation
- * calls, since asking for too much per call gets the OOM code
- * involved, with disastrous results on some kernels.
- */
-int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
-{
- struct qib_devdata *dd = rcd->dd;
- unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
- size_t size;
- int old_node_id;
-
- egrcnt = rcd->rcvegrcnt;
- egroff = rcd->rcvegr_tid_base;
- egrsize = dd->rcvegrbufsize;
-
- chunk = rcd->rcvegrbuf_chunks;
- egrperchunk = rcd->rcvegrbufs_perchunk;
- size = rcd->rcvegrbuf_size;
- if (!rcd->rcvegrbuf) {
- rcd->rcvegrbuf =
- kcalloc_node(chunk, sizeof(rcd->rcvegrbuf[0]),
- GFP_KERNEL, rcd->node_id);
- if (!rcd->rcvegrbuf)
- goto bail;
- }
- if (!rcd->rcvegrbuf_phys) {
- rcd->rcvegrbuf_phys =
- kmalloc_array_node(chunk,
- sizeof(rcd->rcvegrbuf_phys[0]),
- GFP_KERNEL, rcd->node_id);
- if (!rcd->rcvegrbuf_phys)
- goto bail_rcvegrbuf;
- }
- for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
- if (rcd->rcvegrbuf[e])
- continue;
-
- old_node_id = dev_to_node(&dd->pcidev->dev);
- set_dev_node(&dd->pcidev->dev, rcd->node_id);
- rcd->rcvegrbuf[e] =
- dma_alloc_coherent(&dd->pcidev->dev, size,
- &rcd->rcvegrbuf_phys[e],
- GFP_KERNEL);
- set_dev_node(&dd->pcidev->dev, old_node_id);
- if (!rcd->rcvegrbuf[e])
- goto bail_rcvegrbuf_phys;
- }
-
- rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0];
-
- for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) {
- dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
- unsigned i;
-
- /* clear for security and sanity on each use */
- memset(rcd->rcvegrbuf[chunk], 0, size);
-
- for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
- dd->f_put_tid(dd, e + egroff +
- (u64 __iomem *)
- ((char __iomem *)
- dd->kregbase +
- dd->rcvegrbase),
- RCVHQ_RCV_TYPE_EAGER, pa);
- pa += egrsize;
- }
- cond_resched(); /* don't hog the cpu */
- }
-
- return 0;
-
-bail_rcvegrbuf_phys:
- for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++)
- dma_free_coherent(&dd->pcidev->dev, size,
- rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]);
- kfree(rcd->rcvegrbuf_phys);
- rcd->rcvegrbuf_phys = NULL;
-bail_rcvegrbuf:
- kfree(rcd->rcvegrbuf);
- rcd->rcvegrbuf = NULL;
-bail:
- return -ENOMEM;
-}
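The TID-programming loop above is the interesting part of the chunked scheme: each chunk is one contiguous DMA allocation, and consecutive eager TIDs get addresses egrsize apart within it. A sketch of the same two-level walk with invented chunk addresses:

#include <stdio.h>

int main(void)
{
	unsigned egrcnt = 10;		/* eager TIDs to program */
	unsigned egrperchunk = 4;	/* buffers per DMA chunk */
	unsigned egrsize = 4096;	/* bytes per eager buffer */
	unsigned long long chunk_pa[] = { 0x10000, 0x90000, 0x120000 };
	unsigned e = 0;

	for (unsigned chunk = 0; e < egrcnt; chunk++) {
		unsigned long long pa = chunk_pa[chunk];

		for (unsigned i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
			printf("TID %2u -> 0x%llx\n", e, pa);
			pa += egrsize;	/* next buffer in this chunk */
		}
	}
	return 0;
}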
-
-/*
- * Note: Changes to this routine should be mirrored
- * for the diagnostics routine qib_remap_ioaddr32().
- * There is also related code for VL15 buffers in qib_init_7322_variables().
- * The teardown code that unmaps is in qib_pcie_ddcleanup()
- */
-int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
-{
- u64 __iomem *qib_kregbase = NULL;
- void __iomem *qib_piobase = NULL;
- u64 __iomem *qib_userbase = NULL;
- u64 qib_kreglen;
- u64 qib_pio2koffset = dd->piobufbase & 0xffffffff;
- u64 qib_pio4koffset = dd->piobufbase >> 32;
- u64 qib_pio2klen = dd->piobcnt2k * dd->palign;
- u64 qib_pio4klen = dd->piobcnt4k * dd->align4k;
- u64 qib_physaddr = dd->physaddr;
- u64 qib_piolen;
- u64 qib_userlen = 0;
-
- /*
- * Free the old mapping because the kernel will try to reuse the
- * old mapping and not create a new mapping with the
- * write combining attribute.
- */
- iounmap(dd->kregbase);
- dd->kregbase = NULL;
-
- /*
- * Assumes chip address space looks like:
- * - kregs + sregs + cregs + uregs (in any order)
- * - piobufs (2K and 4K bufs in either order)
- * or:
- * - kregs + sregs + cregs (in any order)
- * - piobufs (2K and 4K bufs in either order)
- * - uregs
- */
- if (dd->piobcnt4k == 0) {
- qib_kreglen = qib_pio2koffset;
- qib_piolen = qib_pio2klen;
- } else if (qib_pio2koffset < qib_pio4koffset) {
- qib_kreglen = qib_pio2koffset;
- qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen;
- } else {
- qib_kreglen = qib_pio4koffset;
- qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen;
- }
- qib_piolen += vl15buflen;
- /* Map just the configured contexts (not all hw contexts) */
- if (dd->uregbase > qib_kreglen)
- qib_userlen = dd->ureg_align * dd->cfgctxts;
-
- /* Sanity checks passed, now create the new mappings */
- qib_kregbase = ioremap(qib_physaddr, qib_kreglen);
- if (!qib_kregbase)
- goto bail;
-
- qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen);
- if (!qib_piobase)
- goto bail_kregbase;
-
- if (qib_userlen) {
- qib_userbase = ioremap(qib_physaddr + dd->uregbase,
- qib_userlen);
- if (!qib_userbase)
- goto bail_piobase;
- }
-
- dd->kregbase = qib_kregbase;
- dd->kregend = (u64 __iomem *)
- ((char __iomem *) qib_kregbase + qib_kreglen);
- dd->piobase = qib_piobase;
- dd->pio2kbase = (void __iomem *)
- (((char __iomem *) dd->piobase) +
- qib_pio2koffset - qib_kreglen);
- if (dd->piobcnt4k)
- dd->pio4kbase = (void __iomem *)
- (((char __iomem *) dd->piobase) +
- qib_pio4koffset - qib_kreglen);
- if (qib_userlen)
- /* ureg will now be accessed relative to dd->userbase */
- dd->userbase = qib_userbase;
- return 0;
-
-bail_piobase:
- iounmap(qib_piobase);
-bail_kregbase:
- iounmap(qib_kregbase);
-bail:
- return -ENOMEM;
-}
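The offset juggling above splits a single BAR at the start of whichever PIO buffer pool sits lowest: registers below that point get a normal mapping, and everything from there up is remapped write-combining. A worked example of the math with invented offsets:

#include <stdio.h>

int main(void)
{
	unsigned long long pio2koff = 0x100000, pio2klen = 0x80000;
	unsigned long long pio4koff = 0x200000, pio4klen = 0x40000;
	unsigned long long kreglen, piolen;

	if (pio2koff < pio4koff) {	/* 2K buffers come first */
		kreglen = pio2koff;
		piolen = pio4koff + pio4klen - kreglen;
	} else {			/* 4K buffers come first */
		kreglen = pio4koff;
		piolen = pio2koff + pio2klen - kreglen;
	}
	printf("regs:    [0x0, 0x%llx)\n", kreglen);
	printf("pio(wc): [0x%llx, 0x%llx)\n", kreglen, kreglen + piolen);
	return 0;
}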
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
deleted file mode 100644
index 93357823c6c0..000000000000
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/delay.h>
-
-#include "qib.h"
-#include "qib_common.h"
-
-/**
- * qib_format_hwmsg - format a single hwerror message
- * @msg: message buffer
- * @msgl: length of message buffer
- * @hwmsg: message to add to message buffer
- */
-static void qib_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
-{
- strlcat(msg, "[", msgl);
- strlcat(msg, hwmsg, msgl);
- strlcat(msg, "]", msgl);
-}
-
-/**
- * qib_format_hwerrors - format hardware error messages for display
- * @hwerrs: hardware errors bit vector
- * @hwerrmsgs: hardware error descriptions
- * @nhwerrmsgs: number of hwerrmsgs
- * @msg: message buffer
- * @msgl: message buffer length
- */
-void qib_format_hwerrors(u64 hwerrs, const struct qib_hwerror_msgs *hwerrmsgs,
- size_t nhwerrmsgs, char *msg, size_t msgl)
-{
- int i;
-
- for (i = 0; i < nhwerrmsgs; i++)
- if (hwerrs & hwerrmsgs[i].mask)
- qib_format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
-}
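The two helpers above implement a generic "decode a bit vector against a message table" scheme. The same thing compiles standalone with a couple of stand-in types (the error names and bit positions below are invented for illustration):

#include <stdio.h>
#include <string.h>

struct hwerror_msg {
	unsigned long long mask;
	const char *msg;
};

static const struct hwerror_msg msgs[] = {
	{ 1ULL << 0, "pcie poisoned tlp" },	/* made-up error bits */
	{ 1ULL << 3, "memory parity" },
};

int main(void)
{
	unsigned long long hwerrs = (1ULL << 0) | (1ULL << 3);
	char buf[128] = "";

	for (size_t i = 0; i < sizeof(msgs) / sizeof(msgs[0]); i++)
		if (hwerrs & msgs[i].mask) {
			/* bounded appends, mimicking strlcat() */
			strncat(buf, "[", sizeof(buf) - strlen(buf) - 1);
			strncat(buf, msgs[i].msg, sizeof(buf) - strlen(buf) - 1);
			strncat(buf, "]", sizeof(buf) - strlen(buf) - 1);
		}
	printf("%s\n", buf);	/* [pcie poisoned tlp][memory parity] */
	return 0;
}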
-
-static void signal_ib_event(struct qib_pportdata *ppd, enum ib_event_type ev)
-{
- struct ib_event event;
- struct qib_devdata *dd = ppd->dd;
-
- event.device = &dd->verbs_dev.rdi.ibdev;
- event.element.port_num = ppd->port;
- event.event = ev;
- ib_dispatch_event(&event);
-}
-
-void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
-{
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
- u32 lstate;
- u8 ltstate;
- enum ib_event_type ev = 0;
-
- lstate = dd->f_iblink_state(ibcs); /* linkstate */
- ltstate = dd->f_ibphys_portstate(ibcs);
-
- /*
- * If linkstate transitions into INIT from any of the various down
- * states, or if it transitions from any of the up (INIT or better)
- * states into any of the down states (except link recovery), then
- * call the chip-specific code to take appropriate actions.
- *
- * ppd->lflags could be 0 if this is the first time the interrupt
- * handlers has been called but the link is already up.
- */
- if (lstate >= IB_PORT_INIT &&
- (!ppd->lflags || (ppd->lflags & QIBL_LINKDOWN)) &&
- ltstate == IB_PHYSPORTSTATE_LINKUP) {
- /* transitioned to UP */
- if (dd->f_ib_updown(ppd, 1, ibcs))
- goto skip_ibchange; /* chip-code handled */
- } else if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
- QIBL_LINKACTIVE | QIBL_IB_FORCE_NOTIFY)) {
- if (ltstate != IB_PHYSPORTSTATE_LINKUP &&
- ltstate <= IB_PHYSPORTSTATE_CFG_TRAIN &&
- dd->f_ib_updown(ppd, 0, ibcs))
- goto skip_ibchange; /* chip-code handled */
- qib_set_uevent_bits(ppd, _QIB_EVENT_LINKDOWN_BIT);
- }
-
- if (lstate != IB_PORT_DOWN) {
- /* lstate is INIT, ARMED, or ACTIVE */
- if (lstate != IB_PORT_ACTIVE) {
- *ppd->statusp &= ~QIB_STATUS_IB_READY;
- if (ppd->lflags & QIBL_LINKACTIVE)
- ev = IB_EVENT_PORT_ERR;
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- if (lstate == IB_PORT_ARMED) {
- ppd->lflags |= QIBL_LINKARMED | QIBL_LINKV;
- ppd->lflags &= ~(QIBL_LINKINIT |
- QIBL_LINKDOWN | QIBL_LINKACTIVE);
- } else {
- ppd->lflags |= QIBL_LINKINIT | QIBL_LINKV;
- ppd->lflags &= ~(QIBL_LINKARMED |
- QIBL_LINKDOWN | QIBL_LINKACTIVE);
- }
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- /* start a 75msec timer to clear symbol errors */
- mod_timer(&ppd->symerr_clear_timer,
- msecs_to_jiffies(75));
- } else if (ltstate == IB_PHYSPORTSTATE_LINKUP &&
- !(ppd->lflags & QIBL_LINKACTIVE)) {
- /* active, but not active deferred */
- qib_hol_up(ppd); /* useful only for 6120 now */
- *ppd->statusp |=
- QIB_STATUS_IB_READY | QIB_STATUS_IB_CONF;
- qib_clear_symerror_on_linkup(&ppd->symerr_clear_timer);
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_LINKACTIVE | QIBL_LINKV;
- ppd->lflags &= ~(QIBL_LINKINIT |
- QIBL_LINKDOWN | QIBL_LINKARMED);
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- if (dd->flags & QIB_HAS_SEND_DMA)
- qib_sdma_process_event(ppd,
- qib_sdma_event_e30_go_running);
- ev = IB_EVENT_PORT_ACTIVE;
- dd->f_setextled(ppd, 1);
- }
- } else { /* down */
- if (ppd->lflags & QIBL_LINKACTIVE)
- ev = IB_EVENT_PORT_ERR;
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_LINKDOWN | QIBL_LINKV;
- ppd->lflags &= ~(QIBL_LINKINIT |
- QIBL_LINKACTIVE | QIBL_LINKARMED);
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- *ppd->statusp &= ~QIB_STATUS_IB_READY;
- }
-
-skip_ibchange:
- ppd->lastibcstat = ibcs;
- if (ev)
- signal_ib_event(ppd, ev);
-}
-
-void qib_clear_symerror_on_linkup(struct timer_list *t)
-{
- struct qib_pportdata *ppd = timer_container_of(ppd, t,
- symerr_clear_timer);
-
- if (ppd->lflags & QIBL_LINKACTIVE)
- return;
-
- ppd->ibport_data.z_symbol_error_counter =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
-}
-
-/*
- * Handle receive interrupts for user ctxts; this means a user
- * process was waiting for a packet to arrive, and didn't want
- * to poll.
- */
-void qib_handle_urcv(struct qib_devdata *dd, u64 ctxtr)
-{
- struct qib_ctxtdata *rcd;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- for (i = dd->first_user_ctxt; dd->rcd && i < dd->cfgctxts; i++) {
- if (!(ctxtr & (1ULL << i)))
- continue;
- rcd = dd->rcd[i];
- if (!rcd || !rcd->cnt)
- continue;
-
- if (test_and_clear_bit(QIB_CTXT_WAITING_RCV, &rcd->flag)) {
- wake_up_interruptible(&rcd->wait);
- dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_DIS,
- rcd->ctxt);
- } else if (test_and_clear_bit(QIB_CTXT_WAITING_URG,
- &rcd->flag)) {
- rcd->urgent++;
- wake_up_interruptible(&rcd->wait);
- }
- }
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
-}
-
-void qib_bad_intrstatus(struct qib_devdata *dd)
-{
- static int allbits;
-
- /* separate routine, for better optimization of qib_intr() */
-
- /*
- * We print the message and disable interrupts, in hope of
- * having a better chance of debugging the problem.
- */
- qib_dev_err(dd,
- "Read of chip interrupt status failed disabling interrupts\n");
- if (allbits++) {
- /* disable interrupt delivery, something is very wrong */
- if (allbits == 2)
- dd->f_set_intr_state(dd, 0);
- if (allbits == 3) {
- qib_dev_err(dd,
- "2nd bad interrupt status, unregistering interrupts\n");
- dd->flags |= QIB_BADINTR;
- dd->flags &= ~QIB_INITTED;
- dd->f_free_irq(dd);
- }
- }
-}
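The escalation above hinges on a function-local static: the first failure only logs, the second masks interrupts, and the third unhooks them entirely. Here is the same state machine isolated, with printouts in place of the hardware calls:

#include <stdio.h>

static void bad_intrstatus(void)
{
	static int allbits;	/* persists across calls */

	fprintf(stderr, "interrupt status read failed\n");
	if (allbits++) {
		if (allbits == 2)
			fprintf(stderr, " -> masking interrupts\n");
		if (allbits == 3)
			fprintf(stderr, " -> unregistering interrupts\n");
	}
}

int main(void)
{
	bad_intrstatus();	/* 1st: log only (allbits 0 -> 1) */
	bad_intrstatus();	/* 2nd: mask     (allbits 1 -> 2) */
	bad_intrstatus();	/* 3rd: unhook   (allbits 2 -> 3) */
	return 0;
}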
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
deleted file mode 100644
index d99932b2ce21..000000000000
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ /dev/null
@@ -1,2450 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_smi.h>
-
-#include "qib.h"
-#include "qib_mad.h"
-
-static int reply(struct ib_smp *smp)
-{
- /*
- * The verbs framework will handle the directed/LID route
- * packet changes.
- */
- smp->method = IB_MGMT_METHOD_GET_RESP;
- if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- smp->status |= IB_SMP_DIRECTION;
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
-}
-
-static int reply_failure(struct ib_smp *smp)
-{
- /*
- * The verbs framework will handle the directed/LID route
- * packet changes.
- */
- smp->method = IB_MGMT_METHOD_GET_RESP;
- if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- smp->status |= IB_SMP_DIRECTION;
- return IB_MAD_RESULT_FAILURE | IB_MAD_RESULT_REPLY;
-}
-
-static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
-{
- struct ib_mad_send_buf *send_buf;
- struct ib_mad_agent *agent;
- struct ib_smp *smp;
- int ret;
- unsigned long flags;
- unsigned long timeout;
-
- agent = ibp->rvp.send_agent;
- if (!agent)
- return;
-
- /* o14-3.2.1 */
- if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
- return;
-
- /* o14-2 */
- if (ibp->rvp.trap_timeout &&
- time_before(jiffies, ibp->rvp.trap_timeout))
- return;
-
- send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
- IB_MGMT_MAD_DATA, GFP_ATOMIC,
- IB_MGMT_BASE_VERSION);
- if (IS_ERR(send_buf))
- return;
-
- smp = send_buf->mad;
- smp->base_version = IB_MGMT_BASE_VERSION;
- smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- smp->class_version = 1;
- smp->method = IB_MGMT_METHOD_TRAP;
- ibp->rvp.tid++;
- smp->tid = cpu_to_be64(ibp->rvp.tid);
- smp->attr_id = IB_SMP_ATTR_NOTICE;
- /* o14-1: smp->mkey = 0; */
- memcpy(smp->data, data, len);
-
- spin_lock_irqsave(&ibp->rvp.lock, flags);
- if (!ibp->rvp.sm_ah) {
- if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
- struct ib_ah *ah;
-
- ah = qib_create_qp0_ah(ibp, (u16)ibp->rvp.sm_lid);
- if (IS_ERR(ah))
- ret = PTR_ERR(ah);
- else {
- send_buf->ah = ah;
- ibp->rvp.sm_ah = ibah_to_rvtah(ah);
- ret = 0;
- }
- } else
- ret = -EINVAL;
- } else {
- send_buf->ah = &ibp->rvp.sm_ah->ibah;
- ret = 0;
- }
- spin_unlock_irqrestore(&ibp->rvp.lock, flags);
-
- if (!ret)
- ret = ib_post_send_mad(send_buf, NULL);
- if (!ret) {
- /* 4.096 usec. */
- timeout = (4096 * (1UL << ibp->rvp.subnet_timeout)) / 1000;
- ibp->rvp.trap_timeout = jiffies + usecs_to_jiffies(timeout);
- } else {
- ib_free_send_mad(send_buf);
- ibp->rvp.trap_timeout = 0;
- }
-}
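The o14-2 throttle above turns the port's SubnetTimeout into a minimum resend interval: the spec value is 4.096 usec scaled by 2^subnet_timeout, which the driver reduces to whole microseconds before converting to jiffies. A quick table of what that works out to:

#include <stdio.h>

int main(void)
{
	for (unsigned to = 0; to <= 20; to += 5) {
		/* 4.096 usec * 2^to, expressed in whole microseconds */
		unsigned long long usec = (4096ULL << to) / 1000;

		printf("subnet_timeout=%2u -> %llu usec between traps\n",
		       to, usec);
	}
	return 0;
}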
-
-/*
- * Send a bad P_Key trap (ch. 14.3.8).
- */
-void qib_bad_pkey(struct qib_ibport *ibp, u32 key, u32 sl,
- u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
-{
- struct ib_mad_notice_attr data;
-
- ibp->rvp.n_pkt_drops++;
- ibp->rvp.pkey_violations++;
-
- /* Send violation trap */
- data.generic_type = IB_NOTICE_TYPE_SECURITY;
- data.prod_type_msb = 0;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = IB_NOTICE_TRAP_BAD_PKEY;
- data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
- data.toggle_count = 0;
- memset(&data.details, 0, sizeof(data.details));
- data.details.ntc_257_258.lid1 = lid1;
- data.details.ntc_257_258.lid2 = lid2;
- data.details.ntc_257_258.key = cpu_to_be32(key);
- data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
- data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);
-
- qib_send_trap(ibp, &data, sizeof(data));
-}
-
-/*
- * Send a bad M_Key trap (ch. 14.3.9).
- */
-static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
-{
- struct ib_mad_notice_attr data;
-
- /* Send violation trap */
- data.generic_type = IB_NOTICE_TYPE_SECURITY;
- data.prod_type_msb = 0;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
- data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
- data.toggle_count = 0;
- memset(&data.details, 0, sizeof(data.details));
- data.details.ntc_256.lid = data.issuer_lid;
- data.details.ntc_256.method = smp->method;
- data.details.ntc_256.attr_id = smp->attr_id;
- data.details.ntc_256.attr_mod = smp->attr_mod;
- data.details.ntc_256.mkey = smp->mkey;
- if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- u8 hop_cnt;
-
- data.details.ntc_256.dr_slid = smp->dr_slid;
- data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
- hop_cnt = smp->hop_cnt;
- if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
- data.details.ntc_256.dr_trunc_hop |=
- IB_NOTICE_TRAP_DR_TRUNC;
- hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
- }
- data.details.ntc_256.dr_trunc_hop |= hop_cnt;
- memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,
- hop_cnt);
- }
-
- qib_send_trap(ibp, &data, sizeof(data));
-}
-
-/*
- * Send a Port Capability Mask Changed trap (ch. 14.3.11).
- */
-void qib_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num)
-{
- struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
- struct qib_devdata *dd = dd_from_dev(ibdev);
- struct qib_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
- struct ib_mad_notice_attr data;
-
- data.generic_type = IB_NOTICE_TYPE_INFO;
- data.prod_type_msb = 0;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
- data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
- data.toggle_count = 0;
- memset(&data.details, 0, sizeof(data.details));
- data.details.ntc_144.lid = data.issuer_lid;
- data.details.ntc_144.new_cap_mask =
- cpu_to_be32(ibp->rvp.port_cap_flags);
- qib_send_trap(ibp, &data, sizeof(data));
-}
-
-/*
- * Send a System Image GUID Changed trap (ch. 14.3.12).
- */
-void qib_sys_guid_chg(struct qib_ibport *ibp)
-{
- struct ib_mad_notice_attr data;
-
- data.generic_type = IB_NOTICE_TYPE_INFO;
- data.prod_type_msb = 0;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
- data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
- data.toggle_count = 0;
- memset(&data.details, 0, sizeof(data.details));
- data.details.ntc_145.lid = data.issuer_lid;
- data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;
-
- qib_send_trap(ibp, &data, sizeof(data));
-}
-
-/*
- * Send a Node Description Changed trap (ch. 14.3.13).
- */
-void qib_node_desc_chg(struct qib_ibport *ibp)
-{
- struct ib_mad_notice_attr data;
-
- data.generic_type = IB_NOTICE_TYPE_INFO;
- data.prod_type_msb = 0;
- data.prod_type_lsb = IB_NOTICE_PROD_CA;
- data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
- data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
- data.toggle_count = 0;
- memset(&data.details, 0, sizeof(data.details));
- data.details.ntc_144.lid = data.issuer_lid;
- data.details.ntc_144.local_changes = 1;
- data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;
-
- qib_send_trap(ibp, &data, sizeof(data));
-}
-
-static int subn_get_nodedescription(struct ib_smp *smp,
- struct ib_device *ibdev)
-{
- if (smp->attr_mod)
- smp->status |= IB_SMP_INVALID_FIELD;
-
- memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));
-
- return reply(smp);
-}
-
-static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- u32 majrev, minrev;
- unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
-
- /* GUID 0 is illegal */
- if (smp->attr_mod || pidx >= dd->num_pports ||
- dd->pport[pidx].guid == 0)
- smp->status |= IB_SMP_INVALID_FIELD;
- else
- nip->port_guid = dd->pport[pidx].guid;
-
- nip->base_version = 1;
- nip->class_version = 1;
- nip->node_type = 1; /* channel adapter */
- nip->num_ports = ibdev->phys_port_cnt;
- /* This is already in network order */
- nip->sys_guid = ib_qib_sys_image_guid;
- nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */
- nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
- nip->device_id = cpu_to_be16(dd->deviceid);
- majrev = dd->majrev;
- minrev = dd->minrev;
- nip->revision = cpu_to_be32((majrev << 16) | minrev);
- nip->local_port_num = port;
- nip->vendor_id[0] = QIB_SRC_OUI_1;
- nip->vendor_id[1] = QIB_SRC_OUI_2;
- nip->vendor_id[2] = QIB_SRC_OUI_3;
-
- return reply(smp);
-}
-
-static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
- __be64 *p = (__be64 *) smp->data;
- unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
-
- /* 32 blocks of 8 64-bit GUIDs per block */
-
- memset(smp->data, 0, sizeof(smp->data));
-
- if (startgx == 0 && pidx < dd->num_pports) {
- struct qib_pportdata *ppd = dd->pport + pidx;
- struct qib_ibport *ibp = &ppd->ibport_data;
- __be64 g = ppd->guid;
- unsigned i;
-
- /* GUID 0 is illegal */
- if (g == 0)
- smp->status |= IB_SMP_INVALID_FIELD;
- else {
- /* The first is a copy of the read-only HW GUID. */
- p[0] = g;
- for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
- p[i] = ibp->guids[i - 1];
- }
- } else
- smp->status |= IB_SMP_INVALID_FIELD;
-
- return reply(smp);
-}
-
-static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)
-{
- (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
-}
-
-static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)
-{
- (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
-}
-
-static int get_overrunthreshold(struct qib_pportdata *ppd)
-{
- return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
-}
-
-/**
- * set_overrunthreshold - set the overrun threshold
- * @ppd: the physical port data
- * @n: the new threshold
- *
- * Note that this will only take effect when the link state changes.
- */
-static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
-{
- (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
- (u32)n);
- return 0;
-}
-
-static int get_phyerrthreshold(struct qib_pportdata *ppd)
-{
- return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
-}
-
-/**
- * set_phyerrthreshold - set the physical error threshold
- * @ppd: the physical port data
- * @n: the new threshold
- *
- * Note that this will only take effect when the link state changes.
- */
-static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
-{
- (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
- (u32)n);
- return 0;
-}
-
-/**
- * get_linkdowndefaultstate - get the default linkdown state
- * @ppd: the physical port data
- *
- * Returns zero if the default is POLL, 1 if the default is SLEEP.
- */
-static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
-{
- return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
- IB_LINKINITCMD_SLEEP;
-}
-
-static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
-{
- int valid_mkey = 0;
- int ret = 0;
-
- /* Is the mkey in the process of expiring? */
- if (ibp->rvp.mkey_lease_timeout &&
- time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) {
- /* Clear timeout and mkey protection field. */
- ibp->rvp.mkey_lease_timeout = 0;
- ibp->rvp.mkeyprot = 0;
- }
-
- if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 ||
- ibp->rvp.mkey == smp->mkey)
- valid_mkey = 1;
-
- /* Unset lease timeout on any valid Get/Set/TrapRepress */
- if (valid_mkey && ibp->rvp.mkey_lease_timeout &&
- (smp->method == IB_MGMT_METHOD_GET ||
- smp->method == IB_MGMT_METHOD_SET ||
- smp->method == IB_MGMT_METHOD_TRAP_REPRESS))
- ibp->rvp.mkey_lease_timeout = 0;
-
- if (!valid_mkey) {
- switch (smp->method) {
- case IB_MGMT_METHOD_GET:
- /* Bad mkey not a violation below level 2 */
- if (ibp->rvp.mkeyprot < 2)
- break;
- fallthrough;
- case IB_MGMT_METHOD_SET:
- case IB_MGMT_METHOD_TRAP_REPRESS:
- if (ibp->rvp.mkey_violations != 0xFFFF)
- ++ibp->rvp.mkey_violations;
- if (!ibp->rvp.mkey_lease_timeout &&
- ibp->rvp.mkey_lease_period)
- ibp->rvp.mkey_lease_timeout = jiffies +
- ibp->rvp.mkey_lease_period * HZ;
- /* Generate a trap notice. */
- qib_bad_mkey(ibp, smp);
- ret = 1;
- }
- }
-
- return ret;
-}
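Buried in the branches above is a three-clause validity rule: the MAD layer said to ignore M_Keys, or the port has no key set, or the keys match. A condensed sketch of just that predicate (the flag value is a stand-in, not the real IB_MAD_IGNORE_MKEY constant):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IGNORE_MKEY 0x1		/* stand-in for IB_MAD_IGNORE_MKEY */

static bool mkey_valid(uint64_t port_mkey, uint64_t smp_mkey, int mad_flags)
{
	return (mad_flags & IGNORE_MKEY) || port_mkey == 0 ||
	       port_mkey == smp_mkey;
}

int main(void)
{
	printf("%d\n", mkey_valid(0xabcd, 0xabcd, 0));	/* 1: match      */
	printf("%d\n", mkey_valid(0, 0x1234, 0));	/* 1: no key set */
	printf("%d\n", mkey_valid(0xabcd, 0x1234, 0));	/* 0: violation  */
	return 0;
}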
-
-static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- struct qib_devdata *dd;
- struct qib_pportdata *ppd;
- struct qib_ibport *ibp;
- struct ib_port_info *pip = (struct ib_port_info *)smp->data;
- u8 mtu;
- int ret;
- u32 state;
- u32 port_num = be32_to_cpu(smp->attr_mod);
-
- if (port_num == 0)
- port_num = port;
- else {
- if (port_num > ibdev->phys_port_cnt) {
- smp->status |= IB_SMP_INVALID_FIELD;
- ret = reply(smp);
- goto bail;
- }
- if (port_num != port) {
- ibp = to_iport(ibdev, port_num);
- ret = check_mkey(ibp, smp, 0);
- if (ret) {
- ret = IB_MAD_RESULT_FAILURE;
- goto bail;
- }
- }
- }
-
- dd = dd_from_ibdev(ibdev);
- /* IB numbers ports from 1, hdw from 0 */
- ppd = dd->pport + (port_num - 1);
- ibp = &ppd->ibport_data;
-
- /* Clear all fields. Only set the non-zero fields. */
- memset(smp->data, 0, sizeof(smp->data));
-
- /* Only return the mkey if the protection field allows it. */
- if (!(smp->method == IB_MGMT_METHOD_GET &&
- ibp->rvp.mkey != smp->mkey &&
- ibp->rvp.mkeyprot == 1))
- pip->mkey = ibp->rvp.mkey;
- pip->gid_prefix = ibp->rvp.gid_prefix;
- pip->lid = cpu_to_be16(ppd->lid);
- pip->sm_lid = cpu_to_be16((u16)ibp->rvp.sm_lid);
- pip->cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
- /* pip->diag_code; */
- pip->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
- pip->local_port_num = port;
- pip->link_width_enabled = ppd->link_width_enabled;
- pip->link_width_supported = ppd->link_width_supported;
- pip->link_width_active = ppd->link_width_active;
- state = dd->f_iblink_state(ppd->lastibcstat);
- pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;
-
- pip->portphysstate_linkdown =
- (dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
- (get_linkdowndefaultstate(ppd) ? 1 : 2);
- pip->mkeyprot_resv_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;
- pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
- ppd->link_speed_enabled;
- switch (ppd->ibmtu) {
- default: /* something is wrong; fall through */
- case 4096:
- mtu = IB_MTU_4096;
- break;
- case 2048:
- mtu = IB_MTU_2048;
- break;
- case 1024:
- mtu = IB_MTU_1024;
- break;
- case 512:
- mtu = IB_MTU_512;
- break;
- case 256:
- mtu = IB_MTU_256;
- break;
- }
- pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->rvp.sm_sl;
- pip->vlcap_inittype = ppd->vls_supported << 4; /* InitType = 0 */
- pip->vl_high_limit = ibp->rvp.vl_high_limit;
- pip->vl_arb_high_cap =
- dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
- pip->vl_arb_low_cap =
- dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
- /* InitTypeReply = 0 */
- pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
- /* HCAs ignore VLStallCount and HOQLife */
- /* pip->vlstallcnt_hoqlife; */
- pip->operationalvl_pei_peo_fpi_fpo =
- dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
- pip->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations);
- /* P_KeyViolations are counted by hardware. */
- pip->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations);
- pip->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations);
- /* Only the hardware GUID is supported for now */
- pip->guid_cap = QIB_GUIDS_PER_PORT;
- pip->clientrereg_resv_subnetto = ibp->rvp.subnet_timeout;
- /* 32.768 usec. response time (guessing) */
- pip->resv_resptimevalue = 3;
- pip->localphyerrors_overrunerrors =
- (get_phyerrthreshold(ppd) << 4) |
- get_overrunthreshold(ppd);
- /* pip->max_credit_hint; */
- if (ibp->rvp.port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
- u32 v;
-
- v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
- pip->link_roundtrip_latency[0] = v >> 16;
- pip->link_roundtrip_latency[1] = v >> 8;
- pip->link_roundtrip_latency[2] = v;
- }
-
- ret = reply(smp);
-
-bail:
- return ret;
-}
-
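
Several PortInfo fields above pack two subfields into a single byte (for example mkeyprot_resv_lmc and linkspeed_portstate). A minimal sketch of that encode/decode pattern, with hypothetical helper names; the shift is 4 for nibble-packed fields and 6 for mkeyprot:

	#include <linux/types.h>

	/* Pack one subfield into the high nibble and another into the
	 * low bits, as subn_get_portinfo() does by hand above.
	 */
	static inline u8 pack_hi_lo(u8 hi, u8 lo)
	{
		return (hi << 4) | (lo & 0xF);
	}

	static inline u8 unpack_hi(u8 v)
	{
		return v >> 4;
	}

	static inline u8 unpack_lo(u8 v)
	{
		return v & 0xF;
	}
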
-/**
- * get_pkeys - return the PKEY table
- * @dd: the qlogic_ib device
- * @port: the IB port number
- * @pkeys: the pkey table is placed here
- */
-static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
-{
- struct qib_pportdata *ppd = dd->pport + port - 1;
- /*
-	 * Always a kernel context, so no locking is needed.
-	 * If we get here with ppd set up, there is no need to check
-	 * that rcd is valid.
- */
- struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];
-
- memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));
-
- return 0;
-}
-
-static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
- u16 *p = (u16 *) smp->data;
- __be16 *q = (__be16 *) smp->data;
-
- /* 64 blocks of 32 16-bit P_Key entries */
-
- memset(smp->data, 0, sizeof(smp->data));
- if (startpx == 0) {
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- unsigned i, n = qib_get_npkeys(dd);
-
- get_pkeys(dd, port, p);
-
- for (i = 0; i < n; i++)
- q[i] = cpu_to_be16(p[i]);
- } else
- smp->status |= IB_SMP_INVALID_FIELD;
-
- return reply(smp);
-}
-
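
The P_KeyTable attribute addresses the table in blocks of 32 entries, with the block number carried in the low 16 bits of attr_mod; this driver only implements block 0. A sketch of the indexing, helper name hypothetical:

	#include <linux/types.h>

	/* First P_Key index covered by a given P_KeyTable block. */
	static inline u32 pkey_block_start(u32 attr_mod)
	{
		return 32 * (attr_mod & 0xffff);
	}
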
-static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
- __be64 *p = (__be64 *) smp->data;
-	unsigned pidx = port - 1; /* IB numbers ports from 1, hdw from 0 */
-
- /* 32 blocks of 8 64-bit GUIDs per block */
-
- if (startgx == 0 && pidx < dd->num_pports) {
- struct qib_pportdata *ppd = dd->pport + pidx;
- struct qib_ibport *ibp = &ppd->ibport_data;
- unsigned i;
-
- /* The first entry is read-only. */
- for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
- ibp->guids[i - 1] = p[i];
- } else
- smp->status |= IB_SMP_INVALID_FIELD;
-
- /* The only GUID we support is the first read-only entry. */
- return subn_get_guidinfo(smp, ibdev, port);
-}
-
-/**
- * subn_set_portinfo - set port information
- * @smp: the incoming SM packet
- * @ibdev: the infiniband device
- * @port: the port on the device
- *
- * Set Portinfo (see ch. 14.2.5.6).
- */
-static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- struct ib_port_info *pip = (struct ib_port_info *)smp->data;
- struct ib_event event;
- struct qib_devdata *dd;
- struct qib_pportdata *ppd;
- struct qib_ibport *ibp;
- u8 clientrereg = (pip->clientrereg_resv_subnetto & 0x80);
- unsigned long flags;
- u16 lid, smlid;
- u8 lwe;
- u8 lse;
- u8 state;
- u8 vls;
- u8 msl;
- u16 lstate;
- int ret, ore, mtu;
- u32 port_num = be32_to_cpu(smp->attr_mod);
-
- if (port_num == 0)
- port_num = port;
- else {
- if (port_num > ibdev->phys_port_cnt)
- goto err;
- /* Port attributes can only be set on the receiving port */
- if (port_num != port)
- goto get_only;
- }
-
- dd = dd_from_ibdev(ibdev);
- /* IB numbers ports from 1, hdw from 0 */
- ppd = dd->pport + (port_num - 1);
- ibp = &ppd->ibport_data;
- event.device = ibdev;
- event.element.port_num = port;
-
- ibp->rvp.mkey = pip->mkey;
- ibp->rvp.gid_prefix = pip->gid_prefix;
- ibp->rvp.mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
-
- lid = be16_to_cpu(pip->lid);
- /* Must be a valid unicast LID address. */
- if (lid == 0 || lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
- smp->status |= IB_SMP_INVALID_FIELD;
- else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
- if (ppd->lid != lid)
- qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
- if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
- qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);
- qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);
- event.event = IB_EVENT_LID_CHANGE;
- ib_dispatch_event(&event);
- }
-
- smlid = be16_to_cpu(pip->sm_lid);
- msl = pip->neighbormtu_mastersmsl & 0xF;
- /* Must be a valid unicast LID address. */
- if (smlid == 0 || smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE))
- smp->status |= IB_SMP_INVALID_FIELD;
- else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
- spin_lock_irqsave(&ibp->rvp.lock, flags);
- if (ibp->rvp.sm_ah) {
- if (smlid != ibp->rvp.sm_lid)
- rdma_ah_set_dlid(&ibp->rvp.sm_ah->attr,
- smlid);
- if (msl != ibp->rvp.sm_sl)
- rdma_ah_set_sl(&ibp->rvp.sm_ah->attr, msl);
- }
- spin_unlock_irqrestore(&ibp->rvp.lock, flags);
- if (smlid != ibp->rvp.sm_lid)
- ibp->rvp.sm_lid = smlid;
- if (msl != ibp->rvp.sm_sl)
- ibp->rvp.sm_sl = msl;
- event.event = IB_EVENT_SM_CHANGE;
- ib_dispatch_event(&event);
- }
-
- /* Allow 1x or 4x to be set (see 14.2.6.6). */
- lwe = pip->link_width_enabled;
- if (lwe) {
- if (lwe == 0xFF)
- set_link_width_enabled(ppd, ppd->link_width_supported);
- else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
- smp->status |= IB_SMP_INVALID_FIELD;
- else if (lwe != ppd->link_width_enabled)
- set_link_width_enabled(ppd, lwe);
- }
-
- lse = pip->linkspeedactive_enabled & 0xF;
- if (lse) {
- /*
-		 * The IB 1.2 spec only allows link speed values
-		 * 1, 3, 5, 7, and 15; 1.2.1 extended this to allow
-		 * specific speeds.
- */
- if (lse == 15)
- set_link_speed_enabled(ppd,
- ppd->link_speed_supported);
- else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
- smp->status |= IB_SMP_INVALID_FIELD;
- else if (lse != ppd->link_speed_enabled)
- set_link_speed_enabled(ppd, lse);
- }
-
- /* Set link down default state. */
- switch (pip->portphysstate_linkdown & 0xF) {
- case 0: /* NOP */
- break;
- case 1: /* SLEEP */
- (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
- IB_LINKINITCMD_SLEEP);
- break;
- case 2: /* POLL */
- (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
- IB_LINKINITCMD_POLL);
- break;
- default:
- smp->status |= IB_SMP_INVALID_FIELD;
- }
-
- ibp->rvp.mkeyprot = pip->mkeyprot_resv_lmc >> 6;
- ibp->rvp.vl_high_limit = pip->vl_high_limit;
- (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
- ibp->rvp.vl_high_limit);
-
- mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
- if (mtu == -1)
- smp->status |= IB_SMP_INVALID_FIELD;
- else
- qib_set_mtu(ppd, mtu);
-
- /* Set operational VLs */
- vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
- if (vls) {
- if (vls > ppd->vls_supported)
- smp->status |= IB_SMP_INVALID_FIELD;
- else
- (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
- }
-
- if (pip->mkey_violations == 0)
- ibp->rvp.mkey_violations = 0;
-
- if (pip->pkey_violations == 0)
- ibp->rvp.pkey_violations = 0;
-
- if (pip->qkey_violations == 0)
- ibp->rvp.qkey_violations = 0;
-
- ore = pip->localphyerrors_overrunerrors;
- if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
- smp->status |= IB_SMP_INVALID_FIELD;
-
- if (set_overrunthreshold(ppd, (ore & 0xF)))
- smp->status |= IB_SMP_INVALID_FIELD;
-
- ibp->rvp.subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
-
- /*
- * Do the port state change now that the other link parameters
- * have been set.
- * Changing the port physical state only makes sense if the link
- * is down or is being set to down.
- */
- state = pip->linkspeed_portstate & 0xF;
- lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
- if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
- smp->status |= IB_SMP_INVALID_FIELD;
-
- /*
- * Only state changes of DOWN, ARM, and ACTIVE are valid
- * and must be in the correct state to take effect (see 7.2.6).
- */
- switch (state) {
- case IB_PORT_NOP:
- if (lstate == 0)
- break;
- fallthrough;
- case IB_PORT_DOWN:
- if (lstate == 0)
- lstate = QIB_IB_LINKDOWN_ONLY;
- else if (lstate == 1)
- lstate = QIB_IB_LINKDOWN_SLEEP;
- else if (lstate == 2)
- lstate = QIB_IB_LINKDOWN;
- else if (lstate == 3)
- lstate = QIB_IB_LINKDOWN_DISABLE;
- else {
- smp->status |= IB_SMP_INVALID_FIELD;
- break;
- }
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags &= ~QIBL_LINKV;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
- qib_set_linkstate(ppd, lstate);
- /*
- * Don't send a reply if the response would be sent
- * through the disabled port.
- */
- if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {
- ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
- goto done;
- }
- qib_wait_linkstate(ppd, QIBL_LINKV, 10);
- break;
- case IB_PORT_ARMED:
- qib_set_linkstate(ppd, QIB_IB_LINKARM);
- break;
- case IB_PORT_ACTIVE:
- qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
- break;
- default:
- smp->status |= IB_SMP_INVALID_FIELD;
- }
-
- if (clientrereg) {
- event.event = IB_EVENT_CLIENT_REREGISTER;
- ib_dispatch_event(&event);
- }
-
- /* restore re-reg bit per o14-12.2.1 */
- pip->clientrereg_resv_subnetto |= clientrereg;
-
- goto get_only;
-
-err:
- smp->status |= IB_SMP_INVALID_FIELD;
-get_only:
- ret = subn_get_portinfo(smp, ibdev, port);
-done:
- return ret;
-}
-
-/**
- * rm_pkey - decrement the reference count for the given PKEY
- * @ppd: the qlogic_ib port data
- * @key: the PKEY
- *
- * Return true if this was the last reference and the hardware table entry
- * needs to be changed.
- */
-static int rm_pkey(struct qib_pportdata *ppd, u16 key)
-{
- int i;
- int ret;
-
- for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
- if (ppd->pkeys[i] != key)
- continue;
- if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
- ppd->pkeys[i] = 0;
- ret = 1;
- goto bail;
- }
- break;
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * add_pkey - add the given PKEY to the hardware table
- * @ppd: the qlogic_ib port data
- * @key: the PKEY
- *
- * Return an error code if unable to add the entry, zero if no change,
- * or 1 if the hardware PKEY register needs to be updated.
- */
-static int add_pkey(struct qib_pportdata *ppd, u16 key)
-{
- int i;
- u16 lkey = key & 0x7FFF;
- int any = 0;
- int ret;
-
- if (lkey == 0x7FFF) {
- ret = 0;
- goto bail;
- }
-
- /* Look for an empty slot or a matching PKEY. */
- for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
- if (!ppd->pkeys[i]) {
- any++;
- continue;
- }
- /* If it matches exactly, try to increment the ref count */
- if (ppd->pkeys[i] == key) {
- if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
- ret = 0;
- goto bail;
- }
- /* Lost the race. Look for an empty slot below. */
- atomic_dec(&ppd->pkeyrefs[i]);
- any++;
- }
- /*
- * It makes no sense to have both the limited and unlimited
- * PKEY set at the same time since the unlimited one will
- * disable the limited one.
- */
- if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
- ret = -EEXIST;
- goto bail;
- }
- }
- if (!any) {
- ret = -EBUSY;
- goto bail;
- }
- for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
- if (!ppd->pkeys[i] &&
- atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
- /* for qibstats, etc. */
- ppd->pkeys[i] = key;
- ret = 1;
- goto bail;
- }
- }
- ret = -EBUSY;
-
-bail:
- return ret;
-}
-
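
add_pkey() masks with 0x7FFF because bit 15 of a P_Key encodes the membership type (full vs. limited) while the low 15 bits are the partition key proper. A minimal sketch of that convention, assuming standard IBTA semantics and hypothetical helper names:

	#include <linux/types.h>

	#define PKEY_MEMBER_FULL	0x8000	/* bit 15 set => full member */
	#define PKEY_LOW_15		0x7FFF	/* the partition key itself */

	static inline bool pkey_is_full_member(u16 pkey)
	{
		return pkey & PKEY_MEMBER_FULL;
	}

	static inline bool pkey_same_partition(u16 a, u16 b)
	{
		return (a & PKEY_LOW_15) == (b & PKEY_LOW_15);
	}

This is why a limited and a full-member P_Key with the same low 15 bits cannot coexist in the table: the full-member entry would make the limited one unreachable.
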
-/**
- * set_pkeys - set the PKEY table for ctxt 0
- * @dd: the qlogic_ib device
- * @port: the IB port number
- * @pkeys: the PKEY table
- */
-static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
-{
- struct qib_pportdata *ppd;
- struct qib_ctxtdata *rcd;
- int i;
- int changed = 0;
-
- /*
- * IB port one/two always maps to context zero/one,
-	 * always a kernel context, so no locking is needed.
-	 * If we get here with ppd set up, there is no need to check
- * that rcd is valid.
- */
- ppd = dd->pport + (port - 1);
- rcd = dd->rcd[ppd->hw_pidx];
-
- for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
- u16 key = pkeys[i];
- u16 okey = rcd->pkeys[i];
-
- if (key == okey)
- continue;
- /*
- * The value of this PKEY table entry is changing.
- * Remove the old entry in the hardware's array of PKEYs.
- */
- if (okey & 0x7FFF)
- changed |= rm_pkey(ppd, okey);
- if (key & 0x7FFF) {
- int ret = add_pkey(ppd, key);
-
- if (ret < 0)
- key = 0;
- else
- changed |= ret;
- }
- rcd->pkeys[i] = key;
- }
- if (changed) {
- struct ib_event event;
-
- (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
-
- event.event = IB_EVENT_PKEY_CHANGE;
- event.device = &dd->verbs_dev.rdi.ibdev;
- event.element.port_num = port;
- ib_dispatch_event(&event);
- }
- return 0;
-}
-
-static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
- __be16 *p = (__be16 *) smp->data;
- u16 *q = (u16 *) smp->data;
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- unsigned i, n = qib_get_npkeys(dd);
-
- for (i = 0; i < n; i++)
- q[i] = be16_to_cpu(p[i]);
-
- if (startpx != 0 || set_pkeys(dd, port, q) != 0)
- smp->status |= IB_SMP_INVALID_FIELD;
-
- return subn_get_pkeytable(smp, ibdev, port);
-}
-
-static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- struct qib_ibport *ibp = to_iport(ibdev, port);
- u8 *p = (u8 *) smp->data;
- unsigned i;
-
- memset(smp->data, 0, sizeof(smp->data));
-
- if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP))
- smp->status |= IB_SMP_UNSUP_METHOD;
- else
- for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
- *p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1];
-
- return reply(smp);
-}
-
-static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- struct qib_ibport *ibp = to_iport(ibdev, port);
- u8 *p = (u8 *) smp->data;
- unsigned i;
-
- if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP)) {
- smp->status |= IB_SMP_UNSUP_METHOD;
- return reply(smp);
- }
-
- for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
- ibp->sl_to_vl[i] = *p >> 4;
- ibp->sl_to_vl[i + 1] = *p & 0xF;
- }
- qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)),
- _QIB_EVENT_SL2VL_CHANGE_BIT);
-
- return subn_get_sl_to_vl(smp, ibdev, port);
-}
-
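
On the wire, the SL-to-VL table packs sixteen 4-bit VL values two per byte, even SL in the high nibble, as the loops above show. A hedged sketch of a lookup on the packed format (helper name hypothetical):

	#include <linux/types.h>

	static inline u8 sl2vl_lookup(const u8 *sl_to_vl_packed, unsigned int sl)
	{
		u8 byte = sl_to_vl_packed[sl / 2];

		return (sl & 1) ? (byte & 0xF) : (byte >> 4);
	}
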
-static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
- struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
-
- memset(smp->data, 0, sizeof(smp->data));
-
- if (ppd->vls_supported == IB_VL_VL0)
- smp->status |= IB_SMP_UNSUP_METHOD;
- else if (which == IB_VLARB_LOWPRI_0_31)
- (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
- smp->data);
- else if (which == IB_VLARB_HIGHPRI_0_31)
- (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
- smp->data);
- else
- smp->status |= IB_SMP_INVALID_FIELD;
-
- return reply(smp);
-}
-
-static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- unsigned which = be32_to_cpu(smp->attr_mod) >> 16;
- struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
-
- if (ppd->vls_supported == IB_VL_VL0)
- smp->status |= IB_SMP_UNSUP_METHOD;
- else if (which == IB_VLARB_LOWPRI_0_31)
- (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB,
- smp->data);
- else if (which == IB_VLARB_HIGHPRI_0_31)
- (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB,
- smp->data);
- else
- smp->status |= IB_SMP_INVALID_FIELD;
-
- return subn_get_vl_arb(smp, ibdev, port);
-}
-
-static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
- u8 port)
-{
- /*
-	 * For now, we only send the trap once, so there is no need to
-	 * process this.
- * o13-6, o13-7,
- * o14-3.a4 The SMA shall not send any message in response to a valid
- * SubnTrapRepress() message.
- */
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
-}
-
-static int pma_get_classportinfo(struct ib_pma_mad *pmp,
- struct ib_device *ibdev)
-{
- struct ib_class_port_info *p =
- (struct ib_class_port_info *)pmp->data;
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
-
- memset(pmp->data, 0, sizeof(pmp->data));
-
- if (pmp->mad_hdr.attr_mod != 0)
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-
- /* Note that AllPortSelect is not valid */
- p->base_version = 1;
- p->class_version = 1;
- p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
- /*
- * Set the most significant bit of CM2 to indicate support for
-	 * congestion statistics.
- */
- ib_set_cpi_capmask2(p,
- dd->psxmitwait_supported <<
- (31 - IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE));
- /*
- * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
- */
- ib_set_cpi_resp_time(p, 18);
-
- return reply((struct ib_smp *) pmp);
-}
-
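
RespTimeValue encodes an expected response time of 4.096 usec * 2^N, so the value 18 set above works out to 4.096e-6 * 2^18 = 1.073741824 seconds, matching the comment. A one-line sketch of the decode:

	#include <linux/types.h>

	/* 4.096 usec is 4096 ns, so the encoded response time in ns is: */
	static inline u64 resp_time_value_to_ns(unsigned int n)
	{
		return 4096ULL << n;	/* n = 18 gives 1073741824 ns */
	}
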
-static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portsamplescontrol *p =
- (struct ib_pma_portsamplescontrol *)pmp->data;
- struct qib_ibdev *dev = to_idev(ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- unsigned long flags;
- u8 port_select = p->port_select;
-
- memset(pmp->data, 0, sizeof(pmp->data));
-
- p->port_select = port_select;
- if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- goto bail;
- }
- spin_lock_irqsave(&ibp->rvp.lock, flags);
- p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
- p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
- p->counter_width = 4; /* 32 bit counters */
- p->counter_mask0_9 = COUNTER_MASK0_9;
- p->sample_start = cpu_to_be32(ibp->rvp.pma_sample_start);
- p->sample_interval = cpu_to_be32(ibp->rvp.pma_sample_interval);
- p->tag = cpu_to_be16(ibp->rvp.pma_tag);
- p->counter_select[0] = ibp->rvp.pma_counter_select[0];
- p->counter_select[1] = ibp->rvp.pma_counter_select[1];
- p->counter_select[2] = ibp->rvp.pma_counter_select[2];
- p->counter_select[3] = ibp->rvp.pma_counter_select[3];
- p->counter_select[4] = ibp->rvp.pma_counter_select[4];
- spin_unlock_irqrestore(&ibp->rvp.lock, flags);
-
-bail:
- return reply((struct ib_smp *) pmp);
-}
-
-static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portsamplescontrol *p =
- (struct ib_pma_portsamplescontrol *)pmp->data;
- struct qib_ibdev *dev = to_idev(ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- unsigned long flags;
- u8 status, xmit_flags;
- int ret;
-
- if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- ret = reply((struct ib_smp *) pmp);
- goto bail;
- }
-
- spin_lock_irqsave(&ibp->rvp.lock, flags);
-
- /* Port Sampling code owns the PS* HW counters */
- xmit_flags = ppd->cong_stats.flags;
- ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
- status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
- if (status == IB_PMA_SAMPLE_STATUS_DONE ||
- (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
- xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
- ibp->rvp.pma_sample_start = be32_to_cpu(p->sample_start);
- ibp->rvp.pma_sample_interval = be32_to_cpu(p->sample_interval);
- ibp->rvp.pma_tag = be16_to_cpu(p->tag);
- ibp->rvp.pma_counter_select[0] = p->counter_select[0];
- ibp->rvp.pma_counter_select[1] = p->counter_select[1];
- ibp->rvp.pma_counter_select[2] = p->counter_select[2];
- ibp->rvp.pma_counter_select[3] = p->counter_select[3];
- ibp->rvp.pma_counter_select[4] = p->counter_select[4];
- dd->f_set_cntr_sample(ppd, ibp->rvp.pma_sample_interval,
- ibp->rvp.pma_sample_start);
- }
- spin_unlock_irqrestore(&ibp->rvp.lock, flags);
-
- ret = pma_get_portsamplescontrol(pmp, ibdev, port);
-
-bail:
- return ret;
-}
-
-static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,
- __be16 sel)
-{
- u64 ret;
-
- switch (sel) {
- case IB_PMA_PORT_XMIT_DATA:
- ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
- break;
- case IB_PMA_PORT_RCV_DATA:
- ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
- break;
- case IB_PMA_PORT_XMIT_PKTS:
- ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
- break;
- case IB_PMA_PORT_RCV_PKTS:
- ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
- break;
- case IB_PMA_PORT_XMIT_WAIT:
- ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
- break;
- default:
- ret = 0;
- }
-
- return ret;
-}
-
-/* This function assumes that the xmit_wait lock is already held */
-static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd)
-{
- u32 delta;
-
- delta = get_counter(&ppd->ibport_data, ppd,
- IB_PMA_PORT_XMIT_WAIT);
- return ppd->cong_stats.counter + delta;
-}
-
-static void cache_hw_sample_counters(struct qib_pportdata *ppd)
-{
- struct qib_ibport *ibp = &ppd->ibport_data;
-
- ppd->cong_stats.counter_cache.psxmitdata =
- get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);
- ppd->cong_stats.counter_cache.psrcvdata =
- get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);
- ppd->cong_stats.counter_cache.psxmitpkts =
- get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);
- ppd->cong_stats.counter_cache.psrcvpkts =
- get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);
- ppd->cong_stats.counter_cache.psxmitwait =
- get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);
-}
-
-static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
- __be16 sel)
-{
- u64 ret;
-
- switch (sel) {
- case IB_PMA_PORT_XMIT_DATA:
- ret = ppd->cong_stats.counter_cache.psxmitdata;
- break;
- case IB_PMA_PORT_RCV_DATA:
- ret = ppd->cong_stats.counter_cache.psrcvdata;
- break;
- case IB_PMA_PORT_XMIT_PKTS:
- ret = ppd->cong_stats.counter_cache.psxmitpkts;
- break;
- case IB_PMA_PORT_RCV_PKTS:
- ret = ppd->cong_stats.counter_cache.psrcvpkts;
- break;
- case IB_PMA_PORT_XMIT_WAIT:
- ret = ppd->cong_stats.counter_cache.psxmitwait;
- break;
- default:
- ret = 0;
- }
-
- return ret;
-}
-
-static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portsamplesresult *p =
- (struct ib_pma_portsamplesresult *)pmp->data;
- struct qib_ibdev *dev = to_idev(ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- unsigned long flags;
- u8 status;
- int i;
-
- memset(pmp->data, 0, sizeof(pmp->data));
- spin_lock_irqsave(&ibp->rvp.lock, flags);
- p->tag = cpu_to_be16(ibp->rvp.pma_tag);
- if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
- p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
- else {
- status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
- p->sample_status = cpu_to_be16(status);
- if (status == IB_PMA_SAMPLE_STATUS_DONE) {
- cache_hw_sample_counters(ppd);
- ppd->cong_stats.counter =
- xmit_wait_get_value_delta(ppd);
- dd->f_set_cntr_sample(ppd,
- QIB_CONG_TIMER_PSINTERVAL, 0);
- ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
- }
- }
- for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
- p->counter[i] = cpu_to_be32(
- get_cache_hw_sample_counters(
- ppd, ibp->rvp.pma_counter_select[i]));
- spin_unlock_irqrestore(&ibp->rvp.lock, flags);
-
- return reply((struct ib_smp *) pmp);
-}
-
-static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portsamplesresult_ext *p =
- (struct ib_pma_portsamplesresult_ext *)pmp->data;
- struct qib_ibdev *dev = to_idev(ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- unsigned long flags;
- u8 status;
- int i;
-
- /* Port Sampling code owns the PS* HW counters */
- memset(pmp->data, 0, sizeof(pmp->data));
- spin_lock_irqsave(&ibp->rvp.lock, flags);
- p->tag = cpu_to_be16(ibp->rvp.pma_tag);
- if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
- p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
- else {
- status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
- p->sample_status = cpu_to_be16(status);
- /* 64 bits */
- p->extended_width = cpu_to_be32(0x80000000);
- if (status == IB_PMA_SAMPLE_STATUS_DONE) {
- cache_hw_sample_counters(ppd);
- ppd->cong_stats.counter =
- xmit_wait_get_value_delta(ppd);
- dd->f_set_cntr_sample(ppd,
- QIB_CONG_TIMER_PSINTERVAL, 0);
- ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
- }
- }
- for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
- p->counter[i] = cpu_to_be64(
- get_cache_hw_sample_counters(
- ppd, ibp->rvp.pma_counter_select[i]));
- spin_unlock_irqrestore(&ibp->rvp.lock, flags);
-
- return reply((struct ib_smp *) pmp);
-}
-
-static int pma_get_portcounters(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
- pmp->data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_verbs_counters cntrs;
- u8 port_select = p->port_select;
-
- qib_get_counters(ppd, &cntrs);
-
- /* Adjust counters for any resets done. */
- cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
- cntrs.link_error_recovery_counter -=
- ibp->z_link_error_recovery_counter;
- cntrs.link_downed_counter -= ibp->z_link_downed_counter;
- cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
- cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
- cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
- cntrs.port_xmit_data -= ibp->z_port_xmit_data;
- cntrs.port_rcv_data -= ibp->z_port_rcv_data;
- cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
- cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
- cntrs.local_link_integrity_errors -=
- ibp->z_local_link_integrity_errors;
- cntrs.excessive_buffer_overrun_errors -=
- ibp->z_excessive_buffer_overrun_errors;
- cntrs.vl15_dropped -= ibp->z_vl15_dropped;
- cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;
-
- memset(pmp->data, 0, sizeof(pmp->data));
-
- p->port_select = port_select;
- if (pmp->mad_hdr.attr_mod != 0 || port_select != port)
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-
- if (cntrs.symbol_error_counter > 0xFFFFUL)
- p->symbol_error_counter = cpu_to_be16(0xFFFF);
- else
- p->symbol_error_counter =
- cpu_to_be16((u16)cntrs.symbol_error_counter);
- if (cntrs.link_error_recovery_counter > 0xFFUL)
- p->link_error_recovery_counter = 0xFF;
- else
- p->link_error_recovery_counter =
- (u8)cntrs.link_error_recovery_counter;
- if (cntrs.link_downed_counter > 0xFFUL)
- p->link_downed_counter = 0xFF;
- else
- p->link_downed_counter = (u8)cntrs.link_downed_counter;
- if (cntrs.port_rcv_errors > 0xFFFFUL)
- p->port_rcv_errors = cpu_to_be16(0xFFFF);
- else
- p->port_rcv_errors =
- cpu_to_be16((u16) cntrs.port_rcv_errors);
- if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
- p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
- else
- p->port_rcv_remphys_errors =
- cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
- if (cntrs.port_xmit_discards > 0xFFFFUL)
- p->port_xmit_discards = cpu_to_be16(0xFFFF);
- else
- p->port_xmit_discards =
- cpu_to_be16((u16)cntrs.port_xmit_discards);
- if (cntrs.local_link_integrity_errors > 0xFUL)
- cntrs.local_link_integrity_errors = 0xFUL;
- if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
- cntrs.excessive_buffer_overrun_errors = 0xFUL;
- p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
- cntrs.excessive_buffer_overrun_errors;
- if (cntrs.vl15_dropped > 0xFFFFUL)
- p->vl15_dropped = cpu_to_be16(0xFFFF);
- else
- p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
- if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
- p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
- else
- p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
- if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
- p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
- else
- p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
- if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
- p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
- else
- p->port_xmit_packets =
- cpu_to_be32((u32)cntrs.port_xmit_packets);
- if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
- p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
- else
- p->port_rcv_packets =
- cpu_to_be32((u32) cntrs.port_rcv_packets);
-
- return reply((struct ib_smp *) pmp);
-}
-
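
The long if/else chains above all implement the same saturate-then-convert idiom for counters wider than their MAD fields. A hypothetical helper capturing the 16-bit case (not in the original driver); the 8- and 32-bit cases follow the same shape with 0xFF and 0xFFFFFFFF limits:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	static inline __be16 saturating_be16(u64 v)
	{
		return cpu_to_be16(v > 0xFFFFULL ? 0xFFFF : (u16)v);
	}
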
-static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
-	/* Congestion PMA packets start at offset 24, not 64 */
- struct ib_pma_portcounters_cong *p =
- (struct ib_pma_portcounters_cong *)pmp->reserved;
- struct qib_verbs_counters cntrs;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_devdata *dd = dd_from_ppd(ppd);
- u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF;
- u64 xmit_wait_counter;
- unsigned long flags;
-
- /*
- * This check is performed only in the GET method because the
- * SET method ends up calling this anyway.
- */
- if (!dd->psxmitwait_supported)
- pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
- if (port_select != port)
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-
- qib_get_counters(ppd, &cntrs);
- spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
- xmit_wait_counter = xmit_wait_get_value_delta(ppd);
- spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
-
- /* Adjust counters for any resets done. */
- cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
- cntrs.link_error_recovery_counter -=
- ibp->z_link_error_recovery_counter;
- cntrs.link_downed_counter -= ibp->z_link_downed_counter;
- cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
- cntrs.port_rcv_remphys_errors -=
- ibp->z_port_rcv_remphys_errors;
- cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
- cntrs.local_link_integrity_errors -=
- ibp->z_local_link_integrity_errors;
- cntrs.excessive_buffer_overrun_errors -=
- ibp->z_excessive_buffer_overrun_errors;
- cntrs.vl15_dropped -= ibp->z_vl15_dropped;
- cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;
- cntrs.port_xmit_data -= ibp->z_port_xmit_data;
- cntrs.port_rcv_data -= ibp->z_port_rcv_data;
- cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
- cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
-
- memset(pmp->reserved, 0, sizeof(pmp->reserved));
- memset(pmp->data, 0, sizeof(pmp->data));
-
- /*
-	 * Set the top 3 bits to indicate that the interval carried
-	 * in the remaining bits is in picoseconds.
- */
- p->port_check_rate =
- cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
- (dd->psxmitwait_check_rate &
- ~(QIB_XMIT_RATE_PICO << 13)));
- p->port_adr_events = cpu_to_be64(0);
- p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
- p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
- p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
- p->port_xmit_packets =
- cpu_to_be64(cntrs.port_xmit_packets);
- p->port_rcv_packets =
- cpu_to_be64(cntrs.port_rcv_packets);
- if (cntrs.symbol_error_counter > 0xFFFFUL)
- p->symbol_error_counter = cpu_to_be16(0xFFFF);
- else
- p->symbol_error_counter =
- cpu_to_be16(
- (u16)cntrs.symbol_error_counter);
- if (cntrs.link_error_recovery_counter > 0xFFUL)
- p->link_error_recovery_counter = 0xFF;
- else
- p->link_error_recovery_counter =
- (u8)cntrs.link_error_recovery_counter;
- if (cntrs.link_downed_counter > 0xFFUL)
- p->link_downed_counter = 0xFF;
- else
- p->link_downed_counter =
- (u8)cntrs.link_downed_counter;
- if (cntrs.port_rcv_errors > 0xFFFFUL)
- p->port_rcv_errors = cpu_to_be16(0xFFFF);
- else
- p->port_rcv_errors =
- cpu_to_be16((u16) cntrs.port_rcv_errors);
- if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
- p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
- else
- p->port_rcv_remphys_errors =
- cpu_to_be16(
- (u16)cntrs.port_rcv_remphys_errors);
- if (cntrs.port_xmit_discards > 0xFFFFUL)
- p->port_xmit_discards = cpu_to_be16(0xFFFF);
- else
- p->port_xmit_discards =
- cpu_to_be16((u16)cntrs.port_xmit_discards);
- if (cntrs.local_link_integrity_errors > 0xFUL)
- cntrs.local_link_integrity_errors = 0xFUL;
- if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
- cntrs.excessive_buffer_overrun_errors = 0xFUL;
- p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
- cntrs.excessive_buffer_overrun_errors;
- if (cntrs.vl15_dropped > 0xFFFFUL)
- p->vl15_dropped = cpu_to_be16(0xFFFF);
- else
- p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
-
- return reply((struct ib_smp *)pmp);
-}
-
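
PortCheckRate above packs a 3-bit rate-unit code into bits 15:13 (QIB_XMIT_RATE_PICO selects picosecond units) and the rate value into bits 12:0, which is what the ~(QIB_XMIT_RATE_PICO << 13) mask expresses. A sketch of the encoding, helper name hypothetical:

	#include <linux/types.h>

	static inline u16 encode_port_check_rate(u8 unit, u16 rate)
	{
		return (u16)((unit << 13) | (rate & 0x1FFF));
	}
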
-static void qib_snapshot_pmacounters(
- struct qib_ibport *ibp,
- struct qib_pma_counters *pmacounters)
-{
- struct qib_pma_counters *p;
- int cpu;
-
- memset(pmacounters, 0, sizeof(*pmacounters));
- for_each_possible_cpu(cpu) {
- p = per_cpu_ptr(ibp->pmastats, cpu);
- pmacounters->n_unicast_xmit += p->n_unicast_xmit;
- pmacounters->n_unicast_rcv += p->n_unicast_rcv;
- pmacounters->n_multicast_xmit += p->n_multicast_xmit;
- pmacounters->n_multicast_rcv += p->n_multicast_rcv;
- }
-}
-
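
qib_snapshot_pmacounters() follows the standard per-CPU counter pattern: each CPU increments its own copy locklessly, and a reader sums every copy. A minimal self-contained sketch of the pattern (counter name hypothetical):

	#include <linux/percpu.h>
	#include <linux/cpumask.h>
	#include <linux/types.h>

	static DEFINE_PER_CPU(u64, demo_counter);

	static u64 demo_counter_sum(void)
	{
		u64 sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += per_cpu(demo_counter, cpu);
		return sum;
	}
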
-static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portcounters_ext *p =
- (struct ib_pma_portcounters_ext *)pmp->data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- u64 swords, rwords, spkts, rpkts, xwait;
- struct qib_pma_counters pma;
- u8 port_select = p->port_select;
-
- memset(pmp->data, 0, sizeof(pmp->data));
-
- p->port_select = port_select;
- if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
- pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
- goto bail;
- }
-
- qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
-
- /* Adjust counters for any resets done. */
- swords -= ibp->z_port_xmit_data;
- rwords -= ibp->z_port_rcv_data;
- spkts -= ibp->z_port_xmit_packets;
- rpkts -= ibp->z_port_rcv_packets;
-
- p->port_xmit_data = cpu_to_be64(swords);
- p->port_rcv_data = cpu_to_be64(rwords);
- p->port_xmit_packets = cpu_to_be64(spkts);
- p->port_rcv_packets = cpu_to_be64(rpkts);
-
- qib_snapshot_pmacounters(ibp, &pma);
-
- p->port_unicast_xmit_packets = cpu_to_be64(pma.n_unicast_xmit
- - ibp->z_unicast_xmit);
- p->port_unicast_rcv_packets = cpu_to_be64(pma.n_unicast_rcv
- - ibp->z_unicast_rcv);
- p->port_multicast_xmit_packets = cpu_to_be64(pma.n_multicast_xmit
- - ibp->z_multicast_xmit);
- p->port_multicast_rcv_packets = cpu_to_be64(pma.n_multicast_rcv
- - ibp->z_multicast_rcv);
-
-bail:
- return reply((struct ib_smp *) pmp);
-}
-
-static int pma_set_portcounters(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
- pmp->data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_verbs_counters cntrs;
-
- /*
- * Since the HW doesn't support clearing counters, we save the
- * current count and subtract it from future responses.
- */
- qib_get_counters(ppd, &cntrs);
-
- if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
- ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
-
- if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
- ibp->z_link_error_recovery_counter =
- cntrs.link_error_recovery_counter;
-
- if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
- ibp->z_link_downed_counter = cntrs.link_downed_counter;
-
- if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
- ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
-
- if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
- ibp->z_port_rcv_remphys_errors =
- cntrs.port_rcv_remphys_errors;
-
- if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
- ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
-
- if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
- ibp->z_local_link_integrity_errors =
- cntrs.local_link_integrity_errors;
-
- if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
- ibp->z_excessive_buffer_overrun_errors =
- cntrs.excessive_buffer_overrun_errors;
-
- if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
- ibp->rvp.n_vl15_dropped = 0;
- ibp->z_vl15_dropped = cntrs.vl15_dropped;
- }
-
- if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
- ibp->z_port_xmit_data = cntrs.port_xmit_data;
-
- if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
- ibp->z_port_rcv_data = cntrs.port_rcv_data;
-
- if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
- ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
-
- if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
- ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
-
- return pma_get_portcounters(pmp, ibdev, port);
-}
-
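
Because the hardware counters are free-running and cannot be cleared, a PMA "clear" is emulated by saving the current value as a baseline (the z_* fields above) and subtracting it from every subsequent read. A minimal sketch of that pattern, with hypothetical names:

	#include <linux/types.h>

	struct sw_counter {
		u64 baseline;	/* hw value captured at the last "clear" */
	};

	static inline void sw_counter_clear(struct sw_counter *c, u64 hw_now)
	{
		c->baseline = hw_now;
	}

	static inline u64 sw_counter_read(const struct sw_counter *c, u64 hw_now)
	{
		return hw_now - c->baseline;
	}
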
-static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_devdata *dd = dd_from_ppd(ppd);
- struct qib_verbs_counters cntrs;
- u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF;
- int ret = 0;
- unsigned long flags;
-
- qib_get_counters(ppd, &cntrs);
- /* Get counter values before we save them */
- ret = pma_get_portcounters_cong(pmp, ibdev, port);
-
- if (counter_select & IB_PMA_SEL_CONG_XMIT) {
- spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
- ppd->cong_stats.counter = 0;
- dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
- 0x0);
- spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
- }
- if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
- ibp->z_port_xmit_data = cntrs.port_xmit_data;
- ibp->z_port_rcv_data = cntrs.port_rcv_data;
- ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
- ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
- }
- if (counter_select & IB_PMA_SEL_CONG_ALL) {
- ibp->z_symbol_error_counter =
- cntrs.symbol_error_counter;
- ibp->z_link_error_recovery_counter =
- cntrs.link_error_recovery_counter;
- ibp->z_link_downed_counter =
- cntrs.link_downed_counter;
- ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
- ibp->z_port_rcv_remphys_errors =
- cntrs.port_rcv_remphys_errors;
- ibp->z_port_xmit_discards =
- cntrs.port_xmit_discards;
- ibp->z_local_link_integrity_errors =
- cntrs.local_link_integrity_errors;
- ibp->z_excessive_buffer_overrun_errors =
- cntrs.excessive_buffer_overrun_errors;
- ibp->rvp.n_vl15_dropped = 0;
- ibp->z_vl15_dropped = cntrs.vl15_dropped;
- }
-
- return ret;
-}
-
-static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
- pmp->data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- u64 swords, rwords, spkts, rpkts, xwait;
- struct qib_pma_counters pma;
-
- qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);
-
- if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
- ibp->z_port_xmit_data = swords;
-
- if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
- ibp->z_port_rcv_data = rwords;
-
- if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
- ibp->z_port_xmit_packets = spkts;
-
- if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
- ibp->z_port_rcv_packets = rpkts;
-
- qib_snapshot_pmacounters(ibp, &pma);
-
- if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
- ibp->z_unicast_xmit = pma.n_unicast_xmit;
-
- if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
- ibp->z_unicast_rcv = pma.n_unicast_rcv;
-
- if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
- ibp->z_multicast_xmit = pma.n_multicast_xmit;
-
- if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
- ibp->z_multicast_rcv = pma.n_multicast_rcv;
-
- return pma_get_portcounters_ext(pmp, ibdev, port);
-}
-
-static int process_subn(struct ib_device *ibdev, int mad_flags,
- u8 port, const struct ib_mad *in_mad,
- struct ib_mad *out_mad)
-{
- struct ib_smp *smp = (struct ib_smp *)out_mad;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- int ret;
-
- *out_mad = *in_mad;
- if (smp->class_version != 1) {
- smp->status |= IB_SMP_UNSUP_VERSION;
- ret = reply(smp);
- goto bail;
- }
-
- ret = check_mkey(ibp, smp, mad_flags);
- if (ret) {
- u32 port_num = be32_to_cpu(smp->attr_mod);
-
- /*
-		 * For a PortInfo get/set, the M_Key has already been
-		 * checked when the MAD is for another port and the M_Key
-		 * is OK on the receiving port. This extra check is needed
-		 * to increment the error counters when the M_Key fails
-		 * to match on *both* ports.
- */
- if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
- (smp->method == IB_MGMT_METHOD_GET ||
- smp->method == IB_MGMT_METHOD_SET) &&
- port_num && port_num <= ibdev->phys_port_cnt &&
- port != port_num)
- (void) check_mkey(to_iport(ibdev, port_num), smp, 0);
- ret = IB_MAD_RESULT_FAILURE;
- goto bail;
- }
-
- switch (smp->method) {
- case IB_MGMT_METHOD_GET:
- switch (smp->attr_id) {
- case IB_SMP_ATTR_NODE_DESC:
- ret = subn_get_nodedescription(smp, ibdev);
- goto bail;
- case IB_SMP_ATTR_NODE_INFO:
- ret = subn_get_nodeinfo(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_GUID_INFO:
- ret = subn_get_guidinfo(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_PORT_INFO:
- ret = subn_get_portinfo(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_PKEY_TABLE:
- ret = subn_get_pkeytable(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_SL_TO_VL_TABLE:
- ret = subn_get_sl_to_vl(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_VL_ARB_TABLE:
- ret = subn_get_vl_arb(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_SM_INFO:
- if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
- ret = IB_MAD_RESULT_SUCCESS |
- IB_MAD_RESULT_CONSUMED;
- goto bail;
- }
- if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
- }
- fallthrough;
- default:
- smp->status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply(smp);
- goto bail;
- }
-
- case IB_MGMT_METHOD_SET:
- switch (smp->attr_id) {
- case IB_SMP_ATTR_GUID_INFO:
- ret = subn_set_guidinfo(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_PORT_INFO:
- ret = subn_set_portinfo(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_PKEY_TABLE:
- ret = subn_set_pkeytable(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_SL_TO_VL_TABLE:
- ret = subn_set_sl_to_vl(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_VL_ARB_TABLE:
- ret = subn_set_vl_arb(smp, ibdev, port);
- goto bail;
- case IB_SMP_ATTR_SM_INFO:
- if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
- ret = IB_MAD_RESULT_SUCCESS |
- IB_MAD_RESULT_CONSUMED;
- goto bail;
- }
- if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
- }
- fallthrough;
- default:
- smp->status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply(smp);
- goto bail;
- }
-
- case IB_MGMT_METHOD_TRAP_REPRESS:
- if (smp->attr_id == IB_SMP_ATTR_NOTICE)
- ret = subn_trap_repress(smp, ibdev, port);
- else {
- smp->status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply(smp);
- }
- goto bail;
-
- case IB_MGMT_METHOD_TRAP:
- case IB_MGMT_METHOD_REPORT:
- case IB_MGMT_METHOD_REPORT_RESP:
- case IB_MGMT_METHOD_GET_RESP:
- /*
- * The ib_mad module will call us to process responses
- * before checking for other consumers.
- * Just tell the caller to process it normally.
- */
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
-
- case IB_MGMT_METHOD_SEND:
- if (ib_get_smp_direction(smp) &&
- smp->attr_id == QIB_VENDOR_IPG) {
- ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
- smp->data[0]);
- ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
- } else
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
-
- default:
- smp->status |= IB_SMP_UNSUP_METHOD;
- ret = reply(smp);
- }
-
-bail:
- return ret;
-}
-
-static int process_perf(struct ib_device *ibdev, u8 port,
- const struct ib_mad *in_mad,
- struct ib_mad *out_mad)
-{
- struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
- int ret;
-
- *out_mad = *in_mad;
- if (pmp->mad_hdr.class_version != 1) {
- pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
- ret = reply((struct ib_smp *) pmp);
- goto bail;
- }
-
- switch (pmp->mad_hdr.method) {
- case IB_MGMT_METHOD_GET:
- switch (pmp->mad_hdr.attr_id) {
- case IB_PMA_CLASS_PORT_INFO:
- ret = pma_get_classportinfo(pmp, ibdev);
- goto bail;
- case IB_PMA_PORT_SAMPLES_CONTROL:
- ret = pma_get_portsamplescontrol(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_SAMPLES_RESULT:
- ret = pma_get_portsamplesresult(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_SAMPLES_RESULT_EXT:
- ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_COUNTERS:
- ret = pma_get_portcounters(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_COUNTERS_EXT:
- ret = pma_get_portcounters_ext(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_COUNTERS_CONG:
- ret = pma_get_portcounters_cong(pmp, ibdev, port);
- goto bail;
- default:
- pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply((struct ib_smp *) pmp);
- goto bail;
- }
-
- case IB_MGMT_METHOD_SET:
- switch (pmp->mad_hdr.attr_id) {
- case IB_PMA_PORT_SAMPLES_CONTROL:
- ret = pma_set_portsamplescontrol(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_COUNTERS:
- ret = pma_set_portcounters(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_COUNTERS_EXT:
- ret = pma_set_portcounters_ext(pmp, ibdev, port);
- goto bail;
- case IB_PMA_PORT_COUNTERS_CONG:
- ret = pma_set_portcounters_cong(pmp, ibdev, port);
- goto bail;
- default:
- pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
- ret = reply((struct ib_smp *) pmp);
- goto bail;
- }
-
- case IB_MGMT_METHOD_TRAP:
- case IB_MGMT_METHOD_GET_RESP:
- /*
- * The ib_mad module will call us to process responses
- * before checking for other consumers.
- * Just tell the caller to process it normally.
- */
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
-
- default:
- pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
- ret = reply((struct ib_smp *) pmp);
- }
-
-bail:
- return ret;
-}
-
-static int cc_get_classportinfo(struct ib_cc_mad *ccp,
- struct ib_device *ibdev)
-{
- struct ib_cc_classportinfo_attr *p =
- (struct ib_cc_classportinfo_attr *)ccp->mgmt_data;
-
- p->base_version = 1;
- p->class_version = 1;
- p->cap_mask = 0;
-
- /*
- * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
- */
- p->resp_time_value = 18;
-
- return reply((struct ib_smp *) ccp);
-}
-
-static int cc_get_congestion_info(struct ib_cc_mad *ccp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_cc_info_attr *p =
- (struct ib_cc_info_attr *)ccp->mgmt_data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
-
- p->congestion_info = 0;
- p->control_table_cap = ppd->cc_max_table_entries;
-
- return reply((struct ib_smp *) ccp);
-}
-
-static int cc_get_congestion_setting(struct ib_cc_mad *ccp,
- struct ib_device *ibdev, u8 port)
-{
- int i;
- struct ib_cc_congestion_setting_attr *p =
- (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct ib_cc_congestion_entry_shadow *entries;
-
- spin_lock(&ppd->cc_shadow_lock);
-
- entries = ppd->congestion_entries_shadow->entries;
- p->port_control = cpu_to_be16(
- ppd->congestion_entries_shadow->port_control);
- p->control_map = cpu_to_be16(
- ppd->congestion_entries_shadow->control_map);
- for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
- p->entries[i].ccti_increase = entries[i].ccti_increase;
- p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
- p->entries[i].trigger_threshold = entries[i].trigger_threshold;
- p->entries[i].ccti_min = entries[i].ccti_min;
- }
-
- spin_unlock(&ppd->cc_shadow_lock);
-
- return reply((struct ib_smp *) ccp);
-}
-
-static int cc_get_congestion_control_table(struct ib_cc_mad *ccp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_cc_table_attr *p =
- (struct ib_cc_table_attr *)ccp->mgmt_data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
- u32 max_cct_block;
- u32 cct_entry;
- struct ib_cc_table_entry_shadow *entries;
- int i;
-
- /* Is the table index more than what is supported? */
- if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
- goto bail;
-
- spin_lock(&ppd->cc_shadow_lock);
-
- max_cct_block =
- (ppd->ccti_entries_shadow->ccti_last_entry + 1)/IB_CCT_ENTRIES;
- max_cct_block = max_cct_block ? max_cct_block - 1 : 0;
-
- if (cct_block_index > max_cct_block) {
- spin_unlock(&ppd->cc_shadow_lock);
- goto bail;
- }
-
- ccp->attr_mod = cpu_to_be32(cct_block_index);
-
- cct_entry = IB_CCT_ENTRIES * (cct_block_index + 1);
-
- cct_entry--;
-
- p->ccti_limit = cpu_to_be16(cct_entry);
-
- entries = &ppd->ccti_entries_shadow->
- entries[IB_CCT_ENTRIES * cct_block_index];
- cct_entry %= IB_CCT_ENTRIES;
-
- for (i = 0; i <= cct_entry; i++)
- p->ccti_entries[i].entry = cpu_to_be16(entries[i].entry);
-
- spin_unlock(&ppd->cc_shadow_lock);
-
- return reply((struct ib_smp *) ccp);
-
-bail:
- return reply_failure((struct ib_smp *) ccp);
-}
-
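
Congestion control table MADs address the table in blocks of IB_CCT_ENTRIES entries (64 in this driver, an assumption since the define is not shown here), with the block number in attr_mod and ccti_limit naming the last valid entry index overall. A sketch of the block arithmetic used above, helper names hypothetical:

	#include <linux/types.h>

	#define CCT_ENTRIES_PER_BLOCK	64	/* assumption: IB_CCT_ENTRIES */

	static inline u32 cct_block_first(u32 block)
	{
		return block * CCT_ENTRIES_PER_BLOCK;
	}

	static inline u32 cct_block_last(u32 block)
	{
		return cct_block_first(block + 1) - 1;
	}
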
-static int cc_set_congestion_setting(struct ib_cc_mad *ccp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_cc_congestion_setting_attr *p =
- (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- int i;
-
- ppd->cc_sl_control_map = be16_to_cpu(p->control_map);
-
- for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
- ppd->congestion_entries[i].ccti_increase =
- p->entries[i].ccti_increase;
-
- ppd->congestion_entries[i].ccti_timer =
- be16_to_cpu(p->entries[i].ccti_timer);
-
- ppd->congestion_entries[i].trigger_threshold =
- p->entries[i].trigger_threshold;
-
- ppd->congestion_entries[i].ccti_min =
- p->entries[i].ccti_min;
- }
-
- return reply((struct ib_smp *) ccp);
-}
-
-static int cc_set_congestion_control_table(struct ib_cc_mad *ccp,
- struct ib_device *ibdev, u8 port)
-{
- struct ib_cc_table_attr *p =
- (struct ib_cc_table_attr *)ccp->mgmt_data;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
- u32 cct_entry;
- struct ib_cc_table_entry_shadow *entries;
- int i;
-
- /* Is the table index more than what is supported? */
- if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
- goto bail;
-
- /* If this packet is the first in the sequence then
- * zero the total table entry count.
- */
- if (be16_to_cpu(p->ccti_limit) < IB_CCT_ENTRIES)
- ppd->total_cct_entry = 0;
-
- cct_entry = (be16_to_cpu(p->ccti_limit))%IB_CCT_ENTRIES;
-
- /* ccti_limit is 0 to 63 */
- ppd->total_cct_entry += (cct_entry + 1);
-
- if (ppd->total_cct_entry > ppd->cc_supported_table_entries)
- goto bail;
-
- ppd->ccti_limit = be16_to_cpu(p->ccti_limit);
-
- entries = ppd->ccti_entries + (IB_CCT_ENTRIES * cct_block_index);
-
- for (i = 0; i <= cct_entry; i++)
- entries[i].entry = be16_to_cpu(p->ccti_entries[i].entry);
-
- spin_lock(&ppd->cc_shadow_lock);
-
- ppd->ccti_entries_shadow->ccti_last_entry = ppd->total_cct_entry - 1;
- memcpy(ppd->ccti_entries_shadow->entries, ppd->ccti_entries,
- (ppd->total_cct_entry * sizeof(struct ib_cc_table_entry)));
-
- ppd->congestion_entries_shadow->port_control = IB_CC_CCS_PC_SL_BASED;
- ppd->congestion_entries_shadow->control_map = ppd->cc_sl_control_map;
- memcpy(ppd->congestion_entries_shadow->entries, ppd->congestion_entries,
- IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry));
-
- spin_unlock(&ppd->cc_shadow_lock);
-
- return reply((struct ib_smp *) ccp);
-
-bail:
- return reply_failure((struct ib_smp *) ccp);
-}
-
-static int process_cc(struct ib_device *ibdev, int mad_flags,
- u8 port, const struct ib_mad *in_mad,
- struct ib_mad *out_mad)
-{
- struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad;
- *out_mad = *in_mad;
-
- if (ccp->class_version != 2) {
- ccp->status |= IB_SMP_UNSUP_VERSION;
- return reply((struct ib_smp *)ccp);
- }
-
- switch (ccp->method) {
- case IB_MGMT_METHOD_GET:
- switch (ccp->attr_id) {
- case IB_CC_ATTR_CLASSPORTINFO:
- return cc_get_classportinfo(ccp, ibdev);
- case IB_CC_ATTR_CONGESTION_INFO:
- return cc_get_congestion_info(ccp, ibdev, port);
- case IB_CC_ATTR_CA_CONGESTION_SETTING:
- return cc_get_congestion_setting(ccp, ibdev, port);
- case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
- return cc_get_congestion_control_table(ccp, ibdev, port);
- default:
- ccp->status |= IB_SMP_UNSUP_METH_ATTR;
- return reply((struct ib_smp *) ccp);
- }
- case IB_MGMT_METHOD_SET:
- switch (ccp->attr_id) {
- case IB_CC_ATTR_CA_CONGESTION_SETTING:
- return cc_set_congestion_setting(ccp, ibdev, port);
- case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
- return cc_set_congestion_control_table(ccp, ibdev, port);
- default:
- ccp->status |= IB_SMP_UNSUP_METH_ATTR;
- return reply((struct ib_smp *) ccp);
- }
- case IB_MGMT_METHOD_GET_RESP:
- /*
- * The ib_mad module will call us to process responses
- * before checking for other consumers.
- * Just tell the caller to process it normally.
- */
- return IB_MAD_RESULT_SUCCESS;
- }
-
- /* method is unsupported */
- ccp->status |= IB_SMP_UNSUP_METHOD;
- return reply((struct ib_smp *) ccp);
-}
-
-/**
- * qib_process_mad - process an incoming MAD packet
- * @ibdev: the infiniband device this packet came in on
- * @mad_flags: MAD flags
- * @port: the port number this packet came in on
- * @in_wc: the work completion entry for this packet
- * @in_grh: the global route header for this packet
- * @in: the incoming MAD
- * @out: any outgoing MAD reply
- * @out_mad_size: size of the outgoing MAD reply
- * @out_mad_pkey_index: unused
- *
- * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
- * interested in processing.
- *
- * Note that the verbs framework has already done the MAD sanity checks,
- * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
- * MADs.
- *
- * This is called by the ib_mad module.
- */
-int qib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port,
- const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad *in, struct ib_mad *out,
- size_t *out_mad_size, u16 *out_mad_pkey_index)
-{
- int ret;
- struct qib_ibport *ibp = to_iport(ibdev, port);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
-
- switch (in->mad_hdr.mgmt_class) {
- case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
- case IB_MGMT_CLASS_SUBN_LID_ROUTED:
- ret = process_subn(ibdev, mad_flags, port, in, out);
- goto bail;
-
- case IB_MGMT_CLASS_PERF_MGMT:
- ret = process_perf(ibdev, port, in, out);
- goto bail;
-
- case IB_MGMT_CLASS_CONG_MGMT:
- if (!ppd->congestion_entries_shadow ||
- !qib_cc_table_size) {
- ret = IB_MAD_RESULT_SUCCESS;
- goto bail;
- }
- ret = process_cc(ibdev, mad_flags, port, in, out);
- goto bail;
-
- default:
- ret = IB_MAD_RESULT_SUCCESS;
- }
-
-bail:
- return ret;
-}
-
-static void xmit_wait_timer_func(struct timer_list *t)
-{
- struct qib_pportdata *ppd = timer_container_of(ppd, t,
- cong_stats.timer);
- struct qib_devdata *dd = dd_from_ppd(ppd);
- unsigned long flags;
- u8 status;
-
- spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
- if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
- status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
- if (status == IB_PMA_SAMPLE_STATUS_DONE) {
- /* save counter cache */
- cache_hw_sample_counters(ppd);
- ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
- } else
- goto done;
- }
- ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
- dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
-done:
- spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
- mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
-}
-
-void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx)
-{
- struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
- struct qib_devdata *dd = container_of(ibdev,
- struct qib_devdata, verbs_dev);
-
- /* Initialize xmit_wait structure */
- dd->pport[port_idx].cong_stats.counter = 0;
- timer_setup(&dd->pport[port_idx].cong_stats.timer,
- xmit_wait_timer_func, 0);
- dd->pport[port_idx].cong_stats.timer.expires = 0;
- add_timer(&dd->pport[port_idx].cong_stats.timer);
-}
-
-void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx)
-{
- struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
- struct qib_devdata *dd = container_of(ibdev,
- struct qib_devdata, verbs_dev);
-
- if (dd->pport[port_idx].cong_stats.timer.function)
- timer_delete_sync(&dd->pport[port_idx].cong_stats.timer);
-
- if (dd->pport[port_idx].ibport_data.smi_ah)
- rdma_destroy_ah(&dd->pport[port_idx].ibport_data.smi_ah->ibah,
- RDMA_DESTROY_AH_SLEEPABLE);
-}
diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h
deleted file mode 100644
index 57e99dc0d80c..000000000000
--- a/drivers/infiniband/hw/qib/qib_mad.h
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _QIB_MAD_H
-#define _QIB_MAD_H
-
-#include <rdma/ib_pma.h>
-
-#define IB_SMP_UNSUP_VERSION \
-cpu_to_be16(IB_MGMT_MAD_STATUS_BAD_VERSION)
-
-#define IB_SMP_UNSUP_METHOD \
-cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD)
-
-#define IB_SMP_UNSUP_METH_ATTR \
-cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB)
-
-#define IB_SMP_INVALID_FIELD \
-cpu_to_be16(IB_MGMT_MAD_STATUS_INVALID_ATTRIB_VALUE)
-
-#define IB_VLARB_LOWPRI_0_31 1
-#define IB_VLARB_LOWPRI_32_63 2
-#define IB_VLARB_HIGHPRI_0_31 3
-#define IB_VLARB_HIGHPRI_32_63 4
-
-#define IB_PMA_PORT_COUNTERS_CONG cpu_to_be16(0xFF00)
-
-struct ib_pma_portcounters_cong {
- u8 reserved;
- u8 reserved1;
- __be16 port_check_rate;
- __be16 symbol_error_counter;
- u8 link_error_recovery_counter;
- u8 link_downed_counter;
- __be16 port_rcv_errors;
- __be16 port_rcv_remphys_errors;
- __be16 port_rcv_switch_relay_errors;
- __be16 port_xmit_discards;
- u8 port_xmit_constraint_errors;
- u8 port_rcv_constraint_errors;
- u8 reserved2;
- u8 link_overrun_errors; /* LocalLink: 7:4, BufferOverrun: 3:0 */
- __be16 reserved3;
- __be16 vl15_dropped;
- __be64 port_xmit_data;
- __be64 port_rcv_data;
- __be64 port_xmit_packets;
- __be64 port_rcv_packets;
- __be64 port_xmit_wait;
- __be64 port_adr_events;
-} __packed;
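
struct ib_pma_portcounters_cong mirrors the on-the-wire layout of the vendor PortCountersCongestion attribute byte for byte, hence __packed and the __be16/__be64 field types; consumers convert to host order at the point of use. A hedged illustration (demo_read_cong is hypothetical; the MAD plumbing is omitted):

/* Illustrative only: pull two counters out of a decoded reply. */
static void demo_read_cong(const struct ib_pma_portcounters_cong *p)
{
	u64 xmit_wait = be64_to_cpu(p->port_xmit_wait);
	u16 rcv_errs = be16_to_cpu(p->port_rcv_errors);

	pr_debug("xmit_wait=%llu rcv_errors=%u\n", xmit_wait, rcv_errs);
}
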
-
-#define IB_PMA_CONG_HW_CONTROL_TIMER 0x00
-#define IB_PMA_CONG_HW_CONTROL_SAMPLE 0x01
-
-#define QIB_XMIT_RATE_UNSUPPORTED 0x0
-#define QIB_XMIT_RATE_PICO 0x7
-/* number of 4 nsec cycles equaling 2 secs */
-#define QIB_CONG_TIMER_PSINTERVAL 0x1DCD64EC
-
-#define IB_PMA_SEL_CONG_ALL 0x01
-#define IB_PMA_SEL_CONG_PORT_DATA 0x02
-#define IB_PMA_SEL_CONG_XMIT 0x04
-#define IB_PMA_SEL_CONG_ROUTING 0x08
-
-/*
- * Congestion control class attributes
- */
-#define IB_CC_ATTR_CLASSPORTINFO cpu_to_be16(0x0001)
-#define IB_CC_ATTR_NOTICE cpu_to_be16(0x0002)
-#define IB_CC_ATTR_CONGESTION_INFO cpu_to_be16(0x0011)
-#define IB_CC_ATTR_CONGESTION_KEY_INFO cpu_to_be16(0x0012)
-#define IB_CC_ATTR_CONGESTION_LOG cpu_to_be16(0x0013)
-#define IB_CC_ATTR_SWITCH_CONGESTION_SETTING cpu_to_be16(0x0014)
-#define IB_CC_ATTR_SWITCH_PORT_CONGESTION_SETTING cpu_to_be16(0x0015)
-#define IB_CC_ATTR_CA_CONGESTION_SETTING cpu_to_be16(0x0016)
-#define IB_CC_ATTR_CONGESTION_CONTROL_TABLE cpu_to_be16(0x0017)
-#define IB_CC_ATTR_TIME_STAMP cpu_to_be16(0x0018)
-
-/* generalizations for threshold values */
-#define IB_CC_THRESHOLD_NONE 0x0
-#define IB_CC_THRESHOLD_MIN 0x1
-#define IB_CC_THRESHOLD_MAX 0xf
-
-/* CCA MAD header constants */
-#define IB_CC_MAD_LOGDATA_LEN 32
-#define IB_CC_MAD_MGMTDATA_LEN 192
-
-struct ib_cc_mad {
- u8 base_version;
- u8 mgmt_class;
- u8 class_version;
- u8 method;
- __be16 status;
- __be16 class_specific;
- __be64 tid;
- __be16 attr_id;
- __be16 resv;
- __be32 attr_mod;
- __be64 cckey;
-
- /* For CongestionLog attribute only */
- u8 log_data[IB_CC_MAD_LOGDATA_LEN];
-
- u8 mgmt_data[IB_CC_MAD_MGMTDATA_LEN];
-} __packed;
-
-/*
- * Congestion Control class portinfo capability mask bits
- */
-#define IB_CC_CPI_CM_TRAP_GEN cpu_to_be16(1 << 0)
-#define IB_CC_CPI_CM_GET_SET_NOTICE cpu_to_be16(1 << 1)
-#define IB_CC_CPI_CM_CAP2 cpu_to_be16(1 << 2)
-#define IB_CC_CPI_CM_ENHANCEDPORT0_CC cpu_to_be16(1 << 8)
-
-struct ib_cc_classportinfo_attr {
- u8 base_version;
- u8 class_version;
- __be16 cap_mask;
- u8 reserved[3];
- u8 resp_time_value; /* only lower 5 bits */
- union ib_gid redirect_gid;
- __be32 redirect_tc_sl_fl; /* 8, 4, 20 bits respectively */
- __be16 redirect_lid;
- __be16 redirect_pkey;
- __be32 redirect_qp; /* only lower 24 bits */
- __be32 redirect_qkey;
- union ib_gid trap_gid;
- __be32 trap_tc_sl_fl; /* 8, 4, 20 bits respectively */
- __be16 trap_lid;
- __be16 trap_pkey;
- __be32 trap_hl_qp; /* 8, 24 bits respectively */
- __be32 trap_qkey;
-} __packed;
-
-/* Congestion control traps */
-#define IB_CC_TRAP_KEY_VIOLATION 0x0000
-
-struct ib_cc_trap_key_violation_attr {
- __be16 source_lid;
- u8 method;
- u8 reserved1;
- __be16 attrib_id;
- __be32 attrib_mod;
- __be32 qp;
- __be64 cckey;
- u8 sgid[16];
- u8 padding[24];
-} __packed;
-
-/* Congestion info flags */
-#define IB_CC_CI_FLAGS_CREDIT_STARVATION 0x1
-#define IB_CC_TABLE_CAP_DEFAULT 31
-
-struct ib_cc_info_attr {
- __be16 congestion_info;
- u8 control_table_cap; /* Multiple of 64 entry unit CCTs */
-} __packed;
-
-struct ib_cc_key_info_attr {
- __be64 cckey;
- u8 protect;
- __be16 lease_period;
- __be16 violations;
-} __packed;
-
-#define IB_CC_CL_CA_LOGEVENTS_LEN 208
-
-struct ib_cc_log_attr {
- u8 log_type;
- u8 congestion_flags;
- __be16 threshold_event_counter;
- __be16 threshold_congestion_event_map;
- __be16 current_time_stamp;
- u8 log_events[IB_CC_CL_CA_LOGEVENTS_LEN];
-} __packed;
-
-#define IB_CC_CLEC_SERVICETYPE_RC 0x0
-#define IB_CC_CLEC_SERVICETYPE_UC 0x1
-#define IB_CC_CLEC_SERVICETYPE_RD 0x2
-#define IB_CC_CLEC_SERVICETYPE_UD 0x3
-
-struct ib_cc_log_event {
- u8 local_qp_cn_entry;
- u8 remote_qp_number_cn_entry[3];
- u8 sl_cn_entry:4;
- u8 service_type_cn_entry:4;
- __be32 remote_lid_cn_entry;
- __be32 timestamp_cn_entry;
-} __packed;
-
-/* Sixteen congestion entries */
-#define IB_CC_CCS_ENTRIES 16
-
-/* Port control flags */
-#define IB_CC_CCS_PC_SL_BASED 0x01
-
-struct ib_cc_congestion_entry {
- u8 ccti_increase;
- __be16 ccti_timer;
- u8 trigger_threshold;
- u8 ccti_min; /* min CCTI for cc table */
-} __packed;
-
-struct ib_cc_congestion_entry_shadow {
- u8 ccti_increase;
- u16 ccti_timer;
- u8 trigger_threshold;
- u8 ccti_min; /* min CCTI for cc table */
-} __packed;
-
-struct ib_cc_congestion_setting_attr {
- __be16 port_control;
- __be16 control_map;
- struct ib_cc_congestion_entry entries[IB_CC_CCS_ENTRIES];
-} __packed;
-
-struct ib_cc_congestion_setting_attr_shadow {
- u16 port_control;
- u16 control_map;
- struct ib_cc_congestion_entry_shadow entries[IB_CC_CCS_ENTRIES];
-} __packed;
-
-#define IB_CC_TABLE_ENTRY_INCREASE_DEFAULT 1
-#define IB_CC_TABLE_ENTRY_TIMER_DEFAULT 1
-
-/* 64 Congestion Control table entries in a single MAD */
-#define IB_CCT_ENTRIES 64
-#define IB_CCT_MIN_ENTRIES (IB_CCT_ENTRIES * 2)
-
-struct ib_cc_table_entry {
- __be16 entry; /* shift:2, multiplier:14 */
-};
-
-struct ib_cc_table_entry_shadow {
- u16 entry; /* shift:2, multiplier:14 */
-};
-
-struct ib_cc_table_attr {
- __be16 ccti_limit; /* max CCTI for cc table */
- struct ib_cc_table_entry ccti_entries[IB_CCT_ENTRIES];
-} __packed;
-
-struct ib_cc_table_attr_shadow {
- u16 ccti_limit; /* max CCTI for cc table */
- struct ib_cc_table_entry_shadow ccti_entries[IB_CCT_ENTRIES];
-} __packed;
-
-#define CC_TABLE_SHADOW_MAX \
- (IB_CC_TABLE_CAP_DEFAULT * IB_CCT_ENTRIES)
-
-struct cc_table_shadow {
- u16 ccti_last_entry;
- struct ib_cc_table_entry_shadow entries[CC_TABLE_SHADOW_MAX];
-} __packed;
-
-/*
- * The PortSamplesControl.CounterMasks field is an array of 3 bit fields
- * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
- * We support 5 counters which only count the mandatory quantities.
- */
-#define COUNTER_MASK(q, n) ((q) << ((9 - (n)) * 3))
-#define COUNTER_MASK0_9 \
- cpu_to_be32(COUNTER_MASK(1, 0) | \
- COUNTER_MASK(1, 1) | \
- COUNTER_MASK(1, 2) | \
- COUNTER_MASK(1, 3) | \
- COUNTER_MASK(1, 4))
-
-#endif /* _QIB_MAD_H */
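
COUNTER_MASK(q, n) places a 3-bit capability field for counter n at bit position (9 - n) * 3, so counter 0 occupies bits 29:27 and counter 4 bits 17:15. Spelled out as a self-check (not from the original; assumes C11 _Static_assert):

/* COUNTER_MASK(1, 0) == 1 << 27, ..., COUNTER_MASK(1, 4) == 1 << 15 */
_Static_assert((COUNTER_MASK(1, 0) | COUNTER_MASK(1, 1) |
		COUNTER_MASK(1, 2) | COUNTER_MASK(1, 3) |
		COUNTER_MASK(1, 4)) == 0x09248000,
	       "COUNTER_MASK0_9 is cpu_to_be32(0x09248000)");
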
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
deleted file mode 100644
index 58c1d62d341b..000000000000
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ /dev/null
@@ -1,598 +0,0 @@
-/*
- * Copyright (c) 2010 - 2017 Intel Corporation. All rights reserved.
- * Copyright (c) 2008, 2009 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-
-#include "qib.h"
-
-/*
- * This file contains PCIe utility routines that are common to the
- * various QLogic InfiniPath adapters
- */
-
-/*
- * Code to adjust PCIe capabilities.
- * To minimize the change footprint, we call it
- * from qib_pcie_params, which every chip-specific
- * file calls, even though this violates some
- * expectations of harmlessness.
- */
-static void qib_tune_pcie_caps(struct qib_devdata *);
-static void qib_tune_pcie_coalesce(struct qib_devdata *);
-
-/*
- * Do all the common PCIe setup and initialization.
- * devdata is not yet allocated, and is not allocated until after this
- * routine returns success. Therefore qib_dev_err() can't be used for error
- * printing.
- */
-int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int ret;
-
- ret = pci_enable_device(pdev);
- if (ret) {
- /*
- * This can happen (in theory) iff:
- * We did a chip reset, and then failed to reprogram the
- * BAR, or the chip reset due to an internal error. We then
- * unloaded the driver and reloaded it.
- *
- * Both reset cases set the BAR back to initial state. For
- * the latter case, the AER sticky error bit at offset 0x718
- * should be set, but the Linux kernel doesn't yet know
- * about that, it appears. If the original BAR was retained
- * in the kernel data structures, this may be OK.
- */
- qib_early_err(&pdev->dev, "pci enable failed: error %d\n",
- -ret);
- goto done;
- }
-
- ret = pci_request_regions(pdev, QIB_DRV_NAME);
- if (ret) {
- qib_devinfo(pdev, "pci_request_regions fails: err %d\n", -ret);
- goto bail;
- }
-
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (ret) {
- /*
-		 * If the 64 bit setup fails, try 32 bit. Some systems
-		 * do not set up 64 bit maps when 2GB or less memory
-		 * is installed.
- */
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (ret) {
- qib_devinfo(pdev, "Unable to set DMA mask: %d\n", ret);
- goto bail;
- }
- }
-
- pci_set_master(pdev);
- goto done;
-
-bail:
- pci_disable_device(pdev);
- pci_release_regions(pdev);
-done:
- return ret;
-}
-
-/*
- * Do remaining PCIe setup, once dd is allocated, and save away
- * fields required to re-initialize after a chip reset, or for
- * various other purposes
- */
-int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- unsigned long len;
- resource_size_t addr;
-
- dd->pcidev = pdev;
- pci_set_drvdata(pdev, dd);
-
- addr = pci_resource_start(pdev, 0);
- len = pci_resource_len(pdev, 0);
-
- dd->kregbase = ioremap(addr, len);
- if (!dd->kregbase)
- return -ENOMEM;
-
- dd->kregend = (u64 __iomem *)((void __iomem *) dd->kregbase + len);
- dd->physaddr = addr; /* used for io_remap, etc. */
-
- /*
- * Save BARs to rewrite after device reset. Save all 64 bits of
- * BAR, just in case.
- */
- dd->pcibar0 = addr;
- dd->pcibar1 = addr >> 32;
- dd->deviceid = ent->device; /* save for later use */
- dd->vendorid = ent->vendor;
-
- return 0;
-}
-
-/*
- * Do PCIe cleanup, after chip-specific cleanup, etc. Just prior
- * to releasing the dd memory.
- * void because all of the core pcie cleanup calls return void anyway
- */
-void qib_pcie_ddcleanup(struct qib_devdata *dd)
-{
- u64 __iomem *base = (void __iomem *) dd->kregbase;
-
- dd->kregbase = NULL;
- iounmap(base);
- if (dd->piobase)
- iounmap(dd->piobase);
- if (dd->userbase)
- iounmap(dd->userbase);
- if (dd->piovl15base)
- iounmap(dd->piovl15base);
-
- pci_disable_device(dd->pcidev);
- pci_release_regions(dd->pcidev);
-
- pci_set_drvdata(dd->pcidev, NULL);
-}
-
-/*
- * We save the msi lo and hi values, so we can restore them after
- * chip reset (the kernel PCI infrastructure doesn't yet handle that
- * correctly).
- */
-static void qib_cache_msi_info(struct qib_devdata *dd, int pos)
-{
- struct pci_dev *pdev = dd->pcidev;
- u16 control;
-
- pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO, &dd->msi_lo);
- pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI, &dd->msi_hi);
- pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
-
- /* now save the data (vector) info */
- pci_read_config_word(pdev,
-			     pos + ((control & PCI_MSI_FLAGS_64BIT) ?
-				    PCI_MSI_DATA_64 : PCI_MSI_DATA_32),
- &dd->msi_data);
-}
-
-int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent)
-{
- u16 linkstat, speed;
- int nvec;
- int maxvec;
- unsigned int flags = PCI_IRQ_MSIX | PCI_IRQ_MSI;
-
- if (!pci_is_pcie(dd->pcidev)) {
- qib_dev_err(dd, "Can't find PCI Express capability!\n");
- /* set up something... */
- dd->lbus_width = 1;
- dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
- nvec = -1;
- goto bail;
- }
-
- if (dd->flags & QIB_HAS_INTX)
- flags |= PCI_IRQ_INTX;
- maxvec = (nent && *nent) ? *nent : 1;
- nvec = pci_alloc_irq_vectors(dd->pcidev, 1, maxvec, flags);
- if (nvec < 0)
- goto bail;
-
- /*
- * If nent exists, make sure to record how many vectors were allocated.
- * If msix_enabled is false, return 0 so the fallback code works
- * correctly.
- */
- if (nent)
- *nent = !dd->pcidev->msix_enabled ? 0 : nvec;
-
- if (dd->pcidev->msi_enabled)
- qib_cache_msi_info(dd, dd->pcidev->msi_cap);
-
- pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
- /*
- * speed is bits 0-3, linkwidth is bits 4-8
- * no defines for them in headers
- */
- speed = linkstat & 0xf;
- linkstat >>= 4;
- linkstat &= 0x1f;
- dd->lbus_width = linkstat;
-
- switch (speed) {
- case 1:
- dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
- break;
- case 2:
-		dd->lbus_speed = 5000; /* Gen2, 5GHz */
- break;
- default: /* not defined, assume gen1 */
- dd->lbus_speed = 2500;
- break;
- }
-
- /*
- * Check against expected pcie width and complain if "wrong"
- * on first initialization, not afterwards (i.e., reset).
- */
- if (minw && linkstat < minw)
- qib_dev_err(dd,
- "PCIe width %u (x%u HCA), performance reduced\n",
- linkstat, minw);
-
- qib_tune_pcie_caps(dd);
-
- qib_tune_pcie_coalesce(dd);
-
-bail:
- /* fill in string, even on errors */
- snprintf(dd->lbus_info, sizeof(dd->lbus_info),
- "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
- return nvec < 0 ? nvec : 0;
-}
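
The open-coded mask/shift above predates the generic defines; current kernels expose the same Link Status fields as PCI_EXP_LNKSTA_CLS and PCI_EXP_LNKSTA_NLW (with PCI_EXP_LNKSTA_NLW_SHIFT), so an equivalent decode today would be a sketch like:

u16 linkstat, speed, width;

pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
speed = linkstat & PCI_EXP_LNKSTA_CLS;		/* 1 = 2.5 GT/s, 2 = 5 GT/s */
width = (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
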
-
-/**
- * qib_free_irq - Cleanup INTx and MSI interrupts
- * @dd: valid pointer to qib dev data
- *
- * Since cleanup for INTx and MSI interrupts is trivial, have a common
- * routine.
- *
- */
-void qib_free_irq(struct qib_devdata *dd)
-{
- pci_free_irq(dd->pcidev, 0, dd);
- pci_free_irq_vectors(dd->pcidev);
-}
-
-/*
- * Setup pcie interrupt stuff again after a reset. I'd like to just call
- * pci_enable_msi() again for msi, but when I do that,
- * the MSI enable bit doesn't get set in the command word, and
- * we switch to a different interrupt vector, which is confusing,
- * so I instead just do it all inline. Perhaps this can somehow be tied
- * into the PCIe hotplug support at some point.
- */
-int qib_reinit_intr(struct qib_devdata *dd)
-{
- int pos;
- u16 control;
- int ret = 0;
-
- /* If we aren't using MSI, don't restore it */
- if (!dd->msi_lo)
- goto bail;
-
- pos = dd->pcidev->msi_cap;
- if (!pos) {
- qib_dev_err(dd,
- "Can't find MSI capability, can't restore MSI settings\n");
- ret = 0;
- /* nothing special for MSIx, just MSI */
- goto bail;
- }
- pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
- dd->msi_lo);
- pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
- dd->msi_hi);
- pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
- if (!(control & PCI_MSI_FLAGS_ENABLE)) {
- control |= PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
- control);
- }
- /* now rewrite the data (vector) info */
- pci_write_config_word(dd->pcidev, pos +
-			      ((control & PCI_MSI_FLAGS_64BIT) ?
-			       PCI_MSI_DATA_64 : PCI_MSI_DATA_32),
- dd->msi_data);
- ret = 1;
-bail:
- qib_free_irq(dd);
-
- if (!ret && (dd->flags & QIB_HAS_INTX))
- ret = 1;
-
- /* and now set the pci master bit again */
- pci_set_master(dd->pcidev);
-
- return ret;
-}
-
-/*
- * These two routines are helper routines for the device reset code
- * to move all the pcie code out of the chip-specific driver code.
- */
-void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
-{
- pci_read_config_word(dd->pcidev, PCI_COMMAND, cmd);
- pci_read_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
- pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
-}
-
-void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
-{
- int r;
-
- r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
- dd->pcibar0);
- if (r)
- qib_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
- r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
- dd->pcibar1);
- if (r)
- qib_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
- /* now re-enable memory access, and restore cosmetic settings */
- pci_write_config_word(dd->pcidev, PCI_COMMAND, cmd);
- pci_write_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
- pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
- r = pci_enable_device(dd->pcidev);
- if (r)
- qib_dev_err(dd,
- "pci_enable_device failed after reset: %d\n", r);
-}
-
-static int qib_pcie_coalesce;
-module_param_named(pcie_coalesce, qib_pcie_coalesce, int, S_IRUGO);
-MODULE_PARM_DESC(pcie_coalesce, "tune PCIe coalescing on some Intel chipsets");
-
-/*
- * Enable PCIe completion and data coalescing, on Intel 5x00 and 7300
- * chipsets. This is known to be unsafe for some revisions of some
- * of these chipsets, with some BIOS settings, and enabling it on those
- * systems may result in the system crashing, and/or data corruption.
- */
-static void qib_tune_pcie_coalesce(struct qib_devdata *dd)
-{
- struct pci_dev *parent;
- u16 devid;
- u32 mask, bits, val;
-
- if (!qib_pcie_coalesce)
- return;
-
- /* Find out supported and configured values for parent (root) */
- parent = dd->pcidev->bus->self;
- if (parent->bus->parent) {
- qib_devinfo(dd->pcidev, "Parent not root\n");
- return;
- }
- if (!pci_is_pcie(parent))
- return;
- if (parent->vendor != 0x8086)
- return;
-
- /*
- * - bit 12: Max_rdcmp_Imt_EN: need to set to 1
- * - bit 11: COALESCE_FORCE: need to set to 0
- * - bit 10: COALESCE_EN: need to set to 1
-	 * (but with limitations on some chipsets)
-	 *
-	 * On the Intel 5000, 5100, and 7300 chipsets, there is also:
-	 *  - bit 25:24: COALESCE_MODE, need to set to 0
- */
- devid = parent->device;
- if (devid >= 0x25e2 && devid <= 0x25fa) {
- /* 5000 P/V/X/Z */
- if (parent->revision <= 0xb2)
- bits = 1U << 10;
- else
- bits = 7U << 10;
- mask = (3U << 24) | (7U << 10);
- } else if (devid >= 0x65e2 && devid <= 0x65fa) {
- /* 5100 */
- bits = 1U << 10;
- mask = (3U << 24) | (7U << 10);
- } else if (devid >= 0x4021 && devid <= 0x402e) {
- /* 5400 */
- bits = 7U << 10;
- mask = 7U << 10;
- } else if (devid >= 0x3604 && devid <= 0x360a) {
- /* 7300 */
- bits = 7U << 10;
- mask = (3U << 24) | (7U << 10);
- } else {
- /* not one of the chipsets that we know about */
- return;
- }
- pci_read_config_dword(parent, 0x48, &val);
- val &= ~mask;
- val |= bits;
- pci_write_config_dword(parent, 0x48, val);
-}
-
-/*
- * BIOS may not set PCIe bus-utilization parameters for best performance.
- * Check and optionally adjust them to maximize our throughput.
- */
-static int qib_pcie_caps;
-module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
-MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
-
-static void qib_tune_pcie_caps(struct qib_devdata *dd)
-{
- struct pci_dev *parent;
- u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
- u16 rc_mrrs, ep_mrrs, max_mrrs;
-
- /* Find out supported and configured values for parent (root) */
- parent = dd->pcidev->bus->self;
- if (!pci_is_root_bus(parent->bus)) {
- qib_devinfo(dd->pcidev, "Parent not root\n");
- return;
- }
-
- if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
- return;
-
- rc_mpss = parent->pcie_mpss;
- rc_mps = ffs(pcie_get_mps(parent)) - 8;
- /* Find out supported and configured values for endpoint (us) */
- ep_mpss = dd->pcidev->pcie_mpss;
- ep_mps = ffs(pcie_get_mps(dd->pcidev)) - 8;
-
- /* Find max payload supported by root, endpoint */
- if (rc_mpss > ep_mpss)
- rc_mpss = ep_mpss;
-
- /* If Supported greater than limit in module param, limit it */
- if (rc_mpss > (qib_pcie_caps & 7))
- rc_mpss = qib_pcie_caps & 7;
- /* If less than (allowed, supported), bump root payload */
- if (rc_mpss > rc_mps) {
- rc_mps = rc_mpss;
- pcie_set_mps(parent, 128 << rc_mps);
- }
- /* If less than (allowed, supported), bump endpoint payload */
- if (rc_mpss > ep_mps) {
- ep_mps = rc_mpss;
- pcie_set_mps(dd->pcidev, 128 << ep_mps);
- }
-
- /*
- * Now the Read Request size.
- * No field for max supported, but PCIe spec limits it to 4096,
- * which is code '5' (log2(4096) - 7)
- */
- max_mrrs = 5;
- if (max_mrrs > ((qib_pcie_caps >> 4) & 7))
- max_mrrs = (qib_pcie_caps >> 4) & 7;
-
- max_mrrs = 128 << max_mrrs;
- rc_mrrs = pcie_get_readrq(parent);
- ep_mrrs = pcie_get_readrq(dd->pcidev);
-
- if (max_mrrs > rc_mrrs) {
- rc_mrrs = max_mrrs;
- pcie_set_readrq(parent, rc_mrrs);
- }
- if (max_mrrs > ep_mrrs) {
- ep_mrrs = max_mrrs;
- pcie_set_readrq(dd->pcidev, ep_mrrs);
- }
-}
-/* End of PCIe capability tuning */
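
The ffs()/shift pairs above convert between the byte counts used by pcie_get_mps()/pcie_set_mps() and the 3-bit config-space encoding, where code c means 128 << c bytes. Worked through for a 256-byte payload:

/*
 * pcie_get_mps(parent) == 256 bytes:
 *   ffs(256) - 8 = 9 - 8 = 1	(encoded MPS code)
 *   128 << 1 = 256		(decoded back to bytes)
 * Likewise code 5 decodes to 128 << 5 == 4096, the spec maximum
 * used for max_mrrs above.
 */
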
-
-/*
- * Everything from here through the qib_pci_err_handler definition is
- * invoked via the PCI error-handling infrastructure, registered with
- * the PCI core.
- */
-static pci_ers_result_t
-qib_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
-{
- struct qib_devdata *dd = pci_get_drvdata(pdev);
- pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
-
- switch (state) {
- case pci_channel_io_normal:
- qib_devinfo(pdev, "State Normal, ignoring\n");
- break;
-
- case pci_channel_io_frozen:
- qib_devinfo(pdev, "State Frozen, requesting reset\n");
- pci_disable_device(pdev);
- ret = PCI_ERS_RESULT_NEED_RESET;
- break;
-
- case pci_channel_io_perm_failure:
- qib_devinfo(pdev, "State Permanent Failure, disabling\n");
- if (dd) {
- /* no more register accesses! */
- dd->flags &= ~QIB_PRESENT;
- qib_disable_after_error(dd);
- }
- /* else early, or other problem */
- ret = PCI_ERS_RESULT_DISCONNECT;
- break;
-
- default: /* shouldn't happen */
- qib_devinfo(pdev, "QIB PCI errors detected (state %d)\n",
- state);
- break;
- }
- return ret;
-}
-
-static pci_ers_result_t
-qib_pci_mmio_enabled(struct pci_dev *pdev)
-{
- u64 words = 0U;
- struct qib_devdata *dd = pci_get_drvdata(pdev);
- pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
-
- if (dd && dd->pport) {
- words = dd->f_portcntr(dd->pport, QIBPORTCNTR_WORDRCV);
- if (words == ~0ULL)
- ret = PCI_ERS_RESULT_NEED_RESET;
- }
- qib_devinfo(pdev,
- "QIB mmio_enabled function called, read wordscntr %Lx, returning %d\n",
- words, ret);
- return ret;
-}
-
-static pci_ers_result_t
-qib_pci_slot_reset(struct pci_dev *pdev)
-{
- qib_devinfo(pdev, "QIB slot_reset function called, ignored\n");
- return PCI_ERS_RESULT_CAN_RECOVER;
-}
-
-static void
-qib_pci_resume(struct pci_dev *pdev)
-{
- struct qib_devdata *dd = pci_get_drvdata(pdev);
-
- qib_devinfo(pdev, "QIB resume function called\n");
- /*
- * Running jobs will fail, since it's asynchronous
- * unlike sysfs-requested reset. Better than
- * doing nothing.
- */
- qib_init(dd, 1); /* same as re-init after reset */
-}
-
-const struct pci_error_handlers qib_pci_err_handler = {
- .error_detected = qib_pci_error_detected,
- .mmio_enabled = qib_pci_mmio_enabled,
- .slot_reset = qib_pci_slot_reset,
- .resume = qib_pci_resume,
-};
diff --git a/drivers/infiniband/hw/qib/qib_pio_copy.c b/drivers/infiniband/hw/qib/qib_pio_copy.c
deleted file mode 100644
index 10b8c444dd31..000000000000
--- a/drivers/infiniband/hw/qib/qib_pio_copy.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2009 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "qib.h"
-
-/**
- * qib_pio_copy - copy data to MMIO space, in multiples of 32-bits
- * @to: destination, in MMIO space (must be 64-bit aligned)
- * @from: source (must be 64-bit aligned)
- * @count: number of 32-bit quantities to copy
- *
- * Copy data from kernel space to MMIO space, in multiples of 32 bits at a
- * time. Order of access is not guaranteed, nor is a memory barrier
- * performed afterwards.
- */
-void qib_pio_copy(void __iomem *to, const void *from, size_t count)
-{
-#ifdef CONFIG_64BIT
- u64 __iomem *dst = to;
- const u64 *src = from;
- const u64 *end = src + (count >> 1);
-
- while (src < end)
- __raw_writeq(*src++, dst++);
- if (count & 1)
- __raw_writel(*(const u32 *)src, dst);
-#else
- u32 __iomem *dst = to;
- const u32 *src = from;
- const u32 *end = src + count;
-
- while (src < end)
- __raw_writel(*src++, dst++);
-#endif
-}
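
A hedged usage sketch (demo_send_hdr and its arguments are illustrative, not from the driver): callers pass a 64-bit-aligned source and a count of 32-bit words, and supply their own ordering barrier afterwards if the device needs one.

static void demo_send_hdr(void __iomem *piobuf, const u64 *hdr)
{
	/* hdr holds 8 dwords (32 bytes) of pre-built packet header */
	qib_pio_copy(piobuf, hdr, 8);
	wmb();	/* the copy itself gives no ordering guarantee */
}
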
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
deleted file mode 100644
index 1974ceb9d405..000000000000
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ /dev/null
@@ -1,454 +0,0 @@
-/*
- * Copyright (c) 2012 - 2019 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/err.h>
-#include <linux/vmalloc.h>
-#include <rdma/rdma_vt.h>
-#ifdef CONFIG_DEBUG_FS
-#include <linux/seq_file.h>
-#endif
-
-#include "qib.h"
-
-static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
- struct rvt_qpn_map *map, unsigned off)
-{
- return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
-}
-
-static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
- struct rvt_qpn_map *map, unsigned off,
- unsigned n, u16 qpt_mask)
-{
- if (qpt_mask) {
- off++;
- if (((off & qpt_mask) >> 1) >= n)
- off = (off | qpt_mask) + 2;
- } else {
- off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
- }
- return off;
-}
-
-const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
-[IB_WR_RDMA_WRITE] = {
- .length = sizeof(struct ib_rdma_wr),
- .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
-},
-
-[IB_WR_RDMA_READ] = {
- .length = sizeof(struct ib_rdma_wr),
- .qpt_support = BIT(IB_QPT_RC),
- .flags = RVT_OPERATION_ATOMIC,
-},
-
-[IB_WR_ATOMIC_CMP_AND_SWP] = {
- .length = sizeof(struct ib_atomic_wr),
- .qpt_support = BIT(IB_QPT_RC),
- .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
-},
-
-[IB_WR_ATOMIC_FETCH_AND_ADD] = {
- .length = sizeof(struct ib_atomic_wr),
- .qpt_support = BIT(IB_QPT_RC),
- .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
-},
-
-[IB_WR_RDMA_WRITE_WITH_IMM] = {
- .length = sizeof(struct ib_rdma_wr),
- .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
-},
-
-[IB_WR_SEND] = {
- .length = sizeof(struct ib_send_wr),
- .qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
- BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
-},
-
-[IB_WR_SEND_WITH_IMM] = {
- .length = sizeof(struct ib_send_wr),
- .qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
- BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
-},
-
-};
-
-static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
-{
- unsigned long page = get_zeroed_page(GFP_KERNEL);
-
- /*
- * Free the page if someone raced with us installing it.
- */
-
- spin_lock(&qpt->lock);
- if (map->page)
- free_page(page);
- else
- map->page = (void *)page;
- spin_unlock(&qpt->lock);
-}
-
-/*
- * Allocate the next available QPN or
- * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
- */
-int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u32 port)
-{
- u32 i, offset, max_scan, qpn;
- struct rvt_qpn_map *map;
- u32 ret;
- struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
- struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
- verbs_dev);
- u16 qpt_mask = dd->qpn_mask;
-
- if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
- u32 n;
-
- ret = type == IB_QPT_GSI;
- n = 1 << (ret + 2 * (port - 1));
- spin_lock(&qpt->lock);
- if (qpt->flags & n)
- ret = -EINVAL;
- else
- qpt->flags |= n;
- spin_unlock(&qpt->lock);
- goto bail;
- }
-
- qpn = qpt->last + 2;
- if (qpn >= RVT_QPN_MAX)
- qpn = 2;
- if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
- qpn = (qpn | qpt_mask) + 2;
- offset = qpn & RVT_BITS_PER_PAGE_MASK;
- map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
- max_scan = qpt->nmaps - !offset;
- for (i = 0;;) {
- if (unlikely(!map->page)) {
- get_map_page(qpt, map);
- if (unlikely(!map->page))
- break;
- }
- do {
- if (!test_and_set_bit(offset, map->page)) {
- qpt->last = qpn;
- ret = qpn;
- goto bail;
- }
- offset = find_next_offset(qpt, map, offset,
- dd->n_krcv_queues, qpt_mask);
- qpn = mk_qpn(qpt, map, offset);
- /*
- * This test differs from alloc_pidmap().
- * If find_next_offset() does find a zero
- * bit, we don't need to check for QPN
- * wrapping around past our starting QPN.
- * We just need to be sure we don't loop
- * forever.
- */
- } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
- /*
- * In order to keep the number of pages allocated to a
-		 * minimum, we scan all the existing pages before increasing
- * the size of the bitmap table.
- */
- if (++i > max_scan) {
- if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
- break;
- map = &qpt->map[qpt->nmaps++];
- offset = 0;
- } else if (map < &qpt->map[qpt->nmaps]) {
- ++map;
- offset = 0;
- } else {
- map = &qpt->map[0];
- offset = 2;
- }
- qpn = mk_qpn(qpt, map, offset);
- }
-
- ret = -ENOMEM;
-
-bail:
- return ret;
-}
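
The QPN <-> bitmap translation used above is plain div/mod arithmetic over RVT_BITS_PER_PAGE-sized chunks, with mk_qpn() as the inverse; spelled out (names are from rdmavt):

/*
 * For any qpn:
 *   map    = &qpt->map[qpn / RVT_BITS_PER_PAGE];
 *   offset = qpn & RVT_BITS_PER_PAGE_MASK;   (i.e. qpn % RVT_BITS_PER_PAGE)
 * and mk_qpn() inverts that:
 *   qpn    = (map - qpt->map) * RVT_BITS_PER_PAGE + offset;
 * When qpt_mask is nonzero, (qpn & qpt_mask) >> 1 selects the kernel
 * receive context, so the scan skips values that would address a
 * nonexistent context.
 */
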
-
-/*
- * qib_free_all_qps - check for QPs still in use
- */
-unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
-{
- struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
- struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
- verbs_dev);
- unsigned n, qp_inuse = 0;
-
- for (n = 0; n < dd->num_pports; n++) {
- struct qib_ibport *ibp = &dd->pport[n].ibport_data;
-
- rcu_read_lock();
- if (rcu_dereference(ibp->rvp.qp[0]))
- qp_inuse++;
- if (rcu_dereference(ibp->rvp.qp[1]))
- qp_inuse++;
- rcu_read_unlock();
- }
- return qp_inuse;
-}
-
-void qib_notify_qp_reset(struct rvt_qp *qp)
-{
- struct qib_qp_priv *priv = qp->priv;
-
- atomic_set(&priv->s_dma_busy, 0);
-}
-
-void qib_notify_error_qp(struct rvt_qp *qp)
-{
- struct qib_qp_priv *priv = qp->priv;
- struct qib_ibdev *dev = to_idev(qp->ibqp.device);
-
- spin_lock(&dev->rdi.pending_lock);
- if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
- qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
- list_del_init(&priv->iowait);
- }
- spin_unlock(&dev->rdi.pending_lock);
-
- if (!(qp->s_flags & RVT_S_BUSY)) {
- qp->s_hdrwords = 0;
- if (qp->s_rdma_mr) {
- rvt_put_mr(qp->s_rdma_mr);
- qp->s_rdma_mr = NULL;
- }
- if (priv->s_tx) {
- qib_put_txreq(priv->s_tx);
- priv->s_tx = NULL;
- }
- }
-}
-
-static int mtu_to_enum(u32 mtu)
-{
- int enum_mtu;
-
- switch (mtu) {
- case 4096:
- enum_mtu = IB_MTU_4096;
- break;
- case 2048:
- enum_mtu = IB_MTU_2048;
- break;
- case 1024:
- enum_mtu = IB_MTU_1024;
- break;
- case 512:
- enum_mtu = IB_MTU_512;
- break;
- case 256:
- enum_mtu = IB_MTU_256;
- break;
- default:
- enum_mtu = IB_MTU_2048;
- }
- return enum_mtu;
-}
-
-int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
- struct ib_qp_attr *attr)
-{
- int mtu, pmtu, pidx = qp->port_num - 1;
- struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
- struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
- verbs_dev);
- mtu = ib_mtu_enum_to_int(attr->path_mtu);
- if (mtu == -1)
- return -EINVAL;
-
- if (mtu > dd->pport[pidx].ibmtu)
- pmtu = mtu_to_enum(dd->pport[pidx].ibmtu);
- else
- pmtu = attr->path_mtu;
- return pmtu;
-}
-
-int qib_mtu_to_path_mtu(u32 mtu)
-{
- return mtu_to_enum(mtu);
-}
-
-u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
-{
- return ib_mtu_enum_to_int(pmtu);
-}
-
-void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
-{
- struct qib_qp_priv *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return ERR_PTR(-ENOMEM);
- priv->owner = qp;
-
- priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL);
- if (!priv->s_hdr) {
- kfree(priv);
- return ERR_PTR(-ENOMEM);
- }
- init_waitqueue_head(&priv->wait_dma);
- INIT_WORK(&priv->s_work, _qib_do_send);
- INIT_LIST_HEAD(&priv->iowait);
-
- return priv;
-}
-
-void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
-{
- struct qib_qp_priv *priv = qp->priv;
-
- kfree(priv->s_hdr);
- kfree(priv);
-}
-
-void qib_stop_send_queue(struct rvt_qp *qp)
-{
- struct qib_qp_priv *priv = qp->priv;
-
- cancel_work_sync(&priv->s_work);
-}
-
-void qib_quiesce_qp(struct rvt_qp *qp)
-{
- struct qib_qp_priv *priv = qp->priv;
-
- wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
- if (priv->s_tx) {
- qib_put_txreq(priv->s_tx);
- priv->s_tx = NULL;
- }
-}
-
-void qib_flush_qp_waiters(struct rvt_qp *qp)
-{
- struct qib_qp_priv *priv = qp->priv;
- struct qib_ibdev *dev = to_idev(qp->ibqp.device);
-
- spin_lock(&dev->rdi.pending_lock);
- if (!list_empty(&priv->iowait))
- list_del_init(&priv->iowait);
- spin_unlock(&dev->rdi.pending_lock);
-}
-
-/**
- * qib_check_send_wqe - validate wr/wqe
- * @qp: The qp
- * @wqe: The built wqe
- * @call_send: Determine if the send should be posted or scheduled
- *
- * Returns 0 on success, -EINVAL on failure
- */
-int qib_check_send_wqe(struct rvt_qp *qp,
- struct rvt_swqe *wqe, bool *call_send)
-{
- struct rvt_ah *ah;
-
- switch (qp->ibqp.qp_type) {
- case IB_QPT_RC:
- case IB_QPT_UC:
- if (wqe->length > 0x80000000U)
- return -EINVAL;
- if (wqe->length > qp->pmtu)
- *call_send = false;
- break;
- case IB_QPT_SMI:
- case IB_QPT_GSI:
- case IB_QPT_UD:
- ah = rvt_get_swqe_ah(wqe);
- if (wqe->length > (1 << ah->log_pmtu))
- return -EINVAL;
- /* progress hint */
- *call_send = true;
- break;
- default:
- break;
- }
- return 0;
-}
-
-#ifdef CONFIG_DEBUG_FS
-
-static const char * const qp_type_str[] = {
- "SMI", "GSI", "RC", "UC", "UD",
-};
-
-/**
- * qib_qp_iter_print - print information to seq_file
- * @s: the seq_file
- * @iter: the iterator
- */
-void qib_qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
-{
- struct rvt_swqe *wqe;
- struct rvt_qp *qp = iter->qp;
- struct qib_qp_priv *priv = qp->priv;
-
- wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- seq_printf(s,
- "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
- iter->n,
- qp->ibqp.qp_num,
- qp_type_str[qp->ibqp.qp_type],
- qp->state,
- wqe->wr.opcode,
- qp->s_hdrwords,
- qp->s_flags,
- atomic_read(&priv->s_dma_busy),
- !list_empty(&priv->iowait),
- qp->timeout,
- wqe->ssn,
- qp->s_lsn,
- qp->s_last_psn,
- qp->s_psn, qp->s_next_psn,
- qp->s_sending_psn, qp->s_sending_hpsn,
- qp->s_last, qp->s_acked, qp->s_cur,
- qp->s_tail, qp->s_head, qp->s_size,
- qp->remote_qpn,
- rdma_ah_get_dlid(&qp->remote_ah_attr));
-}
-
-#endif
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
deleted file mode 100644
index 295d40a83bb6..000000000000
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ /dev/null
@@ -1,549 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-
-#include "qib.h"
-#include "qib_qsfp.h"
-
-/*
- * QSFP support for ib_qib driver, using "Two Wire Serial Interface" driver
- * in qib_twsi.c
- */
-#define QSFP_MAX_RETRY 4
-
-static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
-{
- struct qib_devdata *dd = ppd->dd;
- u32 out, mask;
- int ret, cnt, pass = 0;
- int stuck = 0;
- u8 *buff = bp;
-
- ret = mutex_lock_interruptible(&dd->eep_lock);
- if (ret)
- goto no_unlock;
-
- if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
- ret = -ENXIO;
- goto bail;
- }
-
- /*
- * We presume, if we are called at all, that this board has
- * QSFP. This is on the same i2c chain as the legacy parts,
- * but only responds if the module is selected via GPIO pins.
- * Further, there are very long setup and hold requirements
- * on MODSEL.
- */
- mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
- out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
- if (ppd->hw_pidx) {
- mask <<= QSFP_GPIO_PORT2_SHIFT;
- out <<= QSFP_GPIO_PORT2_SHIFT;
- }
-
- dd->f_gpio_mod(dd, out, mask, mask);
-
- /*
-	 * Module could take up to 2 msec to respond to MOD_SEL, and there
- * is no way to tell if it is ready, so we must wait.
- */
- msleep(20);
-
- /* Make sure TWSI bus is in sane state. */
- ret = qib_twsi_reset(dd);
- if (ret) {
- qib_dev_porterr(dd, ppd->port,
- "QSFP interface Reset for read failed\n");
- ret = -EIO;
- stuck = 1;
- goto deselect;
- }
-
- /* All QSFP modules are at A0 */
-
- cnt = 0;
- while (cnt < len) {
- unsigned in_page;
- int wlen = len - cnt;
-
- in_page = addr % QSFP_PAGESIZE;
- if ((in_page + wlen) > QSFP_PAGESIZE)
- wlen = QSFP_PAGESIZE - in_page;
- ret = qib_twsi_blk_rd(dd, QSFP_DEV, addr, buff + cnt, wlen);
-		/* Some QSFPs fail the first try, so retry the initial read */
- if (ret && cnt == 0 && ++pass < QSFP_MAX_RETRY)
- continue;
- if (ret) {
- /* qib_twsi_blk_rd() 1 for error, else 0 */
- ret = -EIO;
- goto deselect;
- }
- addr += wlen;
- cnt += wlen;
- }
- ret = cnt;
-
-deselect:
- /*
-	 * Module could take up to 10 usec after transfer before it is
-	 * ready to respond to MOD_SEL negation, and there is no way
- * to tell if it is ready, so we must wait.
- */
- udelay(10);
-	/* set QSFP MODSEL, RST, LP all high */
- dd->f_gpio_mod(dd, mask, mask, mask);
-
- /*
-	 * Module could take up to 2 msec to respond to MOD_SEL
-	 * going away, and there is no way to tell if it is ready,
-	 * so we must wait.
- */
- if (stuck)
- qib_dev_err(dd, "QSFP interface bus stuck non-idle\n");
-
- if (pass >= QSFP_MAX_RETRY && ret)
- qib_dev_porterr(dd, ppd->port, "QSFP failed even retrying\n");
- else if (pass)
- qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);
-
- msleep(20);
-
-bail:
- mutex_unlock(&dd->eep_lock);
-
-no_unlock:
- return ret;
-}
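
The loop above clamps every TWSI transfer at a QSFP_PAGESIZE (128-byte) boundary. Worked through for a read straddling one:

/*
 * addr = 120, len = 16, QSFP_PAGESIZE = 128:
 *   pass 1: in_page = 120, wlen = 128 - 120 = 8  -> bytes 120..127
 *   pass 2: addr = 128, in_page = 0, wlen = 8    -> bytes 128..135
 */
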
-
-/*
- * qib_qsfp_write
- * We do not ordinarily write the QSFP, but this is needed to select
- * the page on non-flat QSFPs, and possibly for other unusual cases later.
- */
-static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
- int len)
-{
- struct qib_devdata *dd = ppd->dd;
- u32 out, mask;
- int ret, cnt;
- u8 *buff = bp;
-
- ret = mutex_lock_interruptible(&dd->eep_lock);
- if (ret)
- goto no_unlock;
-
- if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
- ret = -ENXIO;
- goto bail;
- }
-
- /*
- * We presume, if we are called at all, that this board has
- * QSFP. This is on the same i2c chain as the legacy parts,
- * but only responds if the module is selected via GPIO pins.
- * Further, there are very long setup and hold requirements
- * on MODSEL.
- */
- mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
- out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
- if (ppd->hw_pidx) {
- mask <<= QSFP_GPIO_PORT2_SHIFT;
- out <<= QSFP_GPIO_PORT2_SHIFT;
- }
- dd->f_gpio_mod(dd, out, mask, mask);
-
- /*
-	 * Module could take up to 2 msec to respond to MOD_SEL,
- * and there is no way to tell if it is ready, so we must wait.
- */
- msleep(20);
-
- /* Make sure TWSI bus is in sane state. */
- ret = qib_twsi_reset(dd);
- if (ret) {
- qib_dev_porterr(dd, ppd->port,
- "QSFP interface Reset for write failed\n");
- ret = -EIO;
- goto deselect;
- }
-
- /* All QSFP modules are at A0 */
-
- cnt = 0;
- while (cnt < len) {
- unsigned in_page;
- int wlen = len - cnt;
-
- in_page = addr % QSFP_PAGESIZE;
- if ((in_page + wlen) > QSFP_PAGESIZE)
- wlen = QSFP_PAGESIZE - in_page;
- ret = qib_twsi_blk_wr(dd, QSFP_DEV, addr, buff + cnt, wlen);
- if (ret) {
- /* qib_twsi_blk_wr() 1 for error, else 0 */
- ret = -EIO;
- goto deselect;
- }
- addr += wlen;
- cnt += wlen;
- }
- ret = cnt;
-
-deselect:
- /*
-	 * Module could take up to 10 usec after transfer before it is
-	 * ready to respond to MOD_SEL negation, and there is no way
- * to tell if it is ready, so we must wait.
- */
- udelay(10);
- /* set QSFP MODSEL, RST, LP high */
- dd->f_gpio_mod(dd, mask, mask, mask);
- /*
-	 * Module could take up to 2 msec to respond to MOD_SEL
-	 * going away, and there is no way to tell if it is ready,
-	 * so we must wait.
- */
- msleep(20);
-
-bail:
- mutex_unlock(&dd->eep_lock);
-
-no_unlock:
- return ret;
-}
-
-/*
- * For validation, we want to check the checksums, even of the
- * fields we do not otherwise use. This function reads the bytes from
- * <first> to <next-1> and returns the 8 LSBs of the sum, or <0 for errors.
- */
-static int qsfp_cks(struct qib_pportdata *ppd, int first, int next)
-{
- int ret;
- u16 cks;
- u8 bval;
-
- cks = 0;
- while (first < next) {
- ret = qsfp_read(ppd, first, &bval, 1);
- if (ret < 0)
- goto bail;
- cks += bval;
- ++first;
- }
- ret = cks & 0xFF;
-bail:
-	return ret;
-}
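
The module EEPROM uses plain 8-bit additive checksums: byte 191 (QSFP_CC_OFFS) holds the low byte of the sum of bytes 128..190, and byte 223 (QSFP_CC_EXT_OFFS) covers 192..222. A hedged stand-alone sketch of the same check against a cached copy (demo_cks_ok is hypothetical):

/* page[] holds EEPROM bytes 128..223, so index = byte - 128 */
static bool demo_cks_ok(const u8 *page)
{
	u16 sum = 0;
	int i;

	for (i = 0; i < 191 - 128; i++)		/* bytes 128..190 */
		sum += page[i];
	return (sum & 0xFF) == page[191 - 128];	/* compare to byte 191 */
}
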
-
-int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
-{
- int ret;
- int idx;
- u16 cks;
- u8 peek[4];
-
- /* ensure sane contents on invalid reads, for cable swaps */
- memset(cp, 0, sizeof(*cp));
-
- if (!qib_qsfp_mod_present(ppd)) {
- ret = -ENODEV;
- goto bail;
- }
-
- ret = qsfp_read(ppd, 0, peek, 3);
- if (ret < 0)
- goto bail;
- if ((peek[0] & 0xFE) != 0x0C)
- qib_dev_porterr(ppd->dd, ppd->port,
- "QSFP byte0 is 0x%02X, S/B 0x0C/D\n", peek[0]);
-
- if ((peek[2] & 4) == 0) {
- /*
- * If cable is paged, rather than "flat memory", we need to
-		 * set the page to zero, even if it already appears to be zero.
- */
- u8 poke = 0;
-
- ret = qib_qsfp_write(ppd, 127, &poke, 1);
- udelay(50);
- if (ret != 1) {
- qib_dev_porterr(ppd->dd, ppd->port,
- "Failed QSFP Page set\n");
- goto bail;
- }
- }
-
- ret = qsfp_read(ppd, QSFP_MOD_ID_OFFS, &cp->id, 1);
- if (ret < 0)
- goto bail;
- if ((cp->id & 0xFE) != 0x0C)
- qib_dev_porterr(ppd->dd, ppd->port,
- "QSFP ID byte is 0x%02X, S/B 0x0C/D\n", cp->id);
- cks = cp->id;
-
- ret = qsfp_read(ppd, QSFP_MOD_PWR_OFFS, &cp->pwr, 1);
- if (ret < 0)
- goto bail;
- cks += cp->pwr;
-
- ret = qsfp_cks(ppd, QSFP_MOD_PWR_OFFS + 1, QSFP_MOD_LEN_OFFS);
- if (ret < 0)
- goto bail;
- cks += ret;
-
- ret = qsfp_read(ppd, QSFP_MOD_LEN_OFFS, &cp->len, 1);
- if (ret < 0)
- goto bail;
- cks += cp->len;
-
- ret = qsfp_read(ppd, QSFP_MOD_TECH_OFFS, &cp->tech, 1);
- if (ret < 0)
- goto bail;
- cks += cp->tech;
-
- ret = qsfp_read(ppd, QSFP_VEND_OFFS, &cp->vendor, QSFP_VEND_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_VEND_LEN; ++idx)
- cks += cp->vendor[idx];
-
- ret = qsfp_read(ppd, QSFP_IBXCV_OFFS, &cp->xt_xcv, 1);
- if (ret < 0)
- goto bail;
- cks += cp->xt_xcv;
-
- ret = qsfp_read(ppd, QSFP_VOUI_OFFS, &cp->oui, QSFP_VOUI_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_VOUI_LEN; ++idx)
- cks += cp->oui[idx];
-
- ret = qsfp_read(ppd, QSFP_PN_OFFS, &cp->partnum, QSFP_PN_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_PN_LEN; ++idx)
- cks += cp->partnum[idx];
-
- ret = qsfp_read(ppd, QSFP_REV_OFFS, &cp->rev, QSFP_REV_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_REV_LEN; ++idx)
- cks += cp->rev[idx];
-
- ret = qsfp_read(ppd, QSFP_ATTEN_OFFS, &cp->atten, QSFP_ATTEN_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_ATTEN_LEN; ++idx)
- cks += cp->atten[idx];
-
- ret = qsfp_cks(ppd, QSFP_ATTEN_OFFS + QSFP_ATTEN_LEN, QSFP_CC_OFFS);
- if (ret < 0)
- goto bail;
- cks += ret;
-
- cks &= 0xFF;
- ret = qsfp_read(ppd, QSFP_CC_OFFS, &cp->cks1, 1);
- if (ret < 0)
- goto bail;
- if (cks != cp->cks1)
- qib_dev_porterr(ppd->dd, ppd->port,
- "QSFP cks1 is %02X, computed %02X\n", cp->cks1,
- cks);
-
-	/* Second checksum covers bytes 192..222 (serial, date, lot) */
- ret = qsfp_cks(ppd, QSFP_CC_OFFS + 1, QSFP_SN_OFFS);
- if (ret < 0)
- goto bail;
- cks = ret;
-
- ret = qsfp_read(ppd, QSFP_SN_OFFS, &cp->serial, QSFP_SN_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_SN_LEN; ++idx)
- cks += cp->serial[idx];
-
- ret = qsfp_read(ppd, QSFP_DATE_OFFS, &cp->date, QSFP_DATE_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_DATE_LEN; ++idx)
- cks += cp->date[idx];
-
- ret = qsfp_read(ppd, QSFP_LOT_OFFS, &cp->lot, QSFP_LOT_LEN);
- if (ret < 0)
- goto bail;
- for (idx = 0; idx < QSFP_LOT_LEN; ++idx)
- cks += cp->lot[idx];
-
- ret = qsfp_cks(ppd, QSFP_LOT_OFFS + QSFP_LOT_LEN, QSFP_CC_EXT_OFFS);
- if (ret < 0)
- goto bail;
- cks += ret;
-
- ret = qsfp_read(ppd, QSFP_CC_EXT_OFFS, &cp->cks2, 1);
- if (ret < 0)
- goto bail;
- cks &= 0xFF;
- if (cks != cp->cks2)
- qib_dev_porterr(ppd->dd, ppd->port,
- "QSFP cks2 is %02X, computed %02X\n", cp->cks2,
- cks);
- return 0;
-
-bail:
- cp->id = 0;
- return ret;
-}
-
-const char * const qib_qsfp_devtech[16] = {
- "850nm VCSEL", "1310nm VCSEL", "1550nm VCSEL", "1310nm FP",
- "1310nm DFB", "1550nm DFB", "1310nm EML", "1550nm EML",
- "Cu Misc", "1490nm DFB", "Cu NoEq", "Cu Eq",
- "Undef", "Cu Active BothEq", "Cu FarEq", "Cu NearEq"
-};
-
-#define QSFP_DUMP_CHUNK 16 /* Holds longest string */
-#define QSFP_DEFAULT_HDR_CNT 224
-
-static const char *pwr_codes = "1.5W2.0W2.5W3.5W";
-
-int qib_qsfp_mod_present(struct qib_pportdata *ppd)
-{
- u32 mask;
- int ret;
-
- mask = QSFP_GPIO_MOD_PRS_N <<
- (ppd->hw_pidx * QSFP_GPIO_PORT2_SHIFT);
- ret = ppd->dd->f_gpio_mod(ppd->dd, 0, 0, 0);
-
- return !((ret & mask) >>
- ((ppd->hw_pidx * QSFP_GPIO_PORT2_SHIFT) + 3));
-}
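
The return expression relies on QSFP_GPIO_MOD_PRS_N being 1 << 3: the masked GPIO read is shifted down by (hw_pidx * QSFP_GPIO_PORT2_SHIFT) + 3 to isolate the present pin, then logically inverted because the signal is active-low. For port 1 (hw_pidx == 0) that reduces to:

/*
 * ret & QSFP_GPIO_MOD_PRS_N	-> 0x08 if the pin is high (module absent)
 * (...) >> 3			-> 0 or 1
 * !(...)			-> 1 when the module is present (pin low)
 */
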
-
-/*
- * Initialize structures that control access to QSFP. Called once per port
- * on cards that support QSFP.
- */
-void qib_qsfp_init(struct qib_qsfp_data *qd,
- void (*fevent)(struct work_struct *))
-{
- u32 mask, highs;
-
- struct qib_devdata *dd = qd->ppd->dd;
-
- /* Initialize work struct for later QSFP events */
- INIT_WORK(&qd->work, fevent);
-
- /*
- * Later, we may want more validation. For now, just set up pins and
- * blip reset. If module is present, call qib_refresh_qsfp_cache(),
- * to do further init.
- */
- mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
- highs = mask - QSFP_GPIO_MOD_RST_N;
- if (qd->ppd->hw_pidx) {
- mask <<= QSFP_GPIO_PORT2_SHIFT;
- highs <<= QSFP_GPIO_PORT2_SHIFT;
- }
- dd->f_gpio_mod(dd, highs, mask, mask);
- udelay(20); /* Generous RST dwell */
-
- dd->f_gpio_mod(dd, mask, mask, mask);
-}
-
-int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
-{
- struct qib_qsfp_cache cd;
- u8 bin_buff[QSFP_DUMP_CHUNK];
- char lenstr[6];
- int sofar, ret;
- int bidx = 0;
-
- sofar = 0;
- ret = qib_refresh_qsfp_cache(ppd, &cd);
- if (ret < 0)
- goto bail;
-
- lenstr[0] = ' ';
- lenstr[1] = '\0';
- if (QSFP_IS_CU(cd.tech))
- sprintf(lenstr, "%dM ", cd.len);
-
- sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n", pwr_codes +
- (QSFP_PWR(cd.pwr) * 4));
-
- sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n", lenstr,
- qib_qsfp_devtech[cd.tech >> 4]);
-
- sofar += scnprintf(buf + sofar, len - sofar, "Vendor:%.*s\n",
- QSFP_VEND_LEN, cd.vendor);
-
- sofar += scnprintf(buf + sofar, len - sofar, "OUI:%06X\n",
- QSFP_OUI(cd.oui));
-
- sofar += scnprintf(buf + sofar, len - sofar, "Part#:%.*s\n",
- QSFP_PN_LEN, cd.partnum);
- sofar += scnprintf(buf + sofar, len - sofar, "Rev:%.*s\n",
- QSFP_REV_LEN, cd.rev);
- if (QSFP_IS_CU(cd.tech))
- sofar += scnprintf(buf + sofar, len - sofar, "Atten:%d, %d\n",
- QSFP_ATTEN_SDR(cd.atten),
- QSFP_ATTEN_DDR(cd.atten));
- sofar += scnprintf(buf + sofar, len - sofar, "Serial:%.*s\n",
- QSFP_SN_LEN, cd.serial);
- sofar += scnprintf(buf + sofar, len - sofar, "Date:%.*s\n",
- QSFP_DATE_LEN, cd.date);
- sofar += scnprintf(buf + sofar, len - sofar, "Lot:%.*s\n",
- QSFP_LOT_LEN, cd.lot);
-
- while (bidx < QSFP_DEFAULT_HDR_CNT) {
- int iidx;
-
- ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);
- if (ret < 0)
- goto bail;
- for (iidx = 0; iidx < ret; ++iidx) {
- sofar += scnprintf(buf + sofar, len-sofar, " %02X",
- bin_buff[iidx]);
- }
- sofar += scnprintf(buf + sofar, len - sofar, "\n");
- bidx += QSFP_DUMP_CHUNK;
- }
- ret = sofar;
-bail:
- return ret;
-}
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h
deleted file mode 100644
index ad8dbd6ac0cf..000000000000
--- a/drivers/infiniband/hw/qib/qib_qsfp.h
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/* Common QSFP support definitions for the ib_qib driver */
-
-#define QSFP_DEV 0xA0
-#define QSFP_PWR_LAG_MSEC 2000
-#define QSFP_MODPRS_LAG_MSEC 20
-
-/*
- * Below are masks for various QSFP signals, for Port 1.
- * Port2 equivalents are shifted by QSFP_GPIO_PORT2_SHIFT.
- * _N means asserted low
- */
-#define QSFP_GPIO_MOD_SEL_N (4)
-#define QSFP_GPIO_MOD_PRS_N (8)
-#define QSFP_GPIO_INT_N (0x10)
-#define QSFP_GPIO_MOD_RST_N (0x20)
-#define QSFP_GPIO_LP_MODE (0x40)
-#define QSFP_GPIO_PORT2_SHIFT 5
-
-#define QSFP_PAGESIZE 128
-/* Defined fields that QLogic requires of qualified cables */
-/* Byte 0 is Identifier, not checked */
-/* Byte 1 is reserved "status MSB" */
-/* Byte 2 is "status LSB" We only care that D2 "Flat Mem" is set. */
-/*
- * Rest of first 128 not used, although 127 is reserved for page select
- * if module is not "Flat memory".
- */
-/* Byte 128 is Identifier: must be 0x0c for QSFP, or 0x0d for QSFP+ */
-#define QSFP_MOD_ID_OFFS 128
-/*
- * Byte 129 is "Extended Identifier". We only care about D7,D6: Power class
- * 0:1.5W, 1:2.0W, 2:2.5W, 3:3.5W
- */
-#define QSFP_MOD_PWR_OFFS 129
-/* Byte 130 is Connector type. Not QLogic req'd */
-/* Bytes 131..138 are Transceiver types, bit maps for various tech, none IB */
-/* Byte 139 is encoding. code 0x01 is 8b10b. Not QLogic req'd */
-/* Byte 140 is nominal bit-rate, in units of 100Mbits/sec. Not QLogic req'd */
-/* Byte 141 is Extended Rate Select. Not QLogic req'd */
-/* Bytes 142..145 are lengths for various fiber types. Not QLogic req'd */
-/* Byte 146 is length for Copper. Units of 1 meter */
-#define QSFP_MOD_LEN_OFFS 146
-/*
- * Byte 147 is Device technology. D0..3 not QLogic req'd.
- * D4..7 select from 15 choices, translated by table:
- */
-#define QSFP_MOD_TECH_OFFS 147
-extern const char *const qib_qsfp_devtech[16];
-/* Active Equalization includes fiber, copper full EQ, and copper near Eq */
-#define QSFP_IS_ACTIVE(tech) ((0xA2FF >> ((tech) >> 4)) & 1)
-/* Active Equalization includes fiber, copper full EQ, and copper far Eq */
-#define QSFP_IS_ACTIVE_FAR(tech) ((0x32FF >> ((tech) >> 4)) & 1)
-/* Attenuation should be valid for copper other than full/near Eq */
-#define QSFP_HAS_ATTEN(tech) ((0x4D00 >> ((tech) >> 4)) & 1)
-/* Length is only valid if technology is "copper" */
-#define QSFP_IS_CU(tech) ((0xED00 >> ((tech) >> 4)) & 1)
-#define QSFP_TECH_1490 9
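[Editor's note] The QSFP_IS_* predicates above pack a 16-entry boolean table into a hex constant and index it with the upper nibble of the tech byte (offset 147). A minimal standalone sketch of the idiom (not part of the deleted file; the tech value is hypothetical):

#include <stdio.h>

/* same table trick as QSFP_IS_CU above: bit N answers for tech nibble N */
#define QSFP_IS_CU(tech) ((0xED00 >> ((tech) >> 4)) & 1)

int main(void)
{
	unsigned tech = 0xA0;	/* hypothetical byte 147, tech nibble 0xA */

	/* 0xED00 has bit 10 set, so nibble 0xA is classed as copper */
	printf("is copper: %u\n", QSFP_IS_CU(tech));
	return 0;
}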
-
-#define QSFP_OUI(oui) (((unsigned)oui[0] << 16) | ((unsigned)oui[1] << 8) | \
- oui[2])
-#define QSFP_OUI_AMPHENOL 0x415048
-#define QSFP_OUI_FINISAR 0x009065
-#define QSFP_OUI_GORE 0x002177
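[Editor's note] QSFP_OUI assembles the three vendor OUI bytes most-significant-first so a cable's vendor can be compared against the QSFP_OUI_* constants. A quick sketch (not part of the deleted file; the byte values are Finisar's OUI as listed above):

#include <stdio.h>

#define QSFP_OUI(oui) (((unsigned)oui[0] << 16) | ((unsigned)oui[1] << 8) | \
	oui[2])

int main(void)
{
	unsigned char oui[3] = { 0x00, 0x90, 0x65 };

	printf("0x%06X\n", QSFP_OUI(oui));	/* prints 0x009065, QSFP_OUI_FINISAR */
	return 0;
}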
-
-/* Bytes 148..163 are Vendor Name, Left-justified Blank-filled */
-#define QSFP_VEND_OFFS 148
-#define QSFP_VEND_LEN 16
-/* Byte 164 is IB Extended transceiver codes. Bits D0..3 are SDR,DDR,QDR,EDR */
-#define QSFP_IBXCV_OFFS 164
-/* Bytes 165..167 are Vendor OUI number */
-#define QSFP_VOUI_OFFS 165
-#define QSFP_VOUI_LEN 3
-/* Bytes 168..183 are Vendor Part Number, string */
-#define QSFP_PN_OFFS 168
-#define QSFP_PN_LEN 16
-/* Bytes 184,185 are Vendor Rev. Left Justified, Blank-filled */
-#define QSFP_REV_OFFS 184
-#define QSFP_REV_LEN 2
-/*
- * Bytes 186,187 are Wavelength, if Optical. Not QLogic req'd
- * If copper, they are attenuation in dB:
- * Byte 186 is at 2.5Gb/sec (SDR), Byte 187 at 5.0Gb/sec (DDR)
- */
-#define QSFP_ATTEN_OFFS 186
-#define QSFP_ATTEN_LEN 2
-/* Bytes 188,189 are Wavelength tolerance, not QLogic req'd */
-/* Byte 190 is Max Case Temp. Not QLogic req'd */
-/* Byte 191 is LSB of sum of bytes 128..190. Not QLogic req'd */
-#define QSFP_CC_OFFS 191
-/* Bytes 192..195 are Options implemented in qsfp. Not QLogic req'd */
-/* Bytes 196..211 are Serial Number, String */
-#define QSFP_SN_OFFS 196
-#define QSFP_SN_LEN 16
-/* Bytes 212..217 are date-code YYMMDD (MM==1 for Jan) */
-#define QSFP_DATE_OFFS 212
-#define QSFP_DATE_LEN 6
-/* Bytes 218,219 are optional lot-code, string */
-#define QSFP_LOT_OFFS 218
-#define QSFP_LOT_LEN 2
-/* Bytes 220, 221 indicate monitoring options, Not QLogic req'd */
-/* Byte 223 is LSB of sum of bytes 192..222 */
-#define QSFP_CC_EXT_OFFS 223
-
-/*
- * struct qib_qsfp_data encapsulates the state of the QSFP device for one
- * port. It will be part of the port-chip-specific data if a board supports
- * QSFP.
- *
- * Since multiple board-types use QSFP, and their pport_data structs
- * differ (in the chip-specific section), we need a pointer to its head.
- *
- * Avoiding premature optimization, we will have one work_struct per port,
- * and let the (increasingly inaccurately named) eep_lock arbitrate
- * access to common resources.
- *
- */
-
-/*
- * Hold the parts of the onboard EEPROM that we care about, so we aren't
- * constantly bit-banging
- */
-struct qib_qsfp_cache {
- u8 id; /* must be 0x0C or 0x0D; 0 indicates invalid EEPROM read */
- u8 pwr; /* in D6,7 */
- u8 len; /* in meters, Cu only */
- u8 tech;
- char vendor[QSFP_VEND_LEN];
-	u8 xt_xcv; /* Ext. transceiver codes, 4 lsbs are IB speed supported */
- u8 oui[QSFP_VOUI_LEN];
- u8 partnum[QSFP_PN_LEN];
- u8 rev[QSFP_REV_LEN];
- u8 atten[QSFP_ATTEN_LEN];
- u8 cks1; /* Checksum of bytes 128..190 */
- u8 serial[QSFP_SN_LEN];
- u8 date[QSFP_DATE_LEN];
- u8 lot[QSFP_LOT_LEN];
-	u8 cks2; /* Checksum of bytes 192..222 */
-};
-
-#define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3)
-#define QSFP_ATTEN_SDR(attenarray) (attenarray[0])
-#define QSFP_ATTEN_DDR(attenarray) (attenarray[1])
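[Editor's note] The module's power class sits in bits D7..6 of byte 129 and maps 0:1.5W, 1:2.0W, 2:2.5W, 3:3.5W, per the comment at QSFP_MOD_PWR_OFFS. A decoding sketch (not part of the deleted file; the byte value is hypothetical):

#include <stdio.h>

#define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3)

int main(void)
{
	static const char * const pwr_str[4] = { "1.5W", "2.0W", "2.5W", "3.5W" };
	unsigned char pwr = 0x80;	/* hypothetical byte 129, D7..6 == 2 */

	printf("power class: %s\n", pwr_str[QSFP_PWR(pwr)]);	/* 2.5W */
	return 0;
}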
-
-struct qib_qsfp_data {
- /* Helps to find our way */
- struct qib_pportdata *ppd;
- struct work_struct work;
- struct qib_qsfp_cache cache;
- unsigned long t_insert;
- u8 modpresent;
-};
-
-extern int qib_refresh_qsfp_cache(struct qib_pportdata *ppd,
- struct qib_qsfp_cache *cp);
-extern int qib_qsfp_mod_present(struct qib_pportdata *ppd);
-extern void qib_qsfp_init(struct qib_qsfp_data *qd,
- void (*fevent)(struct work_struct *));
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
deleted file mode 100644
index a1c20ffb4490..000000000000
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ /dev/null
@@ -1,2131 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/io.h>
-
-#include "qib.h"
-
-/* cut down ridiculously long IB macro names */
-#define OP(x) IB_OPCODE_RC_##x
-
-static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
- u32 psn, u32 pmtu)
-{
- u32 len;
-
- len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
- return rvt_restart_sge(ss, wqe, len);
-}
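[Editor's note] Each packet past the WQE's starting PSN carries one path-MTU of payload, so restart_sge() turns a 24-bit PSN delta directly into a byte offset. A standalone sketch of the arithmetic, including the wrap case (not part of the deleted file; QIB_PSN_MASK is the 24-bit mask from the qib headers):

#include <stdio.h>

#define QIB_PSN_MASK 0xFFFFFF	/* 24-bit PSN space */

int main(void)
{
	unsigned wqe_psn = 0xFFFFFE;	/* request began just before the wrap */
	unsigned psn = 0x000002;	/* PSN to restart from */
	unsigned pmtu = 2048;

	/* ((0x2 - 0xFFFFFE) & 0xFFFFFF) == 4 packets == 8192 bytes */
	printf("resend offset: %u\n", ((psn - wqe_psn) & QIB_PSN_MASK) * pmtu);
	return 0;
}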
-
-/**
- * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
- * @dev: the device for this QP
- * @qp: a pointer to the QP
- * @ohdr: a pointer to the IB header being constructed
- * @pmtu: the path MTU
- *
- * Return 1 if constructed; otherwise, return 0.
- * Note that we are on the responder's side of the QP context.
- * Note the QP s_lock must be held.
- */
-static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
- struct ib_other_headers *ohdr, u32 pmtu)
-{
- struct rvt_ack_entry *e;
- u32 hwords;
- u32 len;
- u32 bth0;
- u32 bth2;
-
- /* Don't send an ACK if we aren't supposed to. */
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
- goto bail;
-
- /* header size in 32-bit words LRH+BTH = (8+12)/4. */
- hwords = 5;
-
- switch (qp->s_ack_state) {
- case OP(RDMA_READ_RESPONSE_LAST):
- case OP(RDMA_READ_RESPONSE_ONLY):
- e = &qp->s_ack_queue[qp->s_tail_ack_queue];
- if (e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
- fallthrough;
- case OP(ATOMIC_ACKNOWLEDGE):
- /*
- * We can increment the tail pointer now that the last
- * response has been sent instead of only being
- * constructed.
- */
- if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
- qp->s_tail_ack_queue = 0;
- fallthrough;
- case OP(SEND_ONLY):
- case OP(ACKNOWLEDGE):
- /* Check for no next entry in the queue. */
- if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
- if (qp->s_flags & RVT_S_ACK_PENDING)
- goto normal;
- goto bail;
- }
-
- e = &qp->s_ack_queue[qp->s_tail_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST)) {
- /*
			 * If an RDMA read response is being resent and
- * we haven't seen the duplicate request yet,
- * then stop sending the remaining responses the
- * responder has seen until the requester resends it.
- */
- len = e->rdma_sge.sge_length;
- if (len && !e->rdma_sge.mr) {
- qp->s_tail_ack_queue = qp->r_head_ack_queue;
- goto bail;
- }
- /* Copy SGE state in case we need to resend */
- qp->s_rdma_mr = e->rdma_sge.mr;
- if (qp->s_rdma_mr)
- rvt_get_mr(qp->s_rdma_mr);
- qp->s_ack_rdma_sge.sge = e->rdma_sge;
- qp->s_ack_rdma_sge.num_sge = 1;
- qp->s_cur_sge = &qp->s_ack_rdma_sge;
- if (len > pmtu) {
- len = pmtu;
- qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
- } else {
- qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
- e->sent = 1;
- }
- ohdr->u.aeth = rvt_compute_aeth(qp);
- hwords++;
- qp->s_ack_rdma_psn = e->psn;
- bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
- } else {
- /* COMPARE_SWAP or FETCH_ADD */
- qp->s_cur_sge = NULL;
- len = 0;
- qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
- ohdr->u.at.aeth = rvt_compute_aeth(qp);
- ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
- hwords += sizeof(ohdr->u.at) / sizeof(u32);
- bth2 = e->psn & QIB_PSN_MASK;
- e->sent = 1;
- }
- bth0 = qp->s_ack_state << 24;
- break;
-
- case OP(RDMA_READ_RESPONSE_FIRST):
- qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
- fallthrough;
- case OP(RDMA_READ_RESPONSE_MIDDLE):
- qp->s_cur_sge = &qp->s_ack_rdma_sge;
- qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
- if (qp->s_rdma_mr)
- rvt_get_mr(qp->s_rdma_mr);
- len = qp->s_ack_rdma_sge.sge.sge_length;
- if (len > pmtu)
- len = pmtu;
- else {
- ohdr->u.aeth = rvt_compute_aeth(qp);
- hwords++;
- qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
- e = &qp->s_ack_queue[qp->s_tail_ack_queue];
- e->sent = 1;
- }
- bth0 = qp->s_ack_state << 24;
- bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
- break;
-
- default:
-normal:
- /*
- * Send a regular ACK.
- * Set the s_ack_state so we wait until after sending
- * the ACK before setting s_ack_state to ACKNOWLEDGE
- * (see above).
- */
- qp->s_ack_state = OP(SEND_ONLY);
- qp->s_flags &= ~RVT_S_ACK_PENDING;
- qp->s_cur_sge = NULL;
- if (qp->s_nak_state)
- ohdr->u.aeth =
- cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
- (qp->s_nak_state <<
- IB_AETH_CREDIT_SHIFT));
- else
- ohdr->u.aeth = rvt_compute_aeth(qp);
- hwords++;
- len = 0;
- bth0 = OP(ACKNOWLEDGE) << 24;
- bth2 = qp->s_ack_psn & QIB_PSN_MASK;
- }
- qp->s_rdma_ack_cnt++;
- qp->s_hdrwords = hwords;
- qp->s_cur_size = len;
- qib_make_ruc_header(qp, ohdr, bth0, bth2);
- return 1;
-
-bail:
- qp->s_ack_state = OP(ACKNOWLEDGE);
- qp->s_flags &= ~(RVT_S_RESP_PENDING | RVT_S_ACK_PENDING);
- return 0;
-}
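[Editor's note] Header sizes in this file are tracked in 32-bit words ("hwords"): LRH (8 bytes) plus BTH (12 bytes) gives the baseline 5, an AETH adds 1, and the atomic ACK variant (AETH plus a 64-bit result) adds 3. A sketch of the accounting (not part of the deleted file):

#include <stdio.h>

int main(void)
{
	unsigned hwords = (8 + 12) / 4;	/* LRH + BTH, as in qib_make_rc_ack() */

	hwords += 4 / 4;		/* AETH */
	printf("plain ACK header words: %u\n", hwords);	/* 6 */
	return 0;
}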
-
-/**
- * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
- * @qp: a pointer to the QP
- * @flags: unused
- *
- * Assumes the s_lock is held.
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
-{
- struct qib_qp_priv *priv = qp->priv;
- struct qib_ibdev *dev = to_idev(qp->ibqp.device);
- struct ib_other_headers *ohdr;
- struct rvt_sge_state *ss;
- struct rvt_swqe *wqe;
- u32 hwords;
- u32 len;
- u32 bth0;
- u32 bth2;
- u32 pmtu = qp->pmtu;
- char newreq;
- int ret = 0;
- int delta;
-
- ohdr = &priv->s_hdr->u.oth;
- if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
- ohdr = &priv->s_hdr->u.l.oth;
-
	/* Sending responses takes priority over sending requests. */
- if ((qp->s_flags & RVT_S_RESP_PENDING) &&
- qib_make_rc_ack(dev, qp, ohdr, pmtu))
- goto done;
-
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
- if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
- goto bail;
- /* We are in the error state, flush the work request. */
- if (qp->s_last == READ_ONCE(qp->s_head))
- goto bail;
- /* If DMAs are in progress, we can't flush immediately. */
- if (atomic_read(&priv->s_dma_busy)) {
- qp->s_flags |= RVT_S_WAIT_DMA;
- goto bail;
- }
- wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
- IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
- /* will get called again */
- goto done;
- }
-
- if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
- goto bail;
-
- if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
- if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
- qp->s_flags |= RVT_S_WAIT_PSN;
- goto bail;
- }
- qp->s_sending_psn = qp->s_psn;
- qp->s_sending_hpsn = qp->s_psn - 1;
- }
-
- /* header size in 32-bit words LRH+BTH = (8+12)/4. */
- hwords = 5;
- bth0 = 0;
-
- /* Send a request. */
- wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
- switch (qp->s_state) {
- default:
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
- goto bail;
- /*
- * Resend an old request or start a new one.
- *
- * We keep track of the current SWQE so that
- * we don't reset the "furthest progress" state
- * if we need to back up.
- */
- newreq = 0;
- if (qp->s_cur == qp->s_tail) {
- /* Check if send work queue is empty. */
- if (qp->s_tail == READ_ONCE(qp->s_head))
- goto bail;
- /*
- * If a fence is requested, wait for previous
- * RDMA read and atomic operations to finish.
- */
- if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
- qp->s_num_rd_atomic) {
- qp->s_flags |= RVT_S_WAIT_FENCE;
- goto bail;
- }
- newreq = 1;
- qp->s_psn = wqe->psn;
- }
- /*
- * Note that we have to be careful not to modify the
- * original work request since we may need to resend
- * it.
- */
- len = wqe->length;
- ss = &qp->s_sge;
- bth2 = qp->s_psn & QIB_PSN_MASK;
- switch (wqe->wr.opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_IMM:
- /* If no credit, return. */
- if (!rvt_rc_credit_avail(qp, wqe))
- goto bail;
- if (len > pmtu) {
- qp->s_state = OP(SEND_FIRST);
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_SEND)
- qp->s_state = OP(SEND_ONLY);
- else {
- qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- }
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- bth2 |= IB_BTH_REQ_ACK;
- if (++qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
-
- case IB_WR_RDMA_WRITE:
- if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
- qp->s_lsn++;
- goto no_flow_control;
- case IB_WR_RDMA_WRITE_WITH_IMM:
- /* If no credit, return. */
- if (!rvt_rc_credit_avail(qp, wqe))
- goto bail;
-no_flow_control:
- ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->rdma_wr.remote_addr);
- ohdr->u.rc.reth.rkey =
- cpu_to_be32(wqe->rdma_wr.rkey);
- ohdr->u.rc.reth.length = cpu_to_be32(len);
- hwords += sizeof(struct ib_reth) / sizeof(u32);
- if (len > pmtu) {
- qp->s_state = OP(RDMA_WRITE_FIRST);
- len = pmtu;
- break;
- }
- if (wqe->rdma_wr.wr.opcode == IB_WR_RDMA_WRITE)
- qp->s_state = OP(RDMA_WRITE_ONLY);
- else {
- qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
- /* Immediate data comes after RETH */
- ohdr->u.rc.imm_data =
- wqe->rdma_wr.wr.ex.imm_data;
- hwords += 1;
- if (wqe->rdma_wr.wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- }
- bth2 |= IB_BTH_REQ_ACK;
- if (++qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
-
- case IB_WR_RDMA_READ:
- /*
- * Don't allow more operations to be started
- * than the QP limits allow.
- */
- if (newreq) {
- if (qp->s_num_rd_atomic >=
- qp->s_max_rd_atomic) {
- qp->s_flags |= RVT_S_WAIT_RDMAR;
- goto bail;
- }
- qp->s_num_rd_atomic++;
- if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
- qp->s_lsn++;
- }
-
- ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->rdma_wr.remote_addr);
- ohdr->u.rc.reth.rkey =
- cpu_to_be32(wqe->rdma_wr.rkey);
- ohdr->u.rc.reth.length = cpu_to_be32(len);
- qp->s_state = OP(RDMA_READ_REQUEST);
- hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
- ss = NULL;
- len = 0;
- bth2 |= IB_BTH_REQ_ACK;
- if (++qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
-
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- /*
- * Don't allow more operations to be started
- * than the QP limits allow.
- */
- if (newreq) {
- if (qp->s_num_rd_atomic >=
- qp->s_max_rd_atomic) {
- qp->s_flags |= RVT_S_WAIT_RDMAR;
- goto bail;
- }
- qp->s_num_rd_atomic++;
- if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
- qp->s_lsn++;
- }
- if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
- qp->s_state = OP(COMPARE_SWAP);
- put_ib_ateth_swap(wqe->atomic_wr.swap,
- &ohdr->u.atomic_eth);
- put_ib_ateth_compare(wqe->atomic_wr.compare_add,
- &ohdr->u.atomic_eth);
- } else {
- qp->s_state = OP(FETCH_ADD);
- put_ib_ateth_swap(wqe->atomic_wr.compare_add,
- &ohdr->u.atomic_eth);
- put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
- }
- put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
- &ohdr->u.atomic_eth);
- ohdr->u.atomic_eth.rkey = cpu_to_be32(
- wqe->atomic_wr.rkey);
- hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
- ss = NULL;
- len = 0;
- bth2 |= IB_BTH_REQ_ACK;
- if (++qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
-
- default:
- goto bail;
- }
- qp->s_sge.sge = wqe->sg_list[0];
- qp->s_sge.sg_list = wqe->sg_list + 1;
- qp->s_sge.num_sge = wqe->wr.num_sge;
- qp->s_sge.total_len = wqe->length;
- qp->s_len = wqe->length;
- if (newreq) {
- qp->s_tail++;
- if (qp->s_tail >= qp->s_size)
- qp->s_tail = 0;
- }
- if (wqe->wr.opcode == IB_WR_RDMA_READ)
- qp->s_psn = wqe->lpsn + 1;
- else
- qp->s_psn++;
- break;
-
- case OP(RDMA_READ_RESPONSE_FIRST):
- /*
- * qp->s_state is normally set to the opcode of the
- * last packet constructed for new requests and therefore
- * is never set to RDMA read response.
- * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
- * thread to indicate a SEND needs to be restarted from an
	 * earlier PSN without interfering with the sending thread.
- * See qib_restart_rc().
- */
- qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
- fallthrough;
- case OP(SEND_FIRST):
- qp->s_state = OP(SEND_MIDDLE);
- fallthrough;
- case OP(SEND_MIDDLE):
- bth2 = qp->s_psn++ & QIB_PSN_MASK;
- ss = &qp->s_sge;
- len = qp->s_len;
- if (len > pmtu) {
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_SEND)
- qp->s_state = OP(SEND_LAST);
- else {
- qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- }
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- bth2 |= IB_BTH_REQ_ACK;
- qp->s_cur++;
- if (qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
-
- case OP(RDMA_READ_RESPONSE_LAST):
- /*
- * qp->s_state is normally set to the opcode of the
- * last packet constructed for new requests and therefore
- * is never set to RDMA read response.
- * RDMA_READ_RESPONSE_LAST is used by the ACK processing
	 * thread to indicate an RDMA write needs to be restarted from
	 * an earlier PSN without interfering with the sending thread.
- * See qib_restart_rc().
- */
- qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
- fallthrough;
- case OP(RDMA_WRITE_FIRST):
- qp->s_state = OP(RDMA_WRITE_MIDDLE);
- fallthrough;
- case OP(RDMA_WRITE_MIDDLE):
- bth2 = qp->s_psn++ & QIB_PSN_MASK;
- ss = &qp->s_sge;
- len = qp->s_len;
- if (len > pmtu) {
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
- qp->s_state = OP(RDMA_WRITE_LAST);
- else {
- qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- }
- bth2 |= IB_BTH_REQ_ACK;
- qp->s_cur++;
- if (qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
-
- case OP(RDMA_READ_RESPONSE_MIDDLE):
- /*
- * qp->s_state is normally set to the opcode of the
- * last packet constructed for new requests and therefore
- * is never set to RDMA read response.
- * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
	 * thread to indicate an RDMA read needs to be restarted from
	 * an earlier PSN without interfering with the sending thread.
- * See qib_restart_rc().
- */
- len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
- ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->rdma_wr.remote_addr + len);
- ohdr->u.rc.reth.rkey =
- cpu_to_be32(wqe->rdma_wr.rkey);
- ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
- qp->s_state = OP(RDMA_READ_REQUEST);
- hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
- bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
- qp->s_psn = wqe->lpsn + 1;
- ss = NULL;
- len = 0;
- qp->s_cur++;
- if (qp->s_cur == qp->s_size)
- qp->s_cur = 0;
- break;
- }
- qp->s_sending_hpsn = bth2;
- delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
- if (delta && delta % QIB_PSN_CREDIT == 0)
- bth2 |= IB_BTH_REQ_ACK;
- if (qp->s_flags & RVT_S_SEND_ONE) {
- qp->s_flags &= ~RVT_S_SEND_ONE;
- qp->s_flags |= RVT_S_WAIT_ACK;
- bth2 |= IB_BTH_REQ_ACK;
- }
- qp->s_len -= len;
- qp->s_hdrwords = hwords;
- qp->s_cur_sge = ss;
- qp->s_cur_size = len;
- qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
-done:
- return 1;
-bail:
- qp->s_flags &= ~RVT_S_BUSY;
- return ret;
-}
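[Editor's note] The credit test near the end of qib_make_rc_req() recovers a signed delta from 24-bit PSNs by shifting the low 24 bits up to the sign position and arithmetic-shifting back, the same idiom qib_cmp24() relies on. A sketch (not part of the deleted file; it mirrors the kernel idiom, which assumes the signed-shift behavior of the compilers the kernel targets):

#include <stdio.h>

static int psn_delta(unsigned a, unsigned b)
{
	return (((int)a - (int)b) << 8) >> 8;	/* sign-extend bit 23 */
}

int main(void)
{
	/* 0x000001 is two ahead of 0xFFFFFF despite being numerically smaller */
	printf("%d\n", psn_delta(0x000001, 0xFFFFFF));	/* prints 2 */
	return 0;
}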
-
-/**
- * qib_send_rc_ack - Construct an ACK packet and send it
- * @qp: a pointer to the QP
- *
- * This is called from qib_rc_rcv() and qib_kreceive().
- * Note that RDMA reads and atomics are handled in the
- * send side QP state and tasklet.
- */
-void qib_send_rc_ack(struct rvt_qp *qp)
-{
- struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
- struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- u64 pbc;
- u16 lrh0;
- u32 bth0;
- u32 hwords;
- u32 pbufn;
- u32 __iomem *piobuf;
- struct ib_header hdr;
- struct ib_other_headers *ohdr;
- u32 control;
- unsigned long flags;
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
- goto unlock;
-
	/* Don't send ACK or NAK if an RDMA read or atomic is pending. */
- if ((qp->s_flags & RVT_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
- goto queue_ack;
-
- /* Construct the header with s_lock held so APM doesn't change it. */
- ohdr = &hdr.u.oth;
- lrh0 = QIB_LRH_BTH;
- /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
- hwords = 6;
- if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
- IB_AH_GRH)) {
- hwords += qib_make_grh(ibp, &hdr.u.l.grh,
- rdma_ah_read_grh(&qp->remote_ah_attr),
- hwords, 0);
- ohdr = &hdr.u.l.oth;
- lrh0 = QIB_LRH_GRH;
- }
	/* read pkey_index w/o lock (it's atomic) */
- bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
- if (qp->s_mig_state == IB_MIG_MIGRATED)
- bth0 |= IB_BTH_MIG_REQ;
- if (qp->r_nak_state)
- ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
- (qp->r_nak_state <<
- IB_AETH_CREDIT_SHIFT));
- else
- ohdr->u.aeth = rvt_compute_aeth(qp);
- lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(&qp->remote_ah_attr)] << 12 |
- rdma_ah_get_sl(&qp->remote_ah_attr) << 4;
- hdr.lrh[0] = cpu_to_be16(lrh0);
- hdr.lrh[1] = cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
- hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
- hdr.lrh[3] = cpu_to_be16(ppd->lid |
- rdma_ah_get_path_bits(&qp->remote_ah_attr));
- ohdr->bth[0] = cpu_to_be32(bth0);
- ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
- ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);
-
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- /* Don't try to send ACKs if the link isn't ACTIVE */
- if (!(ppd->lflags & QIBL_LINKACTIVE))
- goto done;
-
- control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
- qp->s_srate, lrh0 >> 12);
- /* length is + 1 for the control dword */
- pbc = ((u64) control << 32) | (hwords + 1);
-
- piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
- if (!piobuf) {
- /*
- * We are out of PIO buffers at the moment.
- * Pass responsibility for sending the ACK to the
- * send tasklet so that when a PIO buffer becomes
- * available, the ACK is sent ahead of other outgoing
- * packets.
- */
- spin_lock_irqsave(&qp->s_lock, flags);
- goto queue_ack;
- }
-
- /*
- * Write the pbc.
- * We have to flush after the PBC for correctness
	 * on some CPUs, or the WC buffer can be written out of order.
- */
- writeq(pbc, piobuf);
-
- if (dd->flags & QIB_PIO_FLUSH_WC) {
- u32 *hdrp = (u32 *) &hdr;
-
- qib_flush_wc();
- qib_pio_copy(piobuf + 2, hdrp, hwords - 1);
- qib_flush_wc();
- __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
- } else
- qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords);
-
- if (dd->flags & QIB_USE_SPCL_TRIG) {
- u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
-
- qib_flush_wc();
- __raw_writel(0xaebecede, piobuf + spcl_off);
- }
-
- qib_flush_wc();
- qib_sendbuf_done(dd, pbufn);
-
- this_cpu_inc(ibp->pmastats->n_unicast_xmit);
- goto done;
-
-queue_ack:
- if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
- this_cpu_inc(*ibp->rvp.rc_qacks);
- qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
- qp->s_nak_state = qp->r_nak_state;
- qp->s_ack_psn = qp->r_ack_psn;
-
- /* Schedule the send tasklet. */
- qib_schedule_send(qp);
- }
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-done:
- return;
-}
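[Editor's note] The PBC written at the head of the PIO buffer carries the chip control word in the high 32 bits and the packet length in dwords, plus one for the PBC itself, in the low 32 bits, as qib_send_rc_ack() builds it above. A packing sketch (not part of the deleted file; the control value is hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned long long control = 0x12345678;	/* hypothetical f_setpbc_control() result */
	unsigned hwords = 6;				/* LRH + BTH + AETH */

	unsigned long long pbc = (control << 32) | (hwords + 1);
	printf("pbc: 0x%016llx\n", pbc);		/* 0x1234567800000007 */
	return 0;
}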
-
-/**
- * reset_psn - reset the QP state to send starting from PSN
- * @qp: the QP
- * @psn: the packet sequence number to restart at
- *
- * This is called from qib_rc_rcv() to process an incoming RC ACK
- * for the given QP.
- * Called at interrupt level with the QP s_lock held.
- */
-static void reset_psn(struct rvt_qp *qp, u32 psn)
-{
- u32 n = qp->s_acked;
- struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
- u32 opcode;
-
- qp->s_cur = n;
-
- /*
- * If we are starting the request from the beginning,
- * let the normal send code handle initialization.
- */
- if (qib_cmp24(psn, wqe->psn) <= 0) {
- qp->s_state = OP(SEND_LAST);
- goto done;
- }
-
- /* Find the work request opcode corresponding to the given PSN. */
- opcode = wqe->wr.opcode;
- for (;;) {
- int diff;
-
- if (++n == qp->s_size)
- n = 0;
- if (n == qp->s_tail)
- break;
- wqe = rvt_get_swqe_ptr(qp, n);
- diff = qib_cmp24(psn, wqe->psn);
- if (diff < 0)
- break;
- qp->s_cur = n;
- /*
- * If we are starting the request from the beginning,
- * let the normal send code handle initialization.
- */
- if (diff == 0) {
- qp->s_state = OP(SEND_LAST);
- goto done;
- }
- opcode = wqe->wr.opcode;
- }
-
- /*
- * Set the state to restart in the middle of a request.
- * Don't change the s_sge, s_cur_sge, or s_cur_size.
- * See qib_make_rc_req().
- */
- switch (opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_IMM:
- qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
- break;
-
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
- break;
-
- case IB_WR_RDMA_READ:
- qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
- break;
-
- default:
- /*
		 * This case shouldn't happen since it's only
- * one PSN per req.
- */
- qp->s_state = OP(SEND_LAST);
- }
-done:
- qp->s_psn = psn;
- /*
- * Set RVT_S_WAIT_PSN as qib_rc_complete() may start the timer
- * asynchronously before the send tasklet can get scheduled.
- * Doing it in qib_make_rc_req() is too late.
- */
- if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
- (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
- qp->s_flags |= RVT_S_WAIT_PSN;
-}
-
-/*
- * Back up requester to resend the last un-ACKed request.
- * The QP r_lock and s_lock should be held and interrupts disabled.
- */
-void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
-{
- struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
- struct qib_ibport *ibp;
-
- if (qp->s_retry == 0) {
- if (qp->s_mig_state == IB_MIG_ARMED) {
- qib_migrate_qp(qp);
- qp->s_retry = qp->s_retry_cnt;
- } else if (qp->s_last == qp->s_acked) {
- rvt_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
- rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
- return;
- } else /* XXX need to handle delayed completion */
- return;
- } else
- qp->s_retry--;
-
- ibp = to_iport(qp->ibqp.device, qp->port_num);
- if (wqe->wr.opcode == IB_WR_RDMA_READ)
- ibp->rvp.n_rc_resends++;
- else
- ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
-
- qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
- RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
- RVT_S_WAIT_ACK);
- if (wait)
- qp->s_flags |= RVT_S_SEND_ONE;
- reset_psn(qp, psn);
-}
-
-/*
- * Set qp->s_sending_psn to the next PSN after the given one.
- * This would be psn+1 except when RDMA reads are present.
- */
-static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
-{
- struct rvt_swqe *wqe;
- u32 n = qp->s_last;
-
- /* Find the work request corresponding to the given PSN. */
- for (;;) {
- wqe = rvt_get_swqe_ptr(qp, n);
- if (qib_cmp24(psn, wqe->lpsn) <= 0) {
- if (wqe->wr.opcode == IB_WR_RDMA_READ)
- qp->s_sending_psn = wqe->lpsn + 1;
- else
- qp->s_sending_psn = psn + 1;
- break;
- }
- if (++n == qp->s_size)
- n = 0;
- if (n == qp->s_tail)
- break;
- }
-}
-
-/*
- * This should be called with the QP s_lock held and interrupts disabled.
- */
-void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
-{
- struct ib_other_headers *ohdr;
- struct rvt_swqe *wqe;
- u32 opcode;
- u32 psn;
-
- if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
- return;
-
- /* Find out where the BTH is */
- if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH)
- ohdr = &hdr->u.oth;
- else
- ohdr = &hdr->u.l.oth;
-
- opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
- if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
- opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
- WARN_ON(!qp->s_rdma_ack_cnt);
- qp->s_rdma_ack_cnt--;
- return;
- }
-
- psn = be32_to_cpu(ohdr->bth[2]);
- reset_sending_psn(qp, psn);
-
- /*
- * Start timer after a packet requesting an ACK has been sent and
- * there are still requests that haven't been acked.
- */
- if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
- !(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
- (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
- rvt_add_retry_timer(qp);
-
- while (qp->s_last != qp->s_acked) {
- wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
- qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
- break;
- rvt_qp_complete_swqe(qp,
- wqe,
- ib_qib_wc_opcode[wqe->wr.opcode],
- IB_WC_SUCCESS);
- }
- /*
- * If we were waiting for sends to complete before resending,
- * and they are now complete, restart sending.
- */
- if (qp->s_flags & RVT_S_WAIT_PSN &&
- qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
- qp->s_flags &= ~RVT_S_WAIT_PSN;
- qp->s_sending_psn = qp->s_psn;
- qp->s_sending_hpsn = qp->s_psn - 1;
- qib_schedule_send(qp);
- }
-}
-
-static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
-{
- qp->s_last_psn = psn;
-}
-
-/*
- * Generate a SWQE completion.
- * This is similar to qib_send_complete but has to check to be sure
- * that the SGEs are not being referenced if the SWQE is being resent.
- */
-static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
- struct rvt_swqe *wqe,
- struct qib_ibport *ibp)
-{
- /*
- * Don't decrement refcount and don't generate a
- * completion if the SWQE is being resent until the send
- * is finished.
- */
- if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
- qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0)
- rvt_qp_complete_swqe(qp,
- wqe,
- ib_qib_wc_opcode[wqe->wr.opcode],
- IB_WC_SUCCESS);
- else
- this_cpu_inc(*ibp->rvp.rc_delayed_comp);
-
- qp->s_retry = qp->s_retry_cnt;
- update_last_psn(qp, wqe->lpsn);
-
- /*
- * If we are completing a request which is in the process of
- * being resent, we can stop resending it since we know the
- * responder has already seen it.
- */
- if (qp->s_acked == qp->s_cur) {
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- qp->s_acked = qp->s_cur;
- wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
- if (qp->s_acked != qp->s_tail) {
- qp->s_state = OP(SEND_LAST);
- qp->s_psn = wqe->psn;
- }
- } else {
- if (++qp->s_acked >= qp->s_size)
- qp->s_acked = 0;
- if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
- qp->s_draining = 0;
- wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
- }
- return wqe;
-}
-
-/*
- * do_rc_ack - process an incoming RC ACK
- * @qp: the QP the ACK came in on
- * @psn: the packet sequence number of the ACK
- * @opcode: the opcode of the request that resulted in the ACK
- *
- * This is called from qib_rc_rcv_resp() to process an incoming RC ACK
- * for the given QP.
- * Called at interrupt level with the QP s_lock held.
- * Returns 1 if OK, 0 if the current operation should be aborted (NAK).
- */
-static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
- u64 val, struct qib_ctxtdata *rcd)
-{
- struct qib_ibport *ibp;
- enum ib_wc_status status;
- struct rvt_swqe *wqe;
- int ret = 0;
- u32 ack_psn;
- int diff;
-
- /*
- * Note that NAKs implicitly ACK outstanding SEND and RDMA write
- * requests and implicitly NAK RDMA read and atomic requests issued
- * before the NAK'ed request. The MSN won't include the NAK'ed
	 * request but will include any ACK'ed requests.
- */
- ack_psn = psn;
- if (aeth >> IB_AETH_NAK_SHIFT)
- ack_psn--;
- wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
- ibp = to_iport(qp->ibqp.device, qp->port_num);
-
- /*
- * The MSN might be for a later WQE than the PSN indicates so
- * only complete WQEs that the PSN finishes.
- */
- while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
- /*
- * RDMA_READ_RESPONSE_ONLY is a special case since
- * we want to generate completion events for everything
- * before the RDMA read, copy the data, then generate
- * the completion for the read.
- */
- if (wqe->wr.opcode == IB_WR_RDMA_READ &&
- opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
- diff == 0) {
- ret = 1;
- goto bail;
- }
- /*
		 * If this request is an RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic. In other words, only an RDMA_READ_LAST or ONLY
		 * can ACK an RDMA read and likewise for atomic ops. Note
- * that the NAK case can only happen if relaxed ordering is
- * used and requests are sent after an RDMA read or atomic
- * is sent but before the response is received.
- */
- if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
- (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
- ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
- (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
- /* Retry this request. */
- if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
- qp->r_flags |= RVT_R_RDMAR_SEQ;
- qib_restart_rc(qp, qp->s_last_psn + 1, 0);
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= RVT_R_RSP_SEND;
- rvt_get_qp(qp);
- list_add_tail(&qp->rspwait,
- &rcd->qp_wait_list);
- }
- }
- /*
- * No need to process the ACK/NAK since we are
- * restarting an earlier request.
- */
- goto bail;
- }
- if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
- u64 *vaddr = wqe->sg_list[0].vaddr;
- *vaddr = val;
- }
- if (qp->s_num_rd_atomic &&
- (wqe->wr.opcode == IB_WR_RDMA_READ ||
- wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
- qp->s_num_rd_atomic--;
- /* Restart sending task if fence is complete */
- if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
- !qp->s_num_rd_atomic) {
- qp->s_flags &= ~(RVT_S_WAIT_FENCE |
- RVT_S_WAIT_ACK);
- qib_schedule_send(qp);
- } else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
- qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
- RVT_S_WAIT_ACK);
- qib_schedule_send(qp);
- }
- }
- wqe = do_rc_completion(qp, wqe, ibp);
- if (qp->s_acked == qp->s_tail)
- break;
- }
-
- switch (aeth >> IB_AETH_NAK_SHIFT) {
- case 0: /* ACK */
- this_cpu_inc(*ibp->rvp.rc_acks);
- if (qp->s_acked != qp->s_tail) {
- /*
- * We are expecting more ACKs so
- * reset the retransmit timer.
- */
- rvt_mod_retry_timer(qp);
- /*
- * We can stop resending the earlier packets and
- * continue with the next packet the receiver wants.
- */
- if (qib_cmp24(qp->s_psn, psn) <= 0)
- reset_psn(qp, psn + 1);
- } else {
- /* No more acks - kill all timers */
- rvt_stop_rc_timers(qp);
- if (qib_cmp24(qp->s_psn, psn) <= 0) {
- qp->s_state = OP(SEND_LAST);
- qp->s_psn = psn + 1;
- }
- }
- if (qp->s_flags & RVT_S_WAIT_ACK) {
- qp->s_flags &= ~RVT_S_WAIT_ACK;
- qib_schedule_send(qp);
- }
- rvt_get_credit(qp, aeth);
- qp->s_rnr_retry = qp->s_rnr_retry_cnt;
- qp->s_retry = qp->s_retry_cnt;
- update_last_psn(qp, psn);
- return 1;
-
- case 1: /* RNR NAK */
- ibp->rvp.n_rnr_naks++;
- if (qp->s_acked == qp->s_tail)
- goto bail;
- if (qp->s_flags & RVT_S_WAIT_RNR)
- goto bail;
- if (qp->s_rnr_retry == 0) {
- status = IB_WC_RNR_RETRY_EXC_ERR;
- goto class_b;
- }
- if (qp->s_rnr_retry_cnt < 7)
- qp->s_rnr_retry--;
-
- /* The last valid PSN is the previous PSN. */
- update_last_psn(qp, psn - 1);
-
- ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
-
- reset_psn(qp, psn);
-
- qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
- rvt_stop_rc_timers(qp);
- rvt_add_rnr_timer(qp, aeth);
- return 0;
-
- case 3: /* NAK */
- if (qp->s_acked == qp->s_tail)
- goto bail;
- /* The last valid PSN is the previous PSN. */
- update_last_psn(qp, psn - 1);
- switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
- IB_AETH_CREDIT_MASK) {
- case 0: /* PSN sequence error */
- ibp->rvp.n_seq_naks++;
- /*
- * Back up to the responder's expected PSN.
- * Note that we might get a NAK in the middle of an
- * RDMA READ response which terminates the RDMA
- * READ.
- */
- qib_restart_rc(qp, psn, 0);
- qib_schedule_send(qp);
- break;
-
- case 1: /* Invalid Request */
- status = IB_WC_REM_INV_REQ_ERR;
- ibp->rvp.n_other_naks++;
- goto class_b;
-
- case 2: /* Remote Access Error */
- status = IB_WC_REM_ACCESS_ERR;
- ibp->rvp.n_other_naks++;
- goto class_b;
-
- case 3: /* Remote Operation Error */
- status = IB_WC_REM_OP_ERR;
- ibp->rvp.n_other_naks++;
-class_b:
- if (qp->s_last == qp->s_acked) {
- rvt_send_complete(qp, wqe, status);
- rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
- }
- break;
-
- default:
- /* Ignore other reserved NAK error codes */
- goto reserved;
- }
- qp->s_retry = qp->s_retry_cnt;
- qp->s_rnr_retry = qp->s_rnr_retry_cnt;
- goto bail;
-
- default: /* 2: reserved */
-reserved:
- /* Ignore reserved NAK codes. */
- goto bail;
- }
-
-bail:
- rvt_stop_rc_timers(qp);
- return ret;
-}
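[Editor's note] do_rc_ack() dispatches on the 3-bit code at the top of the AETH (0 ACK, 1 RNR NAK, 3 NAK) and reads the credit/timer and MSN fields below it. A decoding sketch (not part of the deleted file; the shift and mask values mirror include/rdma/ib_hdrs.h and should be treated as an assumption):

#include <stdio.h>

#define IB_AETH_NAK_SHIFT	29
#define IB_AETH_CREDIT_SHIFT	24
#define IB_AETH_CREDIT_MASK	0x1F
#define IB_MSN_MASK		0xFFFFFF

int main(void)
{
	unsigned aeth = (1u << IB_AETH_NAK_SHIFT) | 0x000123;	/* RNR NAK, MSN 0x123 */

	printf("code %u, credit/timer %u, msn 0x%06X\n",
	       aeth >> IB_AETH_NAK_SHIFT,
	       (aeth >> IB_AETH_CREDIT_SHIFT) & IB_AETH_CREDIT_MASK,
	       aeth & IB_MSN_MASK);
	return 0;
}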
-
-/*
- * We have seen an out-of-sequence RDMA read middle or last packet.
- * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
- */
-static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
- struct qib_ctxtdata *rcd)
-{
- struct rvt_swqe *wqe;
-
- /* Remove QP from retry timer */
- rvt_stop_rc_timers(qp);
-
- wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
-
- while (qib_cmp24(psn, wqe->lpsn) > 0) {
- if (wqe->wr.opcode == IB_WR_RDMA_READ ||
- wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
- wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
- break;
- wqe = do_rc_completion(qp, wqe, ibp);
- }
-
- ibp->rvp.n_rdma_seq++;
- qp->r_flags |= RVT_R_RDMAR_SEQ;
- qib_restart_rc(qp, qp->s_last_psn + 1, 0);
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= RVT_R_RSP_SEND;
- rvt_get_qp(qp);
- list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
- }
-}
-
-/**
- * qib_rc_rcv_resp - process an incoming RC response packet
- * @ibp: the port this packet came in on
- * @ohdr: the other headers for this packet
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP for this packet
- * @opcode: the opcode for this packet
- * @psn: the packet sequence number for this packet
- * @hdrsize: the header length
- * @pmtu: the path MTU
- * @rcd: the context pointer
- *
- * This is called from qib_rc_rcv() to process an incoming RC response
- * packet for the given QP.
- * Called at interrupt level.
- */
-static void qib_rc_rcv_resp(struct qib_ibport *ibp,
- struct ib_other_headers *ohdr,
- void *data, u32 tlen,
- struct rvt_qp *qp,
- u32 opcode,
- u32 psn, u32 hdrsize, u32 pmtu,
- struct qib_ctxtdata *rcd)
-{
- struct rvt_swqe *wqe;
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- enum ib_wc_status status;
- unsigned long flags;
- int diff;
- u32 pad;
- u32 aeth;
- u64 val;
-
- if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
- /*
		 * If the ACK'd PSN is on the SDMA busy list, try to make
		 * progress to reclaim SDMA credits.
- */
- if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
- (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
-
- /*
			 * If the send tasklet is not running, attempt to make
			 * progress on the SDMA queue.
- */
- if (!(qp->s_flags & RVT_S_BUSY)) {
- /* Acquire SDMA Lock */
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- /* Invoke sdma make progress */
- qib_sdma_make_progress(ppd);
- /* Release SDMA Lock */
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- }
- }
- }
-
- spin_lock_irqsave(&qp->s_lock, flags);
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
- goto ack_done;
-
- /* Ignore invalid responses. */
- if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0)
- goto ack_done;
-
- /* Ignore duplicate responses. */
- diff = qib_cmp24(psn, qp->s_last_psn);
- if (unlikely(diff <= 0)) {
- /* Update credits for "ghost" ACKs */
- if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
- aeth = be32_to_cpu(ohdr->u.aeth);
- if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
- rvt_get_credit(qp, aeth);
- }
- goto ack_done;
- }
-
- /*
- * Skip everything other than the PSN we expect, if we are waiting
- * for a reply to a restarted RDMA read or atomic op.
- */
- if (qp->r_flags & RVT_R_RDMAR_SEQ) {
- if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
- goto ack_done;
- qp->r_flags &= ~RVT_R_RDMAR_SEQ;
- }
-
- if (unlikely(qp->s_acked == qp->s_tail))
- goto ack_done;
- wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
- status = IB_WC_SUCCESS;
-
- switch (opcode) {
- case OP(ACKNOWLEDGE):
- case OP(ATOMIC_ACKNOWLEDGE):
- case OP(RDMA_READ_RESPONSE_FIRST):
- aeth = be32_to_cpu(ohdr->u.aeth);
- if (opcode == OP(ATOMIC_ACKNOWLEDGE))
- val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
- else
- val = 0;
- if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
- opcode != OP(RDMA_READ_RESPONSE_FIRST))
- goto ack_done;
- hdrsize += 4;
- wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
- if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
- goto ack_op_err;
- /*
- * If this is a response to a resent RDMA read, we
- * have to be careful to copy the data to the right
- * location.
- */
- qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
- wqe, psn, pmtu);
- goto read_middle;
-
- case OP(RDMA_READ_RESPONSE_MIDDLE):
- /* no AETH, no ACK */
- if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
- goto ack_seq_err;
- if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
- goto ack_op_err;
-read_middle:
- if (unlikely(tlen != (hdrsize + pmtu + 4)))
- goto ack_len_err;
- if (unlikely(pmtu >= qp->s_rdma_read_len))
- goto ack_len_err;
-
- /*
- * We got a response so update the timeout.
- * 4.096 usec. * (1 << qp->timeout)
- */
- rvt_mod_retry_timer(qp);
- if (qp->s_flags & RVT_S_WAIT_ACK) {
- qp->s_flags &= ~RVT_S_WAIT_ACK;
- qib_schedule_send(qp);
- }
-
- if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
- qp->s_retry = qp->s_retry_cnt;
-
- /*
- * Update the RDMA receive state but do the copy w/o
- * holding the locks and blocking interrupts.
- */
- qp->s_rdma_read_len -= pmtu;
- update_last_psn(qp, psn);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- rvt_copy_sge(qp, &qp->s_rdma_read_sge,
- data, pmtu, false, false);
- goto bail;
-
- case OP(RDMA_READ_RESPONSE_ONLY):
- aeth = be32_to_cpu(ohdr->u.aeth);
- if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
- goto ack_done;
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /*
- * Check that the data size is >= 0 && <= pmtu.
- * Remember to account for the AETH header (4) and
- * ICRC (4).
- */
- if (unlikely(tlen < (hdrsize + pad + 8)))
- goto ack_len_err;
- /*
- * If this is a response to a resent RDMA read, we
- * have to be careful to copy the data to the right
- * location.
- */
- wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
- qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
- wqe, psn, pmtu);
- goto read_last;
-
- case OP(RDMA_READ_RESPONSE_LAST):
- /* ACKs READ req. */
- if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
- goto ack_seq_err;
- if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
- goto ack_op_err;
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /*
- * Check that the data size is >= 1 && <= pmtu.
- * Remember to account for the AETH header (4) and
- * ICRC (4).
- */
- if (unlikely(tlen <= (hdrsize + pad + 8)))
- goto ack_len_err;
-read_last:
- tlen -= hdrsize + pad + 8;
- if (unlikely(tlen != qp->s_rdma_read_len))
- goto ack_len_err;
- aeth = be32_to_cpu(ohdr->u.aeth);
- rvt_copy_sge(qp, &qp->s_rdma_read_sge,
- data, tlen, false, false);
- WARN_ON(qp->s_rdma_read_sge.num_sge);
- (void) do_rc_ack(qp, aeth, psn,
- OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
- goto ack_done;
- }
-
-ack_op_err:
- status = IB_WC_LOC_QP_OP_ERR;
- goto ack_err;
-
-ack_seq_err:
- rdma_seq_err(qp, ibp, psn, rcd);
- goto ack_done;
-
-ack_len_err:
- status = IB_WC_LOC_LEN_ERR;
-ack_err:
- if (qp->s_last == qp->s_acked) {
- rvt_send_complete(qp, wqe, status);
- rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
- }
-ack_done:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-bail:
- return;
-}
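[Editor's note] The length checks in qib_rc_rcv_resp() all reduce to simple byte accounting: a MIDDLE response must carry exactly one path-MTU (tlen == hdrsize + pmtu + 4, the 4 being the ICRC), while LAST/ONLY responses subtract the header, the 0-3 pad bytes, the 4-byte AETH and the 4-byte ICRC. A sketch (not part of the deleted file; the values are hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned tlen = 2072;	/* hypothetical wire length of a LAST response */
	unsigned hdrsize = 20;	/* LRH + BTH, no GRH */
	unsigned pad = 0;

	/* header + pad + AETH(4) + ICRC(4) come off the top */
	printf("payload: %u\n", tlen - (hdrsize + pad + 8));	/* 2044 */
	return 0;
}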
-
-/**
- * qib_rc_rcv_error - process an incoming duplicate or error RC packet
- * @ohdr: the other headers for this packet
- * @data: the packet data
- * @qp: the QP for this packet
- * @opcode: the opcode for this packet
- * @psn: the packet sequence number for this packet
- * @diff: the difference between the PSN and the expected PSN
- * @rcd: the context pointer
- *
- * This is called from qib_rc_rcv() to process an unexpected
- * incoming RC packet for the given QP.
- * Called at interrupt level.
- * Return 1 if no more processing is needed; otherwise return 0 to
- * schedule a response to be sent.
- */
-static int qib_rc_rcv_error(struct ib_other_headers *ohdr,
- void *data,
- struct rvt_qp *qp,
- u32 opcode,
- u32 psn,
- int diff,
- struct qib_ctxtdata *rcd)
-{
- struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct rvt_ack_entry *e;
- unsigned long flags;
- u8 i, prev;
- int old_req;
-
- if (diff > 0) {
- /*
- * Packet sequence error.
- * A NAK will ACK earlier sends and RDMA writes.
- * Don't queue the NAK if we already sent one.
- */
- if (!qp->r_nak_state) {
- ibp->rvp.n_rc_seqnak++;
- qp->r_nak_state = IB_NAK_PSN_ERROR;
- /* Use the expected PSN. */
- qp->r_ack_psn = qp->r_psn;
- /*
- * Wait to send the sequence NAK until all packets
- * in the receive queue have been processed.
- * Otherwise, we end up propagating congestion.
- */
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= RVT_R_RSP_NAK;
- rvt_get_qp(qp);
- list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
- }
- }
- goto done;
- }
-
- /*
- * Handle a duplicate request. Don't re-execute SEND, RDMA
- * write or atomic op. Don't NAK errors, just silently drop
- * the duplicate request. Note that r_sge, r_len, and
- * r_rcv_len may be in use so don't modify them.
- *
- * We are supposed to ACK the earliest duplicate PSN but we
- * can coalesce an outstanding duplicate ACK. We have to
- * send the earliest so that RDMA reads can be restarted at
- * the requester's expected PSN.
- *
- * First, find where this duplicate PSN falls within the
- * ACKs previously sent.
- * old_req is true if there is an older response that is scheduled
- * to be sent before sending this one.
- */
- e = NULL;
- old_req = 1;
- ibp->rvp.n_rc_dupreq++;
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- for (i = qp->r_head_ack_queue; ; i = prev) {
- if (i == qp->s_tail_ack_queue)
- old_req = 0;
- if (i)
- prev = i - 1;
- else
- prev = QIB_MAX_RDMA_ATOMIC;
- if (prev == qp->r_head_ack_queue) {
- e = NULL;
- break;
- }
- e = &qp->s_ack_queue[prev];
- if (!e->opcode) {
- e = NULL;
- break;
- }
- if (qib_cmp24(psn, e->psn) >= 0) {
- if (prev == qp->s_tail_ack_queue &&
- qib_cmp24(psn, e->lpsn) <= 0)
- old_req = 0;
- break;
- }
- }
- switch (opcode) {
- case OP(RDMA_READ_REQUEST): {
- struct ib_reth *reth;
- u32 offset;
- u32 len;
-
- /*
- * If we didn't find the RDMA read request in the ack queue,
- * we can ignore this request.
- */
- if (!e || e->opcode != OP(RDMA_READ_REQUEST))
- goto unlock_done;
- /* RETH comes after BTH */
- reth = &ohdr->u.rc.reth;
- /*
- * Address range must be a subset of the original
- * request and start on pmtu boundaries.
- * We reuse the old ack_queue slot since the requester
- * should not back up and request an earlier PSN for the
- * same request.
- */
- offset = ((psn - e->psn) & QIB_PSN_MASK) *
- qp->pmtu;
- len = be32_to_cpu(reth->length);
- if (unlikely(offset + len != e->rdma_sge.sge_length))
- goto unlock_done;
- if (e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
- if (len != 0) {
- u32 rkey = be32_to_cpu(reth->rkey);
- u64 vaddr = be64_to_cpu(reth->vaddr);
- int ok;
-
- ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
- IB_ACCESS_REMOTE_READ);
- if (unlikely(!ok))
- goto unlock_done;
- } else {
- e->rdma_sge.vaddr = NULL;
- e->rdma_sge.length = 0;
- e->rdma_sge.sge_length = 0;
- }
- e->psn = psn;
- if (old_req)
- goto unlock_done;
- qp->s_tail_ack_queue = prev;
- break;
- }
-
- case OP(COMPARE_SWAP):
- case OP(FETCH_ADD): {
- /*
- * If we didn't find the atomic request in the ack queue
- * or the send tasklet is already backed up to send an
- * earlier entry, we can ignore this request.
- */
- if (!e || e->opcode != (u8) opcode || old_req)
- goto unlock_done;
- qp->s_tail_ack_queue = prev;
- break;
- }
-
- default:
- /*
- * Ignore this operation if it doesn't request an ACK
- * or an earlier RDMA read or atomic is going to be resent.
- */
- if (!(psn & IB_BTH_REQ_ACK) || old_req)
- goto unlock_done;
- /*
- * Resend the most recent ACK if this request is
- * after all the previous RDMA reads and atomics.
- */
- if (i == qp->r_head_ack_queue) {
- spin_unlock_irqrestore(&qp->s_lock, flags);
- qp->r_nak_state = 0;
- qp->r_ack_psn = qp->r_psn - 1;
- goto send_ack;
- }
- /*
- * Try to send a simple ACK to work around a Mellanox bug
		 * which doesn't accept an RDMA read response or atomic
- * response as an ACK for earlier SENDs or RDMA writes.
- */
- if (!(qp->s_flags & RVT_S_RESP_PENDING)) {
- spin_unlock_irqrestore(&qp->s_lock, flags);
- qp->r_nak_state = 0;
- qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
- goto send_ack;
- }
- /*
- * Resend the RDMA read or atomic op which
- * ACKs this duplicate request.
- */
- qp->s_tail_ack_queue = i;
- break;
- }
- qp->s_ack_state = OP(ACKNOWLEDGE);
- qp->s_flags |= RVT_S_RESP_PENDING;
- qp->r_nak_state = 0;
- qib_schedule_send(qp);
-
-unlock_done:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-done:
- return 1;
-
-send_ack:
- return 0;
-}
-
-static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n)
-{
- unsigned next;
-
- next = n + 1;
- if (next > QIB_MAX_RDMA_ATOMIC)
- next = 0;
- qp->s_tail_ack_queue = next;
- qp->s_ack_state = OP(ACKNOWLEDGE);
-}
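[Editor's note] s_ack_queue has QIB_MAX_RDMA_ATOMIC + 1 slots, so the index wraps past QIB_MAX_RDMA_ATOMIC rather than at a power of two, which is why the driver tests with > instead of masking. A sketch (not part of the deleted file; 16 as the value of QIB_MAX_RDMA_ATOMIC is an assumption taken from the qib headers):

#include <stdio.h>

#define QIB_MAX_RDMA_ATOMIC 16	/* assumed value from the qib headers */

int main(void)
{
	unsigned n = QIB_MAX_RDMA_ATOMIC;	/* currently in the last slot */
	unsigned next = n + 1;

	if (next > QIB_MAX_RDMA_ATOMIC)	/* same wrap test as qib_update_ack_queue() */
		next = 0;
	printf("slot after %u is %u\n", n, next);
	return 0;
}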
-
-/**
- * qib_rc_rcv - process an incoming RC packet
- * @rcd: the context pointer
- * @hdr: the header of this packet
- * @has_grh: true if the header has a GRH
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP for this packet
- *
- * This is called from qib_qp_rcv() to process an incoming RC packet
- * for the given QP.
- * Called at interrupt level.
- */
-void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
-{
- struct qib_ibport *ibp = &rcd->ppd->ibport_data;
- struct ib_other_headers *ohdr;
- u32 opcode;
- u32 hdrsize;
- u32 psn;
- u32 pad;
- struct ib_wc wc;
- u32 pmtu = qp->pmtu;
- int diff;
- struct ib_reth *reth;
- unsigned long flags;
- int ret;
-
- /* Check for GRH */
- if (!has_grh) {
- ohdr = &hdr->u.oth;
- hdrsize = 8 + 12; /* LRH + BTH */
- } else {
- ohdr = &hdr->u.l.oth;
- hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
- }
-
- opcode = be32_to_cpu(ohdr->bth[0]);
- if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
- return;
-
- psn = be32_to_cpu(ohdr->bth[2]);
- opcode >>= 24;
-
- /*
- * Process responses (ACKs) before anything else. Note that the
- * packet sequence number will be for something in the send work
- * queue rather than the expected receive packet sequence number.
- * In other words, this QP is the requester.
- */
- if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
- opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
- qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
- hdrsize, pmtu, rcd);
- return;
- }
-
- /* Compute 24 bits worth of difference. */
- diff = qib_cmp24(psn, qp->r_psn);
- if (unlikely(diff)) {
- if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
- return;
- goto send_ack;
- }
-
- /* Check for opcode sequence errors. */
- switch (qp->r_state) {
- case OP(SEND_FIRST):
- case OP(SEND_MIDDLE):
- if (opcode == OP(SEND_MIDDLE) ||
- opcode == OP(SEND_LAST) ||
- opcode == OP(SEND_LAST_WITH_IMMEDIATE))
- break;
- goto nack_inv;
-
- case OP(RDMA_WRITE_FIRST):
- case OP(RDMA_WRITE_MIDDLE):
- if (opcode == OP(RDMA_WRITE_MIDDLE) ||
- opcode == OP(RDMA_WRITE_LAST) ||
- opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
- break;
- goto nack_inv;
-
- default:
- if (opcode == OP(SEND_MIDDLE) ||
- opcode == OP(SEND_LAST) ||
- opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
- opcode == OP(RDMA_WRITE_MIDDLE) ||
- opcode == OP(RDMA_WRITE_LAST) ||
- opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
- goto nack_inv;
- /*
- * Note that it is up to the requester to not send a new
- * RDMA read or atomic operation before receiving an ACK
- * for the previous operation.
- */
- break;
- }
-
- if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
- rvt_comm_est(qp);
-
- /* OK, process the packet. */
- switch (opcode) {
- case OP(SEND_FIRST):
- ret = rvt_get_rwqe(qp, false);
- if (ret < 0)
- goto nack_op_err;
- if (!ret)
- goto rnr_nak;
- qp->r_rcv_len = 0;
- fallthrough;
- case OP(SEND_MIDDLE):
- case OP(RDMA_WRITE_MIDDLE):
-send_middle:
		/* Check that the payload is a full PMTU and fits the posted RWQE. */
- if (unlikely(tlen != (hdrsize + pmtu + 4)))
- goto nack_inv;
- qp->r_rcv_len += pmtu;
- if (unlikely(qp->r_rcv_len > qp->r_len))
- goto nack_inv;
- rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
- break;
-
- case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
- /* consume RWQE */
- ret = rvt_get_rwqe(qp, true);
- if (ret < 0)
- goto nack_op_err;
- if (!ret)
- goto rnr_nak;
- goto send_last_imm;
-
- case OP(SEND_ONLY):
- case OP(SEND_ONLY_WITH_IMMEDIATE):
- ret = rvt_get_rwqe(qp, false);
- if (ret < 0)
- goto nack_op_err;
- if (!ret)
- goto rnr_nak;
- qp->r_rcv_len = 0;
- if (opcode == OP(SEND_ONLY))
- goto no_immediate_data;
- fallthrough; /* for SEND_ONLY_WITH_IMMEDIATE */
- case OP(SEND_LAST_WITH_IMMEDIATE):
-send_last_imm:
- wc.ex.imm_data = ohdr->u.imm_data;
- hdrsize += 4;
- wc.wc_flags = IB_WC_WITH_IMM;
- goto send_last;
- case OP(SEND_LAST):
- case OP(RDMA_WRITE_LAST):
-no_immediate_data:
- wc.wc_flags = 0;
- wc.ex.imm_data = 0;
-send_last:
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /* Check for invalid length. */
- /* XXX LAST len should be >= 1 */
- if (unlikely(tlen < (hdrsize + pad + 4)))
- goto nack_inv;
- /* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
- wc.byte_len = tlen + qp->r_rcv_len;
- if (unlikely(wc.byte_len > qp->r_len))
- goto nack_inv;
- rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
- rvt_put_ss(&qp->r_sge);
- qp->r_msn++;
- if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
- break;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
- opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
- wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- else
- wc.opcode = IB_WC_RECV;
- wc.qp = &qp->ibqp;
- wc.src_qp = qp->remote_qpn;
- wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
- wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
- /* zero fields that are N/A */
- wc.vendor_err = 0;
- wc.pkey_index = 0;
- wc.dlid_path_bits = 0;
- wc.port_num = 0;
- /* Signal completion event if the solicited bit is set. */
- rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
- break;
-
- case OP(RDMA_WRITE_FIRST):
- case OP(RDMA_WRITE_ONLY):
- case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto nack_inv;
- /* consume RWQE */
- reth = &ohdr->u.rc.reth;
- hdrsize += sizeof(*reth);
- qp->r_len = be32_to_cpu(reth->length);
- qp->r_rcv_len = 0;
- qp->r_sge.sg_list = NULL;
- if (qp->r_len != 0) {
- u32 rkey = be32_to_cpu(reth->rkey);
- u64 vaddr = be64_to_cpu(reth->vaddr);
- int ok;
-
- /* Check rkey & NAK */
- ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
- rkey, IB_ACCESS_REMOTE_WRITE);
- if (unlikely(!ok))
- goto nack_acc;
- qp->r_sge.num_sge = 1;
- } else {
- qp->r_sge.num_sge = 0;
- qp->r_sge.sge.mr = NULL;
- qp->r_sge.sge.vaddr = NULL;
- qp->r_sge.sge.length = 0;
- qp->r_sge.sge.sge_length = 0;
- }
- if (opcode == OP(RDMA_WRITE_FIRST))
- goto send_middle;
- else if (opcode == OP(RDMA_WRITE_ONLY))
- goto no_immediate_data;
- ret = rvt_get_rwqe(qp, true);
- if (ret < 0)
- goto nack_op_err;
- if (!ret) {
- rvt_put_ss(&qp->r_sge);
- goto rnr_nak;
- }
- wc.ex.imm_data = ohdr->u.rc.imm_data;
- hdrsize += 4;
- wc.wc_flags = IB_WC_WITH_IMM;
- goto send_last;
-
- case OP(RDMA_READ_REQUEST): {
- struct rvt_ack_entry *e;
- u32 len;
- u8 next;
-
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
- goto nack_inv;
- next = qp->r_head_ack_queue + 1;
- /* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */
- if (next > QIB_MAX_RDMA_ATOMIC)
- next = 0;
- spin_lock_irqsave(&qp->s_lock, flags);
- if (unlikely(next == qp->s_tail_ack_queue)) {
- if (!qp->s_ack_queue[next].sent)
- goto nack_inv_unlck;
- qib_update_ack_queue(qp, next);
- }
- e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
- reth = &ohdr->u.rc.reth;
- len = be32_to_cpu(reth->length);
- if (len) {
- u32 rkey = be32_to_cpu(reth->rkey);
- u64 vaddr = be64_to_cpu(reth->vaddr);
- int ok;
-
- /* Check rkey & NAK */
- ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
- rkey, IB_ACCESS_REMOTE_READ);
- if (unlikely(!ok))
- goto nack_acc_unlck;
- /*
- * Update the next expected PSN. We add 1 later
- * below, so only add the remainder here.
- */
- qp->r_psn += rvt_div_mtu(qp, len - 1);
- } else {
- e->rdma_sge.mr = NULL;
- e->rdma_sge.vaddr = NULL;
- e->rdma_sge.length = 0;
- e->rdma_sge.sge_length = 0;
- }
- e->opcode = opcode;
- e->sent = 0;
- e->psn = psn;
- e->lpsn = qp->r_psn;
- /*
- * We need to increment the MSN here instead of when we
- * finish sending the result since a duplicate request would
- * increment it more than once.
- */
- qp->r_msn++;
- qp->r_psn++;
- qp->r_state = opcode;
- qp->r_nak_state = 0;
- qp->r_head_ack_queue = next;
-
- /* Schedule the send tasklet. */
- qp->s_flags |= RVT_S_RESP_PENDING;
- qib_schedule_send(qp);
-
- goto sunlock;
- }
-
- case OP(COMPARE_SWAP):
- case OP(FETCH_ADD): {
- struct ib_atomic_eth *ateth;
- struct rvt_ack_entry *e;
- u64 vaddr;
- atomic64_t *maddr;
- u64 sdata;
- u32 rkey;
- u8 next;
-
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
- goto nack_inv;
- next = qp->r_head_ack_queue + 1;
- if (next > QIB_MAX_RDMA_ATOMIC)
- next = 0;
- spin_lock_irqsave(&qp->s_lock, flags);
- if (unlikely(next == qp->s_tail_ack_queue)) {
- if (!qp->s_ack_queue[next].sent)
- goto nack_inv_unlck;
- qib_update_ack_queue(qp, next);
- }
- e = &qp->s_ack_queue[qp->r_head_ack_queue];
- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
- rvt_put_mr(e->rdma_sge.mr);
- e->rdma_sge.mr = NULL;
- }
- ateth = &ohdr->u.atomic_eth;
- vaddr = get_ib_ateth_vaddr(ateth);
- if (unlikely(vaddr & (sizeof(u64) - 1)))
- goto nack_inv_unlck;
- rkey = be32_to_cpu(ateth->rkey);
- /* Check rkey & NAK */
- if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
- vaddr, rkey,
- IB_ACCESS_REMOTE_ATOMIC)))
- goto nack_acc_unlck;
- /* Perform atomic OP and save result. */
- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
- sdata = get_ib_ateth_swap(ateth);
- e->atomic_data = (opcode == OP(FETCH_ADD)) ?
- (u64) atomic64_add_return(sdata, maddr) - sdata :
- (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
- get_ib_ateth_compare(ateth),
- sdata);
- rvt_put_mr(qp->r_sge.sge.mr);
- qp->r_sge.num_sge = 0;
- e->opcode = opcode;
- e->sent = 0;
- e->psn = psn;
- e->lpsn = psn;
- qp->r_msn++;
- qp->r_psn++;
- qp->r_state = opcode;
- qp->r_nak_state = 0;
- qp->r_head_ack_queue = next;
-
- /* Schedule the send tasklet. */
- qp->s_flags |= RVT_S_RESP_PENDING;
- qib_schedule_send(qp);
-
- goto sunlock;
- }
-
- default:
- /* NAK unknown opcodes. */
- goto nack_inv;
- }
- qp->r_psn++;
- qp->r_state = opcode;
- qp->r_ack_psn = psn;
- qp->r_nak_state = 0;
- /* Send an ACK if requested or required. */
- if (psn & (1 << 31))
- goto send_ack;
- return;
-
-rnr_nak:
- qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
- qp->r_ack_psn = qp->r_psn;
- /* Queue RNR NAK for later */
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= RVT_R_RSP_NAK;
- rvt_get_qp(qp);
- list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
- }
- return;
-
-nack_op_err:
- rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
- qp->r_ack_psn = qp->r_psn;
- /* Queue NAK for later */
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= RVT_R_RSP_NAK;
- rvt_get_qp(qp);
- list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
- }
- return;
-
-nack_inv_unlck:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-nack_inv:
- rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- qp->r_nak_state = IB_NAK_INVALID_REQUEST;
- qp->r_ack_psn = qp->r_psn;
- /* Queue NAK for later */
- if (list_empty(&qp->rspwait)) {
- qp->r_flags |= RVT_R_RSP_NAK;
- rvt_get_qp(qp);
- list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
- }
- return;
-
-nack_acc_unlck:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-nack_acc:
- rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
- qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
- qp->r_ack_psn = qp->r_psn;
-send_ack:
- qib_send_rc_ack(qp);
- return;
-
-sunlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
-}
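
One subtlety in the FETCH_ADD arm above: atomic64_add_return() yields the
post-add value, so the pre-add value owed to the requester is recovered by
subtracting sdata. A minimal userspace model of that identity (a sketch only;
it uses GCC __atomic builtins, not the kernel's atomic64 API):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            int64_t target = 100, sdata = 7;

            /* post-add value, analogous to atomic64_add_return() */
            int64_t post = __atomic_add_fetch(&target, sdata, __ATOMIC_SEQ_CST);

            /* the FETCH_ADD response must carry the pre-add value */
            assert(post - sdata == 100);
            assert(target == 107);
            return 0;
    }
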
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
deleted file mode 100644
index 1fa21938f310..000000000000
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/spinlock.h>
-#include <rdma/ib_smi.h>
-
-#include "qib.h"
-#include "qib_mad.h"
-
-/*
- * Switch to alternate path.
- * The QP s_lock should be held and interrupts disabled.
- */
-void qib_migrate_qp(struct rvt_qp *qp)
-{
- struct ib_event ev;
-
- qp->s_mig_state = IB_MIG_MIGRATED;
- qp->remote_ah_attr = qp->alt_ah_attr;
- qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
- qp->s_pkey_index = qp->s_alt_pkey_index;
-
- ev.device = qp->ibqp.device;
- ev.element.qp = &qp->ibqp;
- ev.event = IB_EVENT_PATH_MIG;
- qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
-}
-
-static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
-{
- if (!index) {
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
-
- return ppd->guid;
- }
- return ibp->guids[index - 1];
-}
-
-static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
-{
- return (gid->global.interface_id == id &&
- (gid->global.subnet_prefix == gid_prefix ||
- gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
-}
-
-/*
- * qib_ruc_check_hdr - validate an incoming packet's headers against the QP
- *
- * This should be called with the QP r_lock held.
- *
- * The s_lock will be acquired around the qib_migrate_qp() call.
- */
-int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
- int has_grh, struct rvt_qp *qp, u32 bth0)
-{
- __be64 guid;
- unsigned long flags;
-
- if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
- if (!has_grh) {
- if (rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
- IB_AH_GRH)
- goto err;
- } else {
- const struct ib_global_route *grh;
-
- if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
- IB_AH_GRH))
- goto err;
- grh = rdma_ah_read_grh(&qp->alt_ah_attr);
- guid = get_sguid(ibp, grh->sgid_index);
- if (!gid_ok(&hdr->u.l.grh.dgid,
- ibp->rvp.gid_prefix, guid))
- goto err;
- if (!gid_ok(&hdr->u.l.grh.sgid,
- grh->dgid.global.subnet_prefix,
- grh->dgid.global.interface_id))
- goto err;
- }
- if (!qib_pkey_ok((u16)bth0,
- qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
- qib_bad_pkey(ibp,
- (u16)bth0,
- (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
- 0, qp->ibqp.qp_num,
- hdr->lrh[3], hdr->lrh[1]);
- goto err;
- }
- /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
- if ((be16_to_cpu(hdr->lrh[3]) !=
- rdma_ah_get_dlid(&qp->alt_ah_attr)) ||
- ppd_from_ibp(ibp)->port !=
- rdma_ah_get_port_num(&qp->alt_ah_attr))
- goto err;
- spin_lock_irqsave(&qp->s_lock, flags);
- qib_migrate_qp(qp);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- } else {
- if (!has_grh) {
- if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
- IB_AH_GRH)
- goto err;
- } else {
- const struct ib_global_route *grh;
-
- if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
- IB_AH_GRH))
- goto err;
- grh = rdma_ah_read_grh(&qp->remote_ah_attr);
- guid = get_sguid(ibp, grh->sgid_index);
- if (!gid_ok(&hdr->u.l.grh.dgid,
- ibp->rvp.gid_prefix, guid))
- goto err;
- if (!gid_ok(&hdr->u.l.grh.sgid,
- grh->dgid.global.subnet_prefix,
- grh->dgid.global.interface_id))
- goto err;
- }
- if (!qib_pkey_ok((u16)bth0,
- qib_get_pkey(ibp, qp->s_pkey_index))) {
- qib_bad_pkey(ibp,
- (u16)bth0,
- (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
- 0, qp->ibqp.qp_num,
- hdr->lrh[3], hdr->lrh[1]);
- goto err;
- }
- /* Validate the SLID. See Ch. 9.6.1.5 */
- if (be16_to_cpu(hdr->lrh[3]) !=
- rdma_ah_get_dlid(&qp->remote_ah_attr) ||
- ppd_from_ibp(ibp)->port != qp->port_num)
- goto err;
- if (qp->s_mig_state == IB_MIG_REARM &&
- !(bth0 & IB_BTH_MIG_REQ))
- qp->s_mig_state = IB_MIG_ARMED;
- }
-
- return 0;
-
-err:
- return 1;
-}
-
-/**
- * qib_make_grh - construct a GRH header
- * @ibp: a pointer to the IB port
- * @hdr: a pointer to the GRH header being constructed
- * @grh: the global route address to send to
- * @hwords: the number of 32 bit words of header being sent
- * @nwords: the number of 32 bit words of data being sent
- *
- * Return the size of the header in 32 bit words.
- */
-u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
- const struct ib_global_route *grh, u32 hwords, u32 nwords)
-{
- hdr->version_tclass_flow =
- cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
- (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
- (grh->flow_label << IB_GRH_FLOW_SHIFT));
- hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
- /* next_hdr is defined by C8-7 in ch. 8.4.1 */
- hdr->next_hdr = IB_GRH_NEXT_HDR;
- hdr->hop_limit = grh->hop_limit;
- /* The SGID is 32-bit aligned. */
- hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
- if (!grh->sgid_index)
- hdr->sgid.global.interface_id = ppd_from_ibp(ibp)->guid;
- else if (grh->sgid_index < QIB_GUIDS_PER_PORT)
- hdr->sgid.global.interface_id = ibp->guids[grh->sgid_index - 1];
- hdr->dgid = grh->dgid;
-
- /* GRH header size in 32-bit words. */
- return sizeof(struct ib_grh) / sizeof(u32);
-}
-
-void qib_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
- u32 bth0, u32 bth2)
-{
- struct qib_qp_priv *priv = qp->priv;
- struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- u16 lrh0;
- u32 nwords;
- u32 extra_bytes;
-
- /* Construct the header. */
- extra_bytes = -qp->s_cur_size & 3;
- nwords = (qp->s_cur_size + extra_bytes) >> 2;
- lrh0 = QIB_LRH_BTH;
- if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
- qp->s_hdrwords +=
- qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
- rdma_ah_read_grh(&qp->remote_ah_attr),
- qp->s_hdrwords, nwords);
- lrh0 = QIB_LRH_GRH;
- }
- lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(&qp->remote_ah_attr)] << 12 |
- rdma_ah_get_sl(&qp->remote_ah_attr) << 4;
- priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
- priv->s_hdr->lrh[1] =
- cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
- priv->s_hdr->lrh[2] =
- cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
- priv->s_hdr->lrh[3] =
- cpu_to_be16(ppd_from_ibp(ibp)->lid |
- rdma_ah_get_path_bits(&qp->remote_ah_attr));
- bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
- bth0 |= extra_bytes << 20;
- if (qp->s_mig_state == IB_MIG_MIGRATED)
- bth0 |= IB_BTH_MIG_REQ;
- ohdr->bth[0] = cpu_to_be32(bth0);
- ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
- ohdr->bth[2] = cpu_to_be32(bth2);
- this_cpu_inc(ibp->pmastats->n_unicast_xmit);
-}
-
-void _qib_do_send(struct work_struct *work)
-{
- struct qib_qp_priv *priv = container_of(work, struct qib_qp_priv,
- s_work);
- struct rvt_qp *qp = priv->owner;
-
- qib_do_send(qp);
-}
-
-/**
- * qib_do_send - perform a send on a QP
- * @qp: pointer to the QP
- *
- * Process entries in the send work queue until credit or queue is
- * exhausted. Only allow one CPU to send a packet per QP (tasklet).
- * Otherwise, two threads could send packets out of order.
- */
-void qib_do_send(struct rvt_qp *qp)
-{
- struct qib_qp_priv *priv = qp->priv;
- struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- int (*make_req)(struct rvt_qp *qp, unsigned long *flags);
- unsigned long flags;
-
- if ((qp->ibqp.qp_type == IB_QPT_RC ||
- qp->ibqp.qp_type == IB_QPT_UC) &&
- (rdma_ah_get_dlid(&qp->remote_ah_attr) &
- ~((1 << ppd->lmc) - 1)) == ppd->lid) {
- rvt_ruc_loopback(qp);
- return;
- }
-
- if (qp->ibqp.qp_type == IB_QPT_RC)
- make_req = qib_make_rc_req;
- else if (qp->ibqp.qp_type == IB_QPT_UC)
- make_req = qib_make_uc_req;
- else
- make_req = qib_make_ud_req;
-
- spin_lock_irqsave(&qp->s_lock, flags);
-
- /* Return if we are already busy processing a work request. */
- if (!qib_send_ok(qp)) {
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return;
- }
-
- qp->s_flags |= RVT_S_BUSY;
-
- do {
- /* Check for a constructed packet to be sent. */
- if (qp->s_hdrwords != 0) {
- spin_unlock_irqrestore(&qp->s_lock, flags);
- /*
- * If the packet cannot be sent now, return and
- * the send tasklet will be woken up later.
- */
- if (qib_verbs_send(qp, priv->s_hdr, qp->s_hdrwords,
- qp->s_cur_sge, qp->s_cur_size))
- return;
- /* Record that s_hdr is empty. */
- qp->s_hdrwords = 0;
- spin_lock_irqsave(&qp->s_lock, flags);
- }
- } while (make_req(qp, &flags));
-
- spin_unlock_irqrestore(&qp->s_lock, flags);
-}
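
A note on the loopback test at the top of qib_do_send(): masking off the low
LMC bits of the DLID before comparing against the port LID is what makes every
LID assigned to the port match. A standalone sketch with hypothetical values:

    #include <assert.h>

    int main(void)
    {
            unsigned int lid = 0x40, lmc = 2;  /* port owns LIDs 0x40..0x43 */
            unsigned int mask = ~((1u << lmc) - 1);

            assert((0x41 & mask) == lid);  /* local: software loopback */
            assert((0x44 & mask) != lid);  /* remote: goes out on the wire */
            return 0;
    }
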
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
deleted file mode 100644
index 40bc0a34273e..000000000000
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ /dev/null
@@ -1,1445 +0,0 @@
-/*
- * Copyright (c) 2013 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-/*
- * This file contains all of the code that is specific to the SerDes
- * on the QLogic_IB 7220 chip.
- */
-
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/firmware.h>
-
-#include "qib.h"
-#include "qib_7220.h"
-
-#define SD7220_FW_NAME "qlogic/sd7220.fw"
-MODULE_FIRMWARE(SD7220_FW_NAME);
-
-/*
- * Same as in qib_iba7220.c, but just the registers needed here.
- * Could move the whole set to qib_7220.h, but we decided it was
- * better to keep them local.
- */
-#define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
-#define kr_hwerrclear KREG_IDX(HwErrClear)
-#define kr_hwerrmask KREG_IDX(HwErrMask)
-#define kr_hwerrstatus KREG_IDX(HwErrStatus)
-#define kr_ibcstatus KREG_IDX(IBCStatus)
-#define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
-#define kr_scratch KREG_IDX(Scratch)
-#define kr_xgxs_cfg KREG_IDX(XGXSCfg)
-/* these are used only here, not in qib_iba7220.c */
-#define kr_ibsd_epb_access_ctrl KREG_IDX(ibsd_epb_access_ctrl)
-#define kr_ibsd_epb_transaction_reg KREG_IDX(ibsd_epb_transaction_reg)
-#define kr_pciesd_epb_transaction_reg KREG_IDX(pciesd_epb_transaction_reg)
-#define kr_pciesd_epb_access_ctrl KREG_IDX(pciesd_epb_access_ctrl)
-#define kr_serdes_ddsrxeq0 KREG_IDX(SerDes_DDSRXEQ0)
-
-/*
- * The IBSerDesMappTable is a memory that holds values to be stored in
- * various SerDes registers by IBC.
- */
-#define kr_serdes_maptable KREG_IDX(IBSerDesMappTable)
-
-/*
- * The values below are used for the sdnum parameter, selecting one of the
- * two sections used for PCIe, or the single SerDes used for IB.
- */
-#define PCIE_SERDES0 0
-#define PCIE_SERDES1 1
-
-/*
- * The EPB requires addressing in a particular form. EPB_LOC() is intended
- * to make the #defines a little more readable.
- */
-#define EPB_ADDR_SHF 8
-#define EPB_LOC(chn, elt, reg) \
- (((elt & 0xf) | ((chn & 7) << 4) | ((reg & 0x3f) << 9)) << \
- EPB_ADDR_SHF)
-#define EPB_IB_QUAD0_CS_SHF (25)
-#define EPB_IB_QUAD0_CS (1U << EPB_IB_QUAD0_CS_SHF)
-#define EPB_IB_UC_CS_SHF (26)
-#define EPB_PCIE_UC_CS_SHF (27)
-#define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8))
-
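
To see how EPB_LOC() packs its fields: after the final shift by EPB_ADDR_SHF,
the element lands in bits 8..11, the channel in bits 12..14, and the register
in bits 17..22. A standalone sketch (not driver code) that repeats the macro
and checks one packed value:

    #include <assert.h>

    #define EPB_ADDR_SHF 8
    #define EPB_LOC(chn, elt, reg) \
            (((elt & 0xf) | ((chn & 7) << 4) | ((reg & 0x3f) << 9)) << \
             EPB_ADDR_SHF)

    int main(void)
    {
            /* chn=1, elt=2, reg=3: (0x2 | 0x10 | 0x600) << 8 == 0x61200 */
            assert(EPB_LOC(1, 2, 3) == 0x61200);
            return 0;
    }
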
-/* Forward declarations. */
-static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
- u32 data, u32 mask);
-static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
- int mask);
-static int qib_sd_trimdone_poll(struct qib_devdata *dd);
-static void qib_sd_trimdone_monitor(struct qib_devdata *dd, const char *where);
-static int qib_sd_setvals(struct qib_devdata *dd);
-static int qib_sd_early(struct qib_devdata *dd);
-static int qib_sd_dactrim(struct qib_devdata *dd);
-static int qib_internal_presets(struct qib_devdata *dd);
-/* Tweak the register (CMUCTRL5) that contains the TRIMSELF controls */
-static int qib_sd_trimself(struct qib_devdata *dd, int val);
-static int epb_access(struct qib_devdata *dd, int sdnum, int claim);
-static int qib_sd7220_ib_load(struct qib_devdata *dd,
- const struct firmware *fw);
-static int qib_sd7220_ib_vfy(struct qib_devdata *dd,
- const struct firmware *fw);
-
-/*
- * The code below keeps track of whether the "once per power-on" initialization
- * has been done, because uC code Version 1.32.17 or higher allows the uC to
- * be reset at will, and Automatic Equalization may require it. So the
- * state of the reset "pin" is no longer valid. Instead, we check for the
- * actual uC code having been loaded.
- */
-static int qib_ibsd_ucode_loaded(struct qib_pportdata *ppd,
- const struct firmware *fw)
-{
- struct qib_devdata *dd = ppd->dd;
-
- if (!dd->cspec->serdes_first_init_done &&
- qib_sd7220_ib_vfy(dd, fw) > 0)
- dd->cspec->serdes_first_init_done = 1;
- return dd->cspec->serdes_first_init_done;
-}
-
-/* repeat #define for local use. "Real" #define is in qib_iba7220.c */
-#define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
-#define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF))
-#define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF))
-#define UC_PAR_CLR_D 8
-#define UC_PAR_CLR_M 0xC
-#define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS)
-#define START_EQ1(chan) EPB_LOC(chan, 7, 0x27)
-
-void qib_sd7220_clr_ibpar(struct qib_devdata *dd)
-{
- int ret;
-
- /* clear, then re-enable parity errs */
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
- UC_PAR_CLR_D, UC_PAR_CLR_M);
- if (ret < 0) {
- qib_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
- goto bail;
- }
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
- UC_PAR_CLR_M);
-
- qib_read_kreg32(dd, kr_scratch);
- udelay(4);
- qib_write_kreg(dd, kr_hwerrclear,
- QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
- qib_read_kreg32(dd, kr_scratch);
-bail:
- return;
-}
-
-/*
- * After a reset or other unusual event, the EPB interface may need
- * to be re-synchronized between the host and the uC.
- * Returns <0 for failure to resync within IBSD_RESYNC_TRIES (not expected).
- */
-#define IBSD_RESYNC_TRIES 3
-#define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS)
-#define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS)
-
-static int qib_resync_ibepb(struct qib_devdata *dd)
-{
- int ret, pat, tries, chn;
- u32 loc;
-
- ret = -1;
- chn = 0;
- for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) {
- loc = IB_PGUDP(chn);
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
- if (ret < 0) {
- qib_dev_err(dd, "Failed read in resync\n");
- continue;
- }
- if (ret != 0xF0 && ret != 0x55 && tries == 0)
- qib_dev_err(dd, "unexpected pattern in resync\n");
- pat = ret ^ 0xA5; /* alternate F0 and 55 */
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF);
- if (ret < 0) {
- qib_dev_err(dd, "Failed write in resync\n");
- continue;
- }
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
- if (ret < 0) {
- qib_dev_err(dd, "Failed re-read in resync\n");
- continue;
- }
- if (ret != pat) {
- qib_dev_err(dd, "Failed compare1 in resync\n");
- continue;
- }
- loc = IB_CMUDONE(chn);
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
- if (ret < 0) {
- qib_dev_err(dd, "Failed CMUDONE rd in resync\n");
- continue;
- }
- if ((ret & 0x70) != ((chn << 4) | 0x40)) {
- qib_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
- ret, chn);
- continue;
- }
- if (++chn == 4)
- break; /* Success */
- }
- return (ret > 0) ? 0 : ret;
-}
-
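
On the resync loop above: 0xF0 and 0x55 differ in exactly the bits of 0xA5, so
"pat = ret ^ 0xA5" toggles the scratch pattern between the two values on each
pass. A quick standalone check:

    #include <assert.h>

    int main(void)
    {
            /* XOR with 0xA5 flips between the two test patterns */
            assert((0xF0 ^ 0xA5) == 0x55);
            assert((0x55 ^ 0xA5) == 0xF0);
            return 0;
    }
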
-/*
- * Localize the code that asserts or de-asserts the IB uC reset.
- * Returns <0 for errors.
- */
-static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)
-{
- u64 rst_val;
- int ret = 0;
- unsigned long flags;
-
- rst_val = qib_read_kreg64(dd, kr_ibserdesctrl);
- if (assert_rst) {
- /*
- * Vendor recommends "interrupting" uC before reset, to
- * minimize possible glitches.
- */
- spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
- epb_access(dd, IB_7220_SERDES, 1);
- rst_val |= 1ULL;
- /* Squelch possible parity error from _asserting_ reset */
- qib_write_kreg(dd, kr_hwerrmask,
- dd->cspec->hwerrmask &
- ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
- qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
- /* flush write, delay to ensure it took effect */
- qib_read_kreg32(dd, kr_scratch);
- udelay(2);
- /* once it's reset, can remove interrupt */
- epb_access(dd, IB_7220_SERDES, -1);
- spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
- } else {
- /*
- * Before we de-assert reset, we need to deal with
- * possible glitch on the Parity-error line.
- * Suppress it around the reset, both in chip-level
- * hwerrmask and in IB uC control reg. uC will allow
- * it again during startup.
- */
- u64 val;
-
- rst_val &= ~(1ULL);
- qib_write_kreg(dd, kr_hwerrmask,
- dd->cspec->hwerrmask &
- ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
-
- ret = qib_resync_ibepb(dd);
- if (ret < 0)
- qib_dev_err(dd, "unable to re-sync IB EPB\n");
-
- /* set uC control regs to suppress parity errs */
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
- if (ret < 0)
- goto bail;
- /* IB uC code past Version 1.32.17 allows suppression of wdog */
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
- 0x80);
- if (ret < 0) {
- qib_dev_err(dd, "Failed to set WDOG disable\n");
- goto bail;
- }
- qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
- /* flush write, delay for startup */
- qib_read_kreg32(dd, kr_scratch);
- udelay(1);
- /* clear, then re-enable parity errs */
- qib_sd7220_clr_ibpar(dd);
- val = qib_read_kreg64(dd, kr_hwerrstatus);
- if (val & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR) {
- qib_dev_err(dd, "IBUC Parity still set after RST\n");
- dd->cspec->hwerrmask &=
- ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
- }
- qib_write_kreg(dd, kr_hwerrmask,
- dd->cspec->hwerrmask);
- }
-
-bail:
- return ret;
-}
-
-static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
- const char *where)
-{
- int ret, chn, baduns;
- u64 val;
-
- if (!where)
- where = "?";
-
- /* give time for reset to settle out in EPB */
- udelay(2);
-
- ret = qib_resync_ibepb(dd);
- if (ret < 0)
- qib_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);
-
- /* Do "sacrificial read" to get EPB in sane state after reset */
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
- if (ret < 0)
- qib_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);
-
- /* Check/show "summary" Trim-done bit in IBCStatus */
- val = qib_read_kreg64(dd, kr_ibcstatus);
- if (!(val & (1ULL << 11)))
- qib_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);
- /*
- * Do "dummy read/mod/wr" to get EPB in sane state after reset
- * The default value for MPREG6 is 0.
- */
- udelay(2);
-
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
- if (ret < 0)
- qib_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);
- udelay(10);
-
- baduns = 0;
-
- for (chn = 3; chn >= 0; --chn) {
- /* Read CTRL reg for each channel to check TRIMDONE */
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
- IB_CTRL2(chn), 0, 0);
- if (ret < 0)
- qib_dev_err(dd,
- "Failed checking TRIMDONE, chn %d (%s)\n",
- chn, where);
-
- if (!(ret & 0x10)) {
- int probe;
-
- baduns |= (1 << chn);
- qib_dev_err(dd,
- "TRIMDONE cleared on chn %d (%02X). (%s)\n",
- chn, ret, where);
- probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
- IB_PGUDP(0), 0, 0);
- qib_dev_err(dd, "probe is %d (%02X)\n",
- probe, probe);
- probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
- IB_CTRL2(chn), 0, 0);
- qib_dev_err(dd, "re-read: %d (%02X)\n",
- probe, probe);
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
- IB_CTRL2(chn), 0x10, 0x10);
- if (ret < 0)
- qib_dev_err(dd,
- "Err on TRIMDONE rewrite1\n");
- }
- }
- for (chn = 3; chn >= 0; --chn) {
- /* Read CTRL reg for each channel to check TRIMDONE */
- if (baduns & (1 << chn)) {
- qib_dev_err(dd,
- "Resetting TRIMDONE on chn %d (%s)\n",
- chn, where);
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
- IB_CTRL2(chn), 0x10, 0x10);
- if (ret < 0)
- qib_dev_err(dd,
- "Failed re-setting TRIMDONE, chn %d (%s)\n",
- chn, where);
- }
- }
-}
-
-/*
- * Below is the portion of the IBA7220-specific bringup_serdes() that actually
- * deals with registers and memory within the SerDes itself.
- * Post IB uC code version 1.32.17, was_reset being 1 is not really
- * informative, so we double-check.
- */
-int qib_sd7220_init(struct qib_devdata *dd)
-{
- const struct firmware *fw;
- int ret = 1; /* default to failure */
- int first_reset, was_reset;
-
- /* SERDES MPU reset recorded in D0 */
- was_reset = (qib_read_kreg64(dd, kr_ibserdesctrl) & 1);
- if (!was_reset) {
- /* entered with reset not asserted, we need to do it */
- qib_ibsd_reset(dd, 1);
- qib_sd_trimdone_monitor(dd, "Driver-reload");
- }
-
- ret = request_firmware(&fw, SD7220_FW_NAME, &dd->pcidev->dev);
- if (ret) {
- qib_dev_err(dd, "Failed to load IB SERDES image\n");
- goto done;
- }
-
- /* Substitute our deduced value for was_reset */
- ret = qib_ibsd_ucode_loaded(dd->pport, fw);
- if (ret < 0)
- goto bail;
-
- first_reset = !ret; /* First reset if IBSD uCode not yet loaded */
- /*
- * Alter some regs per vendor latest doc, reset-defaults
- * are not right for IB.
- */
- ret = qib_sd_early(dd);
- if (ret < 0) {
- qib_dev_err(dd, "Failed to set IB SERDES early defaults\n");
- goto bail;
- }
- /*
- * Set DAC manual trim IB.
- * We only do this once after chip has been reset (usually
- * same as once per system boot).
- */
- if (first_reset) {
- ret = qib_sd_dactrim(dd);
- if (ret < 0) {
- qib_dev_err(dd, "Failed IB SERDES DAC trim\n");
- goto bail;
- }
- }
- /*
- * Set various registers (DDS and RXEQ) that will be
- * controlled by IBC (in 1.2 mode) to reasonable preset values
- * Calling the "internal" version avoids the "check for needed"
- * and "trimdone monitor" that might be counter-productive.
- */
- ret = qib_internal_presets(dd);
- if (ret < 0) {
- qib_dev_err(dd, "Failed to set IB SERDES presets\n");
- goto bail;
- }
- ret = qib_sd_trimself(dd, 0x80);
- if (ret < 0) {
- qib_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");
- goto bail;
- }
-
- /* Load image, then try to verify */
- ret = 0; /* Assume success */
- if (first_reset) {
- int vfy;
- int trim_done;
-
- ret = qib_sd7220_ib_load(dd, fw);
- if (ret < 0) {
- qib_dev_err(dd, "Failed to load IB SERDES image\n");
- goto bail;
- } else {
- /* Loaded image, try to verify */
- vfy = qib_sd7220_ib_vfy(dd, fw);
- if (vfy != ret) {
- qib_dev_err(dd, "SERDES PRAM VFY failed\n");
- goto bail;
- } /* end if verified */
- } /* end if loaded */
-
- /*
- * Loaded and verified. Almost good...
- * hold "success" in ret
- */
- ret = 0;
- /*
- * Prev steps all worked, continue bringup
- * De-assert RESET to uC, only in first reset, to allow
- * trimming.
- *
- * Since our default setup sets START_EQ1 to
- * PRESET, we need to clear that for this very first run.
- */
- ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38);
- if (ret < 0) {
- qib_dev_err(dd, "Failed clearing START_EQ1\n");
- goto bail;
- }
-
- qib_ibsd_reset(dd, 0);
- /*
- * If this is not the first reset, trimdone should be set
- * already. We may need to double-check this.
- */
- trim_done = qib_sd_trimdone_poll(dd);
- /*
- * Whether or not trimdone succeeded, we need to put the
- * uC back into reset to avoid a possible fight with the
- * IBC state-machine.
- */
- qib_ibsd_reset(dd, 1);
-
- if (!trim_done) {
- qib_dev_err(dd, "No TRIMDONE seen\n");
- goto bail;
- }
- /*
- * DEBUG: check each time we reset if trimdone bits have
- * gotten cleared, and re-set them.
- */
- qib_sd_trimdone_monitor(dd, "First-reset");
- /* Remember so we do not re-do the load, dactrim, etc. */
- dd->cspec->serdes_first_init_done = 1;
- }
- /*
- * setup for channel training and load values for
- * RxEq and DDS in tables used by IBC in IB1.2 mode
- */
- ret = 0;
- if (qib_sd_setvals(dd) >= 0)
- goto done;
-bail:
- ret = 1;
-done:
- /* start relock timer regardless, but start at 1 second */
- set_7220_relock_poll(dd, -1);
-
- release_firmware(fw);
- return ret;
-}
-
-#define EPB_ACC_REQ 1
-#define EPB_ACC_GNT 0x100
-#define EPB_DATA_MASK 0xFF
-#define EPB_RD (1ULL << 24)
-#define EPB_TRANS_RDY (1ULL << 31)
-#define EPB_TRANS_ERR (1ULL << 30)
-#define EPB_TRANS_TRIES 5
-
-/*
- * Query, claim, or release ownership of the EPB (External Parallel Bus)
- * for a specified SERDES.
- * The "claim" parameter is >0 to claim, <0 to release, 0 to query.
- * Returns <0 for errors, >0 if we had ownership, else 0.
- */
-static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
-{
- u16 acc;
- u64 accval;
- int owned = 0;
- u64 oct_sel = 0;
-
- switch (sdnum) {
- case IB_7220_SERDES:
- /*
- * The IB SERDES "ownership" is fairly simple. A single each
- * request/grant.
- */
- acc = kr_ibsd_epb_access_ctrl;
- break;
-
- case PCIE_SERDES0:
- case PCIE_SERDES1:
- /* PCIe SERDES has two "octants", need to select which */
- acc = kr_pciesd_epb_access_ctrl;
- oct_sel = (2 << (sdnum - PCIE_SERDES0));
- break;
-
- default:
- return 0;
- }
-
- /* Make sure any outstanding transaction was seen */
- qib_read_kreg32(dd, kr_scratch);
- udelay(15);
-
- accval = qib_read_kreg32(dd, acc);
-
- owned = !!(accval & EPB_ACC_GNT);
- if (claim < 0) {
- /* Need to release */
- u64 pollval;
- /*
- * The only writable bits are the request and CS.
- * Both should be clear
- */
- u64 newval = 0;
-
- qib_write_kreg(dd, acc, newval);
- /* First read after write is not trustworthy */
- pollval = qib_read_kreg32(dd, acc);
- udelay(5);
- pollval = qib_read_kreg32(dd, acc);
- if (pollval & EPB_ACC_GNT)
- owned = -1;
- } else if (claim > 0) {
- /* Need to claim */
- u64 pollval;
- u64 newval = EPB_ACC_REQ | oct_sel;
-
- qib_write_kreg(dd, acc, newval);
- /* First read after write is not trustworthy */
- pollval = qib_read_kreg32(dd, acc);
- udelay(5);
- pollval = qib_read_kreg32(dd, acc);
- if (!(pollval & EPB_ACC_GNT))
- owned = -1;
- }
- return owned;
-}
-
-/*
- * Helper to deal with the race condition of write..read to EPB regs.
- */
-static int epb_trans(struct qib_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
-{
- int tries;
- u64 transval;
-
- qib_write_kreg(dd, reg, i_val);
- /* Throw away first read, as RDY bit may be stale */
- transval = qib_read_kreg64(dd, reg);
-
- for (tries = EPB_TRANS_TRIES; tries; --tries) {
- transval = qib_read_kreg32(dd, reg);
- if (transval & EPB_TRANS_RDY)
- break;
- udelay(5);
- }
- if (transval & EPB_TRANS_ERR)
- return -1;
- if (tries > 0 && o_vp)
- *o_vp = transval;
- return tries;
-}
-
-/**
- * qib_sd7220_reg_mod - modify SERDES register
- * @dd: the qlogic_ib device
- * @sdnum: which SERDES to access
- * @loc: location - channel, element, register, as packed by EPB_LOC() macro.
- * @wd: Write Data - value to set in register
- * @mask: ones where data should be spliced into reg.
- *
- * Basic register read/modify/write, with un-needed accesses elided. That is,
- * a mask of zero will prevent the write, while a mask of 0xFF will prevent
- * the read.
- * Returns the current (presumed, if a write was done) contents of the
- * selected register, or <0 on errors.
- */
-static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
- u32 wd, u32 mask)
-{
- u16 trans;
- u64 transval;
- int owned;
- int tries, ret;
- unsigned long flags;
-
- switch (sdnum) {
- case IB_7220_SERDES:
- trans = kr_ibsd_epb_transaction_reg;
- break;
-
- case PCIE_SERDES0:
- case PCIE_SERDES1:
- trans = kr_pciesd_epb_transaction_reg;
- break;
-
- default:
- return -1;
- }
-
- /*
- * All access is locked in software (vs other host threads) and
- * hardware (vs uC access).
- */
- spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
-
- owned = epb_access(dd, sdnum, 1);
- if (owned < 0) {
- spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
- return -1;
- }
- for (tries = EPB_TRANS_TRIES; tries; --tries) {
- transval = qib_read_kreg32(dd, trans);
- if (transval & EPB_TRANS_RDY)
- break;
- udelay(5);
- }
-
- if (tries > 0) {
- tries = 1; /* to make read-skip work */
- if (mask != 0xFF) {
- /*
- * Not a pure write, so need to read.
- * loc encodes chip-select as well as address
- */
- transval = loc | EPB_RD;
- tries = epb_trans(dd, trans, transval, &transval);
- }
- if (tries > 0 && mask != 0) {
- /*
- * Not a pure read, so need to write.
- */
- wd = (wd & mask) | (transval & ~mask);
- transval = loc | (wd & EPB_DATA_MASK);
- tries = epb_trans(dd, trans, transval, &transval);
- }
- }
- /* else, failed to see ready, what error-handling? */
-
- /*
- * Release bus. Failure is an error.
- */
- if (epb_access(dd, sdnum, -1) < 0)
- ret = -1;
- else
- ret = transval & EPB_DATA_MASK;
-
- spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
- if (tries <= 0)
- ret = -1;
- return ret;
-}
-
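
The splice in qib_sd7220_reg_mod() above, "wd = (wd & mask) | (transval &
~mask)", is the usual masked read-modify-write: masked bits come from the new
value, everything else is preserved from the register. A standalone check with
arbitrary values:

    #include <assert.h>

    int main(void)
    {
            unsigned int old = 0xA5, wd = 0xFF, mask = 0x0F;
            unsigned int merged = (wd & mask) | (old & ~mask);

            /* low nibble taken from wd, high nibble preserved from old */
            assert(merged == 0xAF);
            return 0;
    }
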
-#define EPB_ROM_R (2)
-#define EPB_ROM_W (1)
-/*
- * The locations below are all uC-related; use the appropriate UC_CS,
- * depending on which SerDes is used.
- */
-#define EPB_UC_CTL EPB_LOC(6, 0, 0)
-#define EPB_MADDRL EPB_LOC(6, 0, 2)
-#define EPB_MADDRH EPB_LOC(6, 0, 3)
-#define EPB_ROMDATA EPB_LOC(6, 0, 4)
-#define EPB_RAMDATA EPB_LOC(6, 0, 5)
-
-/* Transfer data to/from uC Program RAM of IB or PCIe SerDes */
-static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
- u8 *buf, int cnt, int rd_notwr)
-{
- u16 trans;
- u64 transval;
- u64 csbit;
- int owned;
- int tries;
- int sofar;
- int addr;
- int ret;
- unsigned long flags;
-
- /* Pick appropriate transaction reg and "Chip select" for this serdes */
- switch (sdnum) {
- case IB_7220_SERDES:
- csbit = 1ULL << EPB_IB_UC_CS_SHF;
- trans = kr_ibsd_epb_transaction_reg;
- break;
-
- case PCIE_SERDES0:
- case PCIE_SERDES1:
- /* PCIe SERDES has uC "chip select" in different bit, too */
- csbit = 1ULL << EPB_PCIE_UC_CS_SHF;
- trans = kr_pciesd_epb_transaction_reg;
- break;
-
- default:
- return -1;
- }
-
- spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
-
- owned = epb_access(dd, sdnum, 1);
- if (owned < 0) {
- spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
- return -1;
- }
-
- /*
- * In future code, we may need to distinguish several address ranges,
- * and select various memories based on this. For now, just trim
- * "loc" (location including address and memory select) to
- * "addr" (address within memory). we will only support PRAM
- * The memory is 8KB.
- */
- addr = loc & 0x1FFF;
- for (tries = EPB_TRANS_TRIES; tries; --tries) {
- transval = qib_read_kreg32(dd, trans);
- if (transval & EPB_TRANS_RDY)
- break;
- udelay(5);
- }
-
- sofar = 0;
- if (tries > 0) {
- /*
- * Every "memory" access is doubly-indirect.
- * We set two bytes of address, then read/write
- * one or more bytes of data.
- */
-
- /* First, we set control to "Read" or "Write" */
- transval = csbit | EPB_UC_CTL |
- (rd_notwr ? EPB_ROM_R : EPB_ROM_W);
- tries = epb_trans(dd, trans, transval, &transval);
- while (tries > 0 && sofar < cnt) {
- if (!sofar) {
- /* Only set address at start of chunk */
- int addrbyte = (addr + sofar) >> 8;
-
- transval = csbit | EPB_MADDRH | addrbyte;
- tries = epb_trans(dd, trans, transval,
- &transval);
- if (tries <= 0)
- break;
- addrbyte = (addr + sofar) & 0xFF;
- transval = csbit | EPB_MADDRL | addrbyte;
- tries = epb_trans(dd, trans, transval,
- &transval);
- if (tries <= 0)
- break;
- }
-
- if (rd_notwr)
- transval = csbit | EPB_ROMDATA | EPB_RD;
- else
- transval = csbit | EPB_ROMDATA | buf[sofar];
- tries = epb_trans(dd, trans, transval, &transval);
- if (tries <= 0)
- break;
- if (rd_notwr)
- buf[sofar] = transval & EPB_DATA_MASK;
- ++sofar;
- }
- /* Finally, clear control-bit for Read or Write */
- transval = csbit | EPB_UC_CTL;
- tries = epb_trans(dd, trans, transval, &transval);
- }
-
- ret = sofar;
- /* Release bus. Failure is an error */
- if (epb_access(dd, sdnum, -1) < 0)
- ret = -1;
-
- spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
- if (tries <= 0)
- ret = -1;
- return ret;
-}
-
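
For the doubly-indirect PRAM addressing above: the 13-bit address is split
into a high byte for EPB_MADDRH and a low byte for EPB_MADDRL at the start of
each chunk. A standalone sketch of the split:

    #include <assert.h>

    int main(void)
    {
            int addr = 0x1A2B & 0x1FFF;     /* PRAM is 8KB: 13 address bits */

            assert((addr >> 8) == 0x1A);    /* written via EPB_MADDRH */
            assert((addr & 0xFF) == 0x2B);  /* written via EPB_MADDRL */
            return 0;
    }
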
-#define PROG_CHUNK 64
-
-static int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum,
- const u8 *img, int len, int offset)
-{
- int cnt, sofar, req;
-
- sofar = 0;
- while (sofar < len) {
- req = len - sofar;
- if (req > PROG_CHUNK)
- req = PROG_CHUNK;
- cnt = qib_sd7220_ram_xfer(dd, sdnum, offset + sofar,
- (u8 *)img + sofar, req, 0);
- if (cnt < req) {
- sofar = -1;
- break;
- }
- sofar += req;
- }
- return sofar;
-}
-
-#define VFY_CHUNK 64
-#define SD_PRAM_ERROR_LIMIT 42
-
-static int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum,
- const u8 *img, int len, int offset)
-{
- int cnt, sofar, req, idx, errors;
- unsigned char readback[VFY_CHUNK];
-
- errors = 0;
- sofar = 0;
- while (sofar < len) {
- req = len - sofar;
- if (req > VFY_CHUNK)
- req = VFY_CHUNK;
- cnt = qib_sd7220_ram_xfer(dd, sdnum, sofar + offset,
- readback, req, 1);
- if (cnt < req) {
- /* failed in read itself */
- sofar = -1;
- break;
- }
- for (idx = 0; idx < cnt; ++idx) {
- if (readback[idx] != img[idx+sofar])
- ++errors;
- }
- sofar += cnt;
- }
- return errors ? -errors : sofar;
-}
-
-static int
-qib_sd7220_ib_load(struct qib_devdata *dd, const struct firmware *fw)
-{
- return qib_sd7220_prog_ld(dd, IB_7220_SERDES, fw->data, fw->size, 0);
-}
-
-static int
-qib_sd7220_ib_vfy(struct qib_devdata *dd, const struct firmware *fw)
-{
- return qib_sd7220_prog_vfy(dd, IB_7220_SERDES, fw->data, fw->size, 0);
-}
-
-/*
- * IRQs are not set up at this point in init, so we poll.
- */
-#define IB_SERDES_TRIM_DONE (1ULL << 11)
-#define TRIM_TMO (15)
-
-static int qib_sd_trimdone_poll(struct qib_devdata *dd)
-{
- int trim_tmo, ret;
- uint64_t val;
-
- /*
- * Default to failure, so IBC will not start
- * without IB_SERDES_TRIM_DONE.
- */
- ret = 0;
- for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) {
- val = qib_read_kreg64(dd, kr_ibcstatus);
- if (val & IB_SERDES_TRIM_DONE) {
- ret = 1;
- break;
- }
- msleep(20);
- }
- if (trim_tmo >= TRIM_TMO) {
- qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
- ret = 0;
- }
- return ret;
-}
-
-#define TX_FAST_ELT (9)
-
-/*
- * Set the "negotiation" values for SERDES. These are used by the IB1.2
- * link negotiation. The macros below are an attempt to keep the values a
- * little more human-editable.
- * First, values related to Drive De-emphasis Settings.
- */
-
-#define NUM_DDS_REGS 6
-#define DDS_REG_MAP 0x76A910 /* LSB-first list of regs (in elt 9) to mod */
-
-#define DDS_VAL(amp_d, main_d, ipst_d, ipre_d, amp_s, main_s, ipst_s, ipre_s) \
- { { ((amp_d & 0x1F) << 1) | 1, ((amp_s & 0x1F) << 1) | 1, \
- (main_d << 3) | 4 | (ipre_d >> 2), \
- (main_s << 3) | 4 | (ipre_s >> 2), \
- ((ipst_d & 0xF) << 1) | ((ipre_d & 3) << 6) | 0x21, \
- ((ipst_s & 0xF) << 1) | ((ipre_s & 3) << 6) | 0x21 } }
-
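
Decoding DDS_VAL() by hand for the DDR half of the first 3m-cable row in the
table below (a standalone sketch; the three expressions are copied
field-for-field from the macro above):

    #include <assert.h>

    int main(void)
    {
            int amp_d = 31, main_d = 19, ipst_d = 12, ipre_d = 0;

            assert((((amp_d & 0x1F) << 1) | 1) == 0x3F);          /* amplitude */
            assert(((main_d << 3) | 4 | (ipre_d >> 2)) == 0x9C);  /* main/ipre */
            assert((((ipst_d & 0xF) << 1) | ((ipre_d & 3) << 6) | 0x21) == 0x39);
            return 0;
    }
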
-static struct dds_init {
- uint8_t reg_vals[NUM_DDS_REGS];
-} dds_init_vals[] = {
- /* DDR(FDR) SDR(HDR) */
- /* Vendor recommends below for 3m cable */
-#define DDS_3M 0
- DDS_VAL(31, 19, 12, 0, 29, 22, 9, 0),
- DDS_VAL(31, 12, 15, 4, 31, 15, 15, 1),
- DDS_VAL(31, 13, 15, 3, 31, 16, 15, 0),
- DDS_VAL(31, 14, 15, 2, 31, 17, 14, 0),
- DDS_VAL(31, 15, 15, 1, 31, 18, 13, 0),
- DDS_VAL(31, 16, 15, 0, 31, 19, 12, 0),
- DDS_VAL(31, 17, 14, 0, 31, 20, 11, 0),
- DDS_VAL(31, 18, 13, 0, 30, 21, 10, 0),
- DDS_VAL(31, 20, 11, 0, 28, 23, 8, 0),
- DDS_VAL(31, 21, 10, 0, 27, 24, 7, 0),
- DDS_VAL(31, 22, 9, 0, 26, 25, 6, 0),
- DDS_VAL(30, 23, 8, 0, 25, 26, 5, 0),
- DDS_VAL(29, 24, 7, 0, 23, 27, 4, 0),
- /* Vendor recommends below for 1m cable */
-#define DDS_1M 13
- DDS_VAL(28, 25, 6, 0, 21, 28, 3, 0),
- DDS_VAL(27, 26, 5, 0, 19, 29, 2, 0),
- DDS_VAL(25, 27, 4, 0, 17, 30, 1, 0)
-};
-
-/*
- * Now the RXEQ section of the table.
- */
-/* Hardware packs an element number and register address thus: */
-#define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4))
-#define RXEQ_VAL(elt, adr, val0, val1, val2, val3) \
- {RXEQ_INIT_RDESC((elt), (adr)), {(val0), (val1), (val2), (val3)} }
-
-#define RXEQ_VAL_ALL(elt, adr, val) \
- {RXEQ_INIT_RDESC((elt), (adr)), {(val), (val), (val), (val)} }
-
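
The rdesc packing above puts the element in the low nibble and the register
address in the bits above it; for the first table row, element 7 at register
0x27, that yields 0x277. A standalone check:

    #include <assert.h>

    #define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4))

    int main(void)
    {
            assert(RXEQ_INIT_RDESC(7, 0x27) == 0x277);
            return 0;
    }
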
-#define RXEQ_SDR_DFELTH 0
-#define RXEQ_SDR_TLTH 0
-#define RXEQ_SDR_G1CNT_Z1CNT 0x11
-#define RXEQ_SDR_ZCNT 23
-
-static struct rxeq_init {
- u16 rdesc; /* in form used in SerDesDDSRXEQ */
- u8 rdata[4];
-} rxeq_init_vals[] = {
- /* Set Rcv Eq. to Preset node */
- RXEQ_VAL_ALL(7, 0x27, 0x10),
- /* Set DFELTHFDR/HDR thresholds */
- RXEQ_VAL(7, 8, 0, 0, 0, 0), /* FDR, was 0, 1, 2, 3 */
- RXEQ_VAL(7, 0x21, 0, 0, 0, 0), /* HDR */
- /* Set TLTHFDR/HDR threshold */
- RXEQ_VAL(7, 9, 2, 2, 2, 2), /* FDR, was 0, 2, 4, 6 */
- RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR, was 0, 1, 2, 3 */
- /* Set Preamp setting 2 (ZFR/ZCNT) */
- RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR, was 12, 16, 20, 24 */
- RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR, was 12, 16, 20, 24 */
- /* Set Preamp DC gain and Setting 1 (GFR/GHR) */
- RXEQ_VAL(7, 0x1E, 16, 16, 16, 16), /* FDR, was 16, 17, 18, 20 */
- RXEQ_VAL(7, 0x1F, 16, 16, 16, 16), /* HDR, was 16, 17, 18, 20 */
- /* Toggle RELOCK (in VCDL_CTRL0) to lock to data */
- RXEQ_VAL_ALL(6, 6, 0x20), /* Set D5 High */
- RXEQ_VAL_ALL(6, 6, 0), /* Set D5 Low */
-};
-
-/* There are 17 values from vendor, but IBC only accesses the first 16 */
-#define DDS_ROWS (16)
-#define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals)
-
-static int qib_sd_setvals(struct qib_devdata *dd)
-{
- int idx, midx;
- int min_idx; /* Minimum index for this portion of table */
- uint32_t dds_reg_map;
- u64 __iomem *taddr, *iaddr;
- uint64_t data;
- uint64_t sdctl;
-
- taddr = dd->kregbase + kr_serdes_maptable;
- iaddr = dd->kregbase + kr_serdes_ddsrxeq0;
-
- /*
- * Init the DDS section of the table.
- * Each "row" of the table provokes NUM_DDS_REG writes, to the
- * registers indicated in DDS_REG_MAP.
- */
- sdctl = qib_read_kreg64(dd, kr_ibserdesctrl);
- sdctl = (sdctl & ~(0x1f << 8)) | (NUM_DDS_REGS << 8);
- sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13);
- qib_write_kreg(dd, kr_ibserdesctrl, sdctl);
-
- /*
- * Iterate down table within loop for each register to store.
- */
- dds_reg_map = DDS_REG_MAP;
- for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
- data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT;
- writeq(data, iaddr + idx);
- qib_read_kreg32(dd, kr_scratch);
- dds_reg_map >>= 4;
- for (midx = 0; midx < DDS_ROWS; ++midx) {
- u64 __iomem *daddr = taddr + ((midx << 4) + idx);
-
- data = dds_init_vals[midx].reg_vals[idx];
- writeq(data, daddr);
- qib_read_kreg32(dd, kr_scratch);
- } /* End inner for (vals for this reg, each row) */
- } /* end outer for (regs to be stored) */
-
- /*
- * Init the RXEQ section of the table.
- * This runs in a different order, as the pattern of
- * register references is more complex, but there are only
- * four "data" values per register.
- */
- min_idx = idx; /* RXEQ indices pick up where DDS left off */
- taddr += 0x100; /* RXEQ data is in second half of table */
- /* Iterate through RXEQ register addresses */
- for (idx = 0; idx < RXEQ_ROWS; ++idx) {
- int didx; /* "destination" */
- int vidx;
-
- /* didx is offset by min_idx to address RXEQ range of regs */
- didx = idx + min_idx;
- /* Store the next RXEQ register address */
- writeq(rxeq_init_vals[idx].rdesc, iaddr + didx);
- qib_read_kreg32(dd, kr_scratch);
- /* Iterate through RXEQ values */
- for (vidx = 0; vidx < 4; vidx++) {
- data = rxeq_init_vals[idx].rdata[vidx];
- writeq(data, taddr + (vidx << 6) + idx);
- qib_read_kreg32(dd, kr_scratch);
- }
- } /* end outer for (Reg-writes for RXEQ) */
- return 0;
-}
-
-#define CMUCTRL5 EPB_LOC(7, 0, 0x15)
-#define RXHSCTRL0(chan) EPB_LOC(chan, 6, 0)
-#define VCDL_DAC2(chan) EPB_LOC(chan, 6, 5)
-#define VCDL_CTRL0(chan) EPB_LOC(chan, 6, 6)
-#define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8)
-#define START_EQ2(chan) EPB_LOC(chan, 7, 0x28)
-
-/*
- * Repeat a "store" across all channels of the IB SerDes.
- * Although nominally it inherits the "read value" of the last
- * channel it modified, the only really useful return is <0 for
- * failure, >= 0 for success. The parameter 'loc' is assumed to
- * be the location in some channel of the register to be modified.
- * The caller can specify use of the "gang write" option of EPB,
- * in which case we use the specified channel data for any fields
- * not explicitly written.
- */
-static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
- int mask)
-{
- int ret = -1;
- int chnl;
-
- if (loc & EPB_GLOBAL_WR) {
- /*
- * Our caller has assured us that we can set all four
- * channels at once. Trust that. If mask is not 0xFF,
- * we will read the _specified_ channel for our starting
- * value.
- */
- loc |= (1U << EPB_IB_QUAD0_CS_SHF);
- chnl = (loc >> (4 + EPB_ADDR_SHF)) & 7;
- if (mask != 0xFF) {
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
- loc & ~EPB_GLOBAL_WR, 0, 0);
- if (ret < 0) {
- int sloc = loc >> EPB_ADDR_SHF;
-
- qib_dev_err(dd,
- "pre-read failed: elt %d, addr 0x%X, chnl %d\n",
- (sloc & 0xF),
- (sloc >> 9) & 0x3f, chnl);
- return ret;
- }
- val = (ret & ~mask) | (val & mask);
- }
- loc &= ~(7 << (4+EPB_ADDR_SHF));
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
- if (ret < 0) {
- int sloc = loc >> EPB_ADDR_SHF;
-
- qib_dev_err(dd,
- "Global WR failed: elt %d, addr 0x%X, val %02X\n",
- (sloc & 0xF), (sloc >> 9) & 0x3f, val);
- }
- return ret;
- }
- /* Clear "channel" and set CS so we can simply iterate */
- loc &= ~(7 << (4+EPB_ADDR_SHF));
- loc |= (1U << EPB_IB_QUAD0_CS_SHF);
- for (chnl = 0; chnl < 4; ++chnl) {
- int cloc = loc | (chnl << (4+EPB_ADDR_SHF));
-
- ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask);
- if (ret < 0) {
- int sloc = loc >> EPB_ADDR_SHF;
-
- qib_dev_err(dd,
- "Write failed: elt %d, addr 0x%X, chnl %d, val 0x%02X, mask 0x%02X\n",
- (sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
- val & 0xFF, mask & 0xFF);
- break;
- }
- }
- return ret;
-}
-
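
ibsd_mod_allchnls() above recovers the channel with "(loc >> (4 +
EPB_ADDR_SHF)) & 7", the inverse of the EPB_LOC() packing. A standalone
round-trip check (macro repeated from earlier in this file):

    #include <assert.h>

    #define EPB_ADDR_SHF 8
    #define EPB_LOC(chn, elt, reg) \
            (((elt & 0xf) | ((chn & 7) << 4) | ((reg & 0x3f) << 9)) << \
             EPB_ADDR_SHF)

    int main(void)
    {
            int loc = EPB_LOC(5, 6, 0x0E);

            assert(((loc >> (4 + EPB_ADDR_SHF)) & 7) == 5);
            return 0;
    }
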
-/*
- * Set the Tx values normally modified by IBC in IB1.2 mode to default
- * values, as taken from the first row of the init table.
- */
-static int set_dds_vals(struct qib_devdata *dd, struct dds_init *ddi)
-{
- int ret;
- int idx, reg, data;
- uint32_t regmap;
-
- regmap = DDS_REG_MAP;
- for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
- reg = (regmap & 0xF);
- regmap >>= 4;
- data = ddi->reg_vals[idx];
- /* Vendor says RMW not needed for these regs, use 0xFF mask */
- ret = ibsd_mod_allchnls(dd, EPB_LOC(0, 9, reg), data, 0xFF);
- if (ret < 0)
- break;
- }
- return ret;
-}
-
-/*
- * Set the Rx values normally modified by IBC in IB1.2 mode to default
- * values, as taken from the selected column of the init table.
- */
-static int set_rxeq_vals(struct qib_devdata *dd, int vsel)
-{
- int ret;
- int ridx;
- int cnt = ARRAY_SIZE(rxeq_init_vals);
-
- for (ridx = 0; ridx < cnt; ++ridx) {
- int elt, reg, val, loc;
-
- elt = rxeq_init_vals[ridx].rdesc & 0xF;
- reg = rxeq_init_vals[ridx].rdesc >> 4;
- loc = EPB_LOC(0, elt, reg);
- val = rxeq_init_vals[ridx].rdata[vsel];
- /* mask of 0xFF, because hardware does full-byte store. */
- ret = ibsd_mod_allchnls(dd, loc, val, 0xFF);
- if (ret < 0)
- break;
- }
- return ret;
-}
-
-/*
- * Set the default values (row 0) for DDR Driver De-emphasis.
- * We do this initially and whenever we turn off IB-1.2.
- *
- * The "default" values for Rx equalization are also stored to
- * SerDes registers. Formerly (and still default), we used set 2.
- * For experimenting with cables and link-partners, we allow changing
- * that via a module parameter.
- */
-static unsigned qib_rxeq_set = 2;
-module_param_named(rxeq_default_set, qib_rxeq_set, uint,
- S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(rxeq_default_set,
- "Which set [0..3] of Rx Equalization values is default");
-
-static int qib_internal_presets(struct qib_devdata *dd)
-{
- int ret = 0;
-
- ret = set_dds_vals(dd, dds_init_vals + DDS_3M);
-
- if (ret < 0)
- qib_dev_err(dd, "Failed to set default DDS values\n");
- ret = set_rxeq_vals(dd, qib_rxeq_set & 3);
- if (ret < 0)
- qib_dev_err(dd, "Failed to set default RXEQ values\n");
- return ret;
-}
-
-int qib_sd7220_presets(struct qib_devdata *dd)
-{
- int ret = 0;
-
- if (!dd->cspec->presets_needed)
- return ret;
- dd->cspec->presets_needed = 0;
- /* Assert uC reset, so we don't clash with it. */
- qib_ibsd_reset(dd, 1);
- udelay(2);
- qib_sd_trimdone_monitor(dd, "link-down");
-
- ret = qib_internal_presets(dd);
- return ret;
-}
-
-static int qib_sd_trimself(struct qib_devdata *dd, int val)
-{
- int loc = CMUCTRL5 | (1U << EPB_IB_QUAD0_CS_SHF);
-
- return qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
-}
-
-static int qib_sd_early(struct qib_devdata *dd)
-{
- int ret;
-
- ret = ibsd_mod_allchnls(dd, RXHSCTRL0(0) | EPB_GLOBAL_WR, 0xD4, 0xFF);
- if (ret < 0)
- goto bail;
- ret = ibsd_mod_allchnls(dd, START_EQ1(0) | EPB_GLOBAL_WR, 0x10, 0xFF);
- if (ret < 0)
- goto bail;
- ret = ibsd_mod_allchnls(dd, START_EQ2(0) | EPB_GLOBAL_WR, 0x30, 0xFF);
-bail:
- return ret;
-}
-
-#define BACTRL(chnl) EPB_LOC(chnl, 6, 0x0E)
-#define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6)
-#define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF)
-
-static int qib_sd_dactrim(struct qib_devdata *dd)
-{
- int ret;
-
- ret = ibsd_mod_allchnls(dd, VCDL_DAC2(0) | EPB_GLOBAL_WR, 0x2D, 0xFF);
- if (ret < 0)
- goto bail;
-
- /* more fine-tuning of what will be default */
- ret = ibsd_mod_allchnls(dd, VCDL_CTRL2(0), 3, 0xF);
- if (ret < 0)
- goto bail;
-
- ret = ibsd_mod_allchnls(dd, BACTRL(0) | EPB_GLOBAL_WR, 0x40, 0xFF);
- if (ret < 0)
- goto bail;
-
- ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
- if (ret < 0)
- goto bail;
-
- ret = ibsd_mod_allchnls(dd, RXHSSTATUS(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
- if (ret < 0)
- goto bail;
-
- /*
- * Delay for max possible number of steps, with slop.
- * Each step is about 4usec.
- */
- udelay(415);
-
- ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x00, 0xFF);
-
-bail:
- return ret;
-}
-
-#define RELOCK_FIRST_MS 3
-#define RXLSPPM(chan) EPB_LOC(chan, 0, 2)
-void toggle_7220_rclkrls(struct qib_devdata *dd)
-{
- int loc = RXLSPPM(0) | EPB_GLOBAL_WR;
- int ret;
-
- ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
- if (ret < 0)
- qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
- else {
- udelay(1);
- ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
- }
- /* And again for good measure */
- udelay(1);
- ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
- if (ret < 0)
- qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
- else {
- udelay(1);
- ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
- }
- /* Now reset xgxs and IBC to complete the recovery */
- dd->f_xgxs_reset(dd->pport);
-}
-
-/*
- * Shut down the timer that polls for relock occasions, if needed.
- * This is "hooked" from qib_7220_quiet_serdes(), which is called
- * just before qib_shutdown_device() in qib_driver.c shuts down all
- * the other timers.
- */
-void shutdown_7220_relock_poll(struct qib_devdata *dd)
-{
- if (dd->cspec->relock_timer_active)
- timer_delete_sync(&dd->cspec->relock_timer);
-}
-
-static unsigned qib_relock_by_timer = 1;
-module_param_named(relock_by_timer, qib_relock_by_timer, uint,
- S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up");
-
-static void qib_run_relock(struct timer_list *t)
-{
- struct qib_chip_specific *cs = timer_container_of(cs, t, relock_timer);
- struct qib_devdata *dd = cs->dd;
- struct qib_pportdata *ppd = dd->pport;
- int timeoff;
-
- /*
- * Check the link-training state for a "stuck" state when down.
- * If found, try a relock and schedule another try at an
- * exponentially growing delay, maxed at one second.
- * If not stuck, our work is done.
- */
- if ((dd->flags & QIB_INITTED) && !(ppd->lflags &
- (QIBL_IB_AUTONEG_INPROG | QIBL_LINKINIT | QIBL_LINKARMED |
- QIBL_LINKACTIVE))) {
- if (qib_relock_by_timer) {
- if (!(ppd->lflags & QIBL_IB_LINK_DISABLED))
- toggle_7220_rclkrls(dd);
- }
- /* re-set timer for next check */
- timeoff = cs->relock_interval << 1;
- if (timeoff > HZ)
- timeoff = HZ;
- cs->relock_interval = timeoff;
- } else
- timeoff = HZ;
- mod_timer(&cs->relock_timer, jiffies + timeoff);
-}
-
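
The retry cadence in qib_run_relock() above is a capped exponential backoff:
each pass doubles relock_interval until it reaches one second. A standalone
model (HZ fixed at 250 purely for illustration; the kernel value is
config-dependent):

    #include <stdio.h>

    #define HZ 250  /* illustrative only */

    int main(void)
    {
            int interval = 3;       /* ~RELOCK_FIRST_MS in jiffies */
            int i;

            for (i = 0; i < 10; i++) {
                    printf("next relock check in %d jiffies\n", interval);
                    interval <<= 1;         /* double... */
                    if (interval > HZ)
                            interval = HZ;  /* ...capped at one second */
            }
            return 0;
    }
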
-void set_7220_relock_poll(struct qib_devdata *dd, int ibup)
-{
- struct qib_chip_specific *cs = dd->cspec;
-
- if (ibup) {
- /* We are now up, relax timer to 1 second interval */
- if (cs->relock_timer_active) {
- cs->relock_interval = HZ;
- mod_timer(&cs->relock_timer, jiffies + HZ);
- }
- } else {
- /* Transition to down, (re-)set timer to short interval. */
- unsigned int timeout;
-
- timeout = msecs_to_jiffies(RELOCK_FIRST_MS);
- if (timeout == 0)
- timeout = 1;
- /* If timer has not yet been started, do so. */
- if (!cs->relock_timer_active) {
- cs->relock_timer_active = 1;
- timer_setup(&cs->relock_timer, qib_run_relock, 0);
- cs->relock_interval = timeout;
- cs->relock_timer.expires = jiffies + timeout;
- add_timer(&cs->relock_timer);
- } else {
- cs->relock_interval = timeout;
- mod_timer(&cs->relock_timer, jiffies + timeout);
- }
- }
-}
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
deleted file mode 100644
index 5e86cbf7d70e..000000000000
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ /dev/null
@@ -1,999 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2007 - 2012 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/spinlock.h>
-#include <linux/netdevice.h>
-#include <linux/moduleparam.h>
-
-#include "qib.h"
-#include "qib_common.h"
-
-/* default pio off, sdma on */
-static ushort sdma_descq_cnt = 256;
-module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
-MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
-
-/*
- * Bits defined in the send DMA descriptor.
- */
-#define SDMA_DESC_LAST (1ULL << 11)
-#define SDMA_DESC_FIRST (1ULL << 12)
-#define SDMA_DESC_DMA_HEAD (1ULL << 13)
-#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
-#define SDMA_DESC_INTR (1ULL << 15)
-#define SDMA_DESC_COUNT_LSB 16
-#define SDMA_DESC_GEN_LSB 30
-
-/* declare all statics here rather than keep sorting */
-static int alloc_sdma(struct qib_pportdata *);
-static void sdma_complete(struct kref *);
-static void sdma_finalput(struct qib_sdma_state *);
-static void sdma_get(struct qib_sdma_state *);
-static void sdma_put(struct qib_sdma_state *);
-static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
-static void sdma_start_sw_clean_up(struct qib_pportdata *);
-static void sdma_sw_clean_up_task(struct tasklet_struct *);
-static void unmap_desc(struct qib_pportdata *, unsigned);
-
-static void sdma_get(struct qib_sdma_state *ss)
-{
- kref_get(&ss->kref);
-}
-
-static void sdma_complete(struct kref *kref)
-{
- struct qib_sdma_state *ss =
- container_of(kref, struct qib_sdma_state, kref);
-
- complete(&ss->comp);
-}
-
-static void sdma_put(struct qib_sdma_state *ss)
-{
- kref_put(&ss->kref, sdma_complete);
-}
-
-static void sdma_finalput(struct qib_sdma_state *ss)
-{
- sdma_put(ss);
- wait_for_completion(&ss->comp);
-}
-
-/*
- * Complete all the sdma requests on the active list, in the correct
- * order, and with appropriate processing. Called when cleaning up
- * after sdma shutdown, and when new sdma requests are submitted for
- * a link that is down. This matches what is done for requests
- * that complete normally, it's just the full list.
- *
- * Must be called with sdma_lock held
- */
-static void clear_sdma_activelist(struct qib_pportdata *ppd)
-{
- struct qib_sdma_txreq *txp, *txp_next;
-
- list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
- list_del_init(&txp->list);
- if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
- unsigned idx;
-
- idx = txp->start_idx;
- while (idx != txp->next_descq_idx) {
- unmap_desc(ppd, idx);
- if (++idx == ppd->sdma_descq_cnt)
- idx = 0;
- }
- }
- if (txp->callback)
- (*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
- }
-}
-
-static void sdma_sw_clean_up_task(struct tasklet_struct *t)
-{
- struct qib_pportdata *ppd = from_tasklet(ppd, t,
- sdma_sw_clean_up_task);
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
- /*
- * At this point, the following should always be true:
- * - We are halted, so no more descriptors are getting retired.
- * - We are not running, so no one is submitting new work.
- * - Only we can send the e40_sw_cleaned, so we can't start
- * running again until we say so. So, the active list and
- * descq are ours to play with.
- */
-
- /* Process all retired requests. */
- qib_sdma_make_progress(ppd);
-
- clear_sdma_activelist(ppd);
-
- /*
- * Resync count of added and removed. It is VERY important that
- * sdma_descq_removed NEVER decrement - user_sdma depends on it.
- */
- ppd->sdma_descq_removed = ppd->sdma_descq_added;
-
- /*
- * Reset our notion of head and tail.
- * Note that the HW registers will be reset when switching states
- * due to calling __qib_sdma_process_event() below.
- */
- ppd->sdma_descq_tail = 0;
- ppd->sdma_descq_head = 0;
- ppd->sdma_head_dma[0] = 0;
- ppd->sdma_generation = 0;
-
- __qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);
-
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-}
-
-/*
- * This is called when changing to state qib_sdma_state_s10_hw_start_up_wait
- * as a result of send buffer errors or send DMA descriptor errors.
- * We want to disarm the buffers in these cases.
- */
-static void sdma_hw_start_up(struct qib_pportdata *ppd)
-{
- struct qib_sdma_state *ss = &ppd->sdma_state;
- unsigned bufno;
-
- for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
- ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));
-
- ppd->dd->f_sdma_hw_start_up(ppd);
-}
-
-static void sdma_sw_tear_down(struct qib_pportdata *ppd)
-{
- struct qib_sdma_state *ss = &ppd->sdma_state;
-
- /* Releasing this reference means the state machine has stopped. */
- sdma_put(ss);
-}
-
-static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
-{
- tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
-}
-
-static void sdma_set_state(struct qib_pportdata *ppd,
- enum qib_sdma_states next_state)
-{
- struct qib_sdma_state *ss = &ppd->sdma_state;
- struct sdma_set_state_action *action = ss->set_state_action;
- unsigned op = 0;
-
- /* debugging bookkeeping */
- ss->previous_state = ss->current_state;
- ss->previous_op = ss->current_op;
-
- ss->current_state = next_state;
-
- if (action[next_state].op_enable)
- op |= QIB_SDMA_SENDCTRL_OP_ENABLE;
-
- if (action[next_state].op_intenable)
- op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;
-
- if (action[next_state].op_halt)
- op |= QIB_SDMA_SENDCTRL_OP_HALT;
-
- if (action[next_state].op_drain)
- op |= QIB_SDMA_SENDCTRL_OP_DRAIN;
-
- if (action[next_state].go_s99_running_tofalse)
- ss->go_s99_running = 0;
-
- if (action[next_state].go_s99_running_totrue)
- ss->go_s99_running = 1;
-
- ss->current_op = op;
-
- ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
-}
-
-static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
-{
- __le64 *descqp = &ppd->sdma_descq[head].qw[0];
- u64 desc[2];
- dma_addr_t addr;
- size_t len;
-
- desc[0] = le64_to_cpu(descqp[0]);
- desc[1] = le64_to_cpu(descqp[1]);
-
- addr = (desc[1] << 32) | (desc[0] >> 32);
- len = (desc[0] >> 14) & (0x7ffULL << 2);
- dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
-}
-
-static int alloc_sdma(struct qib_pportdata *ppd)
-{
- ppd->sdma_descq_cnt = sdma_descq_cnt;
- if (!ppd->sdma_descq_cnt)
- ppd->sdma_descq_cnt = 256;
-
- /* Allocate memory for SendDMA descriptor FIFO */
- ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
- ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
- GFP_KERNEL);
-
- if (!ppd->sdma_descq) {
- qib_dev_err(ppd->dd,
- "failed to allocate SendDMA descriptor FIFO memory\n");
- goto bail;
- }
-
- /* Allocate memory for DMA of head register to memory */
- ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
- PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
- if (!ppd->sdma_head_dma) {
- qib_dev_err(ppd->dd,
- "failed to allocate SendDMA head memory\n");
- goto cleanup_descq;
- }
- ppd->sdma_head_dma[0] = 0;
- return 0;
-
-cleanup_descq:
- dma_free_coherent(&ppd->dd->pcidev->dev,
- ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
- ppd->sdma_descq_phys);
- ppd->sdma_descq = NULL;
- ppd->sdma_descq_phys = 0;
-bail:
- ppd->sdma_descq_cnt = 0;
- return -ENOMEM;
-}
-
-static void free_sdma(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
-
- if (ppd->sdma_head_dma) {
- dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
- (void *)ppd->sdma_head_dma,
- ppd->sdma_head_phys);
- ppd->sdma_head_dma = NULL;
- ppd->sdma_head_phys = 0;
- }
-
- if (ppd->sdma_descq) {
- dma_free_coherent(&dd->pcidev->dev,
- ppd->sdma_descq_cnt * sizeof(u64[2]),
- ppd->sdma_descq, ppd->sdma_descq_phys);
- ppd->sdma_descq = NULL;
- ppd->sdma_descq_phys = 0;
- }
-}
-
-static inline void make_sdma_desc(struct qib_pportdata *ppd,
- u64 *sdmadesc, u64 addr, u64 dwlen,
- u64 dwoffset)
-{
- WARN_ON(addr & 3);
- /* SDmaPhyAddr[47:32] */
- sdmadesc[1] = addr >> 32;
- /* SDmaPhyAddr[31:0] */
- sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
- /* SDmaGeneration[1:0] */
- sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
- SDMA_DESC_GEN_LSB;
- /* SDmaDwordCount[10:0] */
- sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
- /* SDmaBufOffset[12:2] */
- sdmadesc[0] |= dwoffset & 0x7ffULL;
-}
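The packing above is mirrored by the extraction in unmap_desc() earlier and dump_sdma_state() below; note that unmap_desc()'s `(desc[0] >> 14) & (0x7ffULL << 2)` pulls the dword count out pre-scaled to bytes. As a cross-check, here is the inverse unpacking using the same bit positions (a hypothetical helper for illustration, not driver code):

/* Hypothetical decode helper; illustrative only, not part of the driver. */
static void decode_sdma_desc(const u64 desc[2], u64 *addr, u32 *gen,
			     u32 *dwlen, u32 *dwoffset)
{
	/* SDmaPhyAddr[47:0]; low two bits are always zero */
	*addr = (desc[1] << 32) | ((desc[0] >> 32) & 0xfffffffcULL);
	/* SDmaGeneration[1:0] */
	*gen = (desc[0] >> SDMA_DESC_GEN_LSB) & 3;
	/* SDmaDwordCount[10:0], in 32-bit words */
	*dwlen = (desc[0] >> SDMA_DESC_COUNT_LSB) & 0x7ff;
	/* SDmaBufOffset[12:2], stored in dwords */
	*dwoffset = desc[0] & 0x7ff;
}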
-
-/* sdma_lock must be held */
-int qib_sdma_make_progress(struct qib_pportdata *ppd)
-{
- struct list_head *lp = NULL;
- struct qib_sdma_txreq *txp = NULL;
- struct qib_devdata *dd = ppd->dd;
- int progress = 0;
- u16 hwhead;
- u16 idx = 0;
-
- hwhead = dd->f_sdma_gethead(ppd);
-
- /* The reason for some of the complexity of this code is that
- * not all descriptors have corresponding txps. So, we have to
- * be able to skip over descs until we wander into the range of
- * the next txp on the list.
- */
-
- if (!list_empty(&ppd->sdma_activelist)) {
- lp = ppd->sdma_activelist.next;
- txp = list_entry(lp, struct qib_sdma_txreq, list);
- idx = txp->start_idx;
- }
-
- while (ppd->sdma_descq_head != hwhead) {
- /* if desc is part of this txp, unmap if needed */
- if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
- (idx == ppd->sdma_descq_head)) {
- unmap_desc(ppd, ppd->sdma_descq_head);
- if (++idx == ppd->sdma_descq_cnt)
- idx = 0;
- }
-
- /* increment dequeued desc count */
- ppd->sdma_descq_removed++;
-
- /* advance head, wrap if needed */
- if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
- ppd->sdma_descq_head = 0;
-
- /* if now past this txp's descs, do the callback */
- if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
- /* remove from active list */
- list_del_init(&txp->list);
- if (txp->callback)
- (*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
- /* see if there is another txp */
- if (list_empty(&ppd->sdma_activelist))
- txp = NULL;
- else {
- lp = ppd->sdma_activelist.next;
- txp = list_entry(lp, struct qib_sdma_txreq,
- list);
- idx = txp->start_idx;
- }
- }
- progress = 1;
- }
- if (progress)
- qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
- return progress;
-}
-
-/*
- * This is called from interrupt context.
- */
-void qib_sdma_intr(struct qib_pportdata *ppd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
- __qib_sdma_intr(ppd);
-
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-}
-
-void __qib_sdma_intr(struct qib_pportdata *ppd)
-{
- if (__qib_sdma_running(ppd)) {
- qib_sdma_make_progress(ppd);
- if (!list_empty(&ppd->sdma_userpending))
- qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
- }
-}
-
-int qib_setup_sdma(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
- int ret = 0;
-
- ret = alloc_sdma(ppd);
- if (ret)
- goto bail;
-
- /* set consistent sdma state */
- ppd->dd->f_sdma_init_early(ppd);
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-
- /* set up reference counting */
- kref_init(&ppd->sdma_state.kref);
- init_completion(&ppd->sdma_state.comp);
-
- ppd->sdma_generation = 0;
- ppd->sdma_descq_head = 0;
- ppd->sdma_descq_removed = 0;
- ppd->sdma_descq_added = 0;
-
- ppd->sdma_intrequest = 0;
- INIT_LIST_HEAD(&ppd->sdma_userpending);
-
- INIT_LIST_HEAD(&ppd->sdma_activelist);
-
- tasklet_setup(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task);
-
- ret = dd->f_init_sdma_regs(ppd);
- if (ret)
- goto bail_alloc;
-
- qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);
-
- return 0;
-
-bail_alloc:
- qib_teardown_sdma(ppd);
-bail:
- return ret;
-}
-
-void qib_teardown_sdma(struct qib_pportdata *ppd)
-{
- qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
-
- /*
- * This waits for the state machine to exit so it is not
- * necessary to kill the sdma_sw_clean_up_task to make sure
- * it is not running.
- */
- sdma_finalput(&ppd->sdma_state);
-
- free_sdma(ppd);
-}
-
-int qib_sdma_running(struct qib_pportdata *ppd)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- ret = __qib_sdma_running(ppd);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-
- return ret;
-}
-
-/*
- * Complete a request when sdma is not running; it is likely the only
- * request, but to simplify the code, always queue it, then process the
- * full activelist. We process the entire list to ensure that this
- * particular request gets its callback, in the correct order.
- * Must be called with sdma_lock held.
- */
-static void complete_sdma_err_req(struct qib_pportdata *ppd,
- struct qib_verbs_txreq *tx)
-{
- struct qib_qp_priv *priv = tx->qp->priv;
-
- atomic_inc(&priv->s_dma_busy);
- /* no sdma descriptors, so no unmap_desc */
- tx->txreq.start_idx = 0;
- tx->txreq.next_descq_idx = 0;
- list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
- clear_sdma_activelist(ppd);
-}
-
-/*
- * This function queues one IB packet onto the send DMA queue per call.
- * The caller is responsible for checking:
- * 1) The number of send DMA descriptor entries is less than the size of
- * the descriptor queue.
- * 2) The IB SGE addresses and lengths are 32-bit aligned
- * (except possibly the last SGE's length)
- * 3) The SGE addresses are suitable for passing to dma_map_single().
- */
-int qib_sdma_verbs_send(struct qib_pportdata *ppd,
- struct rvt_sge_state *ss, u32 dwords,
- struct qib_verbs_txreq *tx)
-{
- unsigned long flags;
- struct rvt_sge *sge;
- struct rvt_qp *qp;
- int ret = 0;
- u16 tail;
- __le64 *descqp;
- u64 sdmadesc[2];
- u32 dwoffset;
- dma_addr_t addr;
- struct qib_qp_priv *priv;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
-retry:
- if (unlikely(!__qib_sdma_running(ppd))) {
- complete_sdma_err_req(ppd, tx);
- goto unlock;
- }
-
- if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
- if (qib_sdma_make_progress(ppd))
- goto retry;
- if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
- ppd->dd->f_sdma_set_desc_cnt(ppd,
- ppd->sdma_descq_cnt / 2);
- goto busy;
- }
-
- dwoffset = tx->hdr_dwords;
- make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);
-
- sdmadesc[0] |= SDMA_DESC_FIRST;
- if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
- sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
-
- /* write to the descq */
- tail = ppd->sdma_descq_tail;
- descqp = &ppd->sdma_descq[tail].qw[0];
- *descqp++ = cpu_to_le64(sdmadesc[0]);
- *descqp++ = cpu_to_le64(sdmadesc[1]);
-
- /* increment the tail */
- if (++tail == ppd->sdma_descq_cnt) {
- tail = 0;
- descqp = &ppd->sdma_descq[0].qw[0];
- ++ppd->sdma_generation;
- }
-
- tx->txreq.start_idx = tail;
-
- sge = &ss->sge;
- while (dwords) {
- u32 dw;
- u32 len = rvt_get_sge_length(sge, dwords << 2);
-
- dw = (len + 3) >> 2;
- addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
- dw << 2, DMA_TO_DEVICE);
- if (dma_mapping_error(&ppd->dd->pcidev->dev, addr)) {
- ret = -ENOMEM;
- goto unmap;
- }
- sdmadesc[0] = 0;
- make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
- /* SDmaUseLargeBuf has to be set in every descriptor */
- if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
- sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
- /* write to the descq */
- *descqp++ = cpu_to_le64(sdmadesc[0]);
- *descqp++ = cpu_to_le64(sdmadesc[1]);
-
- /* increment the tail */
- if (++tail == ppd->sdma_descq_cnt) {
- tail = 0;
- descqp = &ppd->sdma_descq[0].qw[0];
- ++ppd->sdma_generation;
- }
- rvt_update_sge(ss, len, false);
- dwoffset += dw;
- dwords -= dw;
- }
-
- if (!tail)
- descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
- descqp -= 2;
- descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
- if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
- descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
- if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
- descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);
- priv = tx->qp->priv;
- atomic_inc(&priv->s_dma_busy);
- tx->txreq.next_descq_idx = tail;
- ppd->dd->f_sdma_update_tail(ppd, tail);
- ppd->sdma_descq_added += tx->txreq.sg_count;
- list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
- goto unlock;
-
-unmap:
- for (;;) {
- if (!tail)
- tail = ppd->sdma_descq_cnt - 1;
- else
- tail--;
- if (tail == ppd->sdma_descq_tail)
- break;
- unmap_desc(ppd, tail);
- }
- qp = tx->qp;
- priv = qp->priv;
- qib_put_txreq(tx);
- spin_lock(&qp->r_lock);
- spin_lock(&qp->s_lock);
- if (qp->ibqp.qp_type == IB_QPT_RC) {
- /* XXX what about error sending RDMA read responses? */
- if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)
- rvt_error_qp(qp, IB_WC_GENERAL_ERR);
- } else if (qp->s_wqe)
- rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
- spin_unlock(&qp->s_lock);
- spin_unlock(&qp->r_lock);
- /* return zero to process the next send work request */
- goto unlock;
-
-busy:
- qp = tx->qp;
- priv = qp->priv;
- spin_lock(&qp->s_lock);
- if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
- struct qib_ibdev *dev;
-
- /*
- * If we couldn't queue the DMA request, save the info
- * and try again later rather than destroying the
- * buffer and undoing the side effects of the copy.
- */
- tx->ss = ss;
- tx->dwords = dwords;
- priv->s_tx = tx;
- dev = &ppd->dd->verbs_dev;
- spin_lock(&dev->rdi.pending_lock);
- if (list_empty(&priv->iowait)) {
- struct qib_ibport *ibp;
-
- ibp = &ppd->ibport_data;
- ibp->rvp.n_dmawait++;
- qp->s_flags |= RVT_S_WAIT_DMA_DESC;
- list_add_tail(&priv->iowait, &dev->dmawait);
- }
- spin_unlock(&dev->rdi.pending_lock);
- qp->s_flags &= ~RVT_S_BUSY;
- spin_unlock(&qp->s_lock);
- ret = -EBUSY;
- } else {
- spin_unlock(&qp->s_lock);
- qib_put_txreq(tx);
- }
-unlock:
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- return ret;
-}
-
-/*
- * sdma_lock should be acquired before calling this routine
- */
-void dump_sdma_state(struct qib_pportdata *ppd)
-{
- struct qib_sdma_desc *descq;
- struct qib_sdma_txreq *txp, *txpnext;
- __le64 *descqp;
- u64 desc[2];
- u64 addr;
- u16 gen, dwlen, dwoffset;
- u16 head, tail, cnt;
-
- head = ppd->sdma_descq_head;
- tail = ppd->sdma_descq_tail;
- cnt = qib_sdma_descq_freecnt(ppd);
- descq = ppd->sdma_descq;
-
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA ppd->sdma_descq_head: %u\n", head);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA ppd->sdma_descq_tail: %u\n", tail);
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA sdma_descq_freecnt: %u\n", cnt);
-
- /* print info for each entry in the descriptor queue */
- while (head != tail) {
- char flags[6] = { 'x', 'x', 'x', 'x', 'x', 0 };
-
- descqp = &descq[head].qw[0];
- desc[0] = le64_to_cpu(descqp[0]);
- desc[1] = le64_to_cpu(descqp[1]);
- flags[0] = (desc[0] & 1<<15) ? 'I' : '-';
- flags[1] = (desc[0] & 1<<14) ? 'L' : 'S';
- flags[2] = (desc[0] & 1<<13) ? 'H' : '-';
- flags[3] = (desc[0] & 1<<12) ? 'F' : '-';
- flags[4] = (desc[0] & 1<<11) ? 'L' : '-';
- addr = (desc[1] << 32) | ((desc[0] >> 32) & 0xfffffffcULL);
- gen = (desc[0] >> 30) & 3ULL;
- dwlen = (desc[0] >> 14) & (0x7ffULL << 2);
- dwoffset = (desc[0] & 0x7ffULL) << 2;
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes offset:%u bytes\n",
- head, flags, addr, gen, dwlen, dwoffset);
- if (++head == ppd->sdma_descq_cnt)
- head = 0;
- }
-
- /* print dma descriptor indices from the TX requests */
- list_for_each_entry_safe(txp, txpnext, &ppd->sdma_activelist,
- list)
- qib_dev_porterr(ppd->dd, ppd->port,
- "SDMA txp->start_idx: %u txp->next_descq_idx: %u\n",
- txp->start_idx, txp->next_descq_idx);
-}
-
-void qib_sdma_process_event(struct qib_pportdata *ppd,
- enum qib_sdma_events event)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
- __qib_sdma_process_event(ppd, event);
-
- if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
- qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
-
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-}
-
-void __qib_sdma_process_event(struct qib_pportdata *ppd,
- enum qib_sdma_events event)
-{
- struct qib_sdma_state *ss = &ppd->sdma_state;
-
- switch (ss->current_state) {
- case qib_sdma_state_s00_hw_down:
- switch (event) {
- case qib_sdma_event_e00_go_hw_down:
- break;
- case qib_sdma_event_e30_go_running:
-   /*
-    * If down, but running is requested (usually the result
-    * of a link up), then we need to start up.
-    * This can happen when hw down is requested while
-    * bringing the link up with traffic active on
-    * the 7220, e.g.
-    */
- ss->go_s99_running = 1;
- fallthrough; /* and start dma engine */
- case qib_sdma_event_e10_go_hw_start:
- /* This reference means the state machine is started */
- sdma_get(&ppd->sdma_state);
- sdma_set_state(ppd,
- qib_sdma_state_s10_hw_start_up_wait);
- break;
- case qib_sdma_event_e20_hw_started:
- break;
- case qib_sdma_event_e40_sw_cleaned:
- sdma_sw_tear_down(ppd);
- break;
- case qib_sdma_event_e50_hw_cleaned:
- break;
- case qib_sdma_event_e60_hw_halted:
- break;
- case qib_sdma_event_e70_go_idle:
- break;
- case qib_sdma_event_e7220_err_halted:
- break;
- case qib_sdma_event_e7322_err_halted:
- break;
- case qib_sdma_event_e90_timer_tick:
- break;
- }
- break;
-
- case qib_sdma_state_s10_hw_start_up_wait:
- switch (event) {
- case qib_sdma_event_e00_go_hw_down:
- sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
- sdma_sw_tear_down(ppd);
- break;
- case qib_sdma_event_e10_go_hw_start:
- break;
- case qib_sdma_event_e20_hw_started:
- sdma_set_state(ppd, ss->go_s99_running ?
- qib_sdma_state_s99_running :
- qib_sdma_state_s20_idle);
- break;
- case qib_sdma_event_e30_go_running:
- ss->go_s99_running = 1;
- break;
- case qib_sdma_event_e40_sw_cleaned:
- break;
- case qib_sdma_event_e50_hw_cleaned:
- break;
- case qib_sdma_event_e60_hw_halted:
- break;
- case qib_sdma_event_e70_go_idle:
- ss->go_s99_running = 0;
- break;
- case qib_sdma_event_e7220_err_halted:
- break;
- case qib_sdma_event_e7322_err_halted:
- break;
- case qib_sdma_event_e90_timer_tick:
- break;
- }
- break;
-
- case qib_sdma_state_s20_idle:
- switch (event) {
- case qib_sdma_event_e00_go_hw_down:
- sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
- sdma_sw_tear_down(ppd);
- break;
- case qib_sdma_event_e10_go_hw_start:
- break;
- case qib_sdma_event_e20_hw_started:
- break;
- case qib_sdma_event_e30_go_running:
- sdma_set_state(ppd, qib_sdma_state_s99_running);
- ss->go_s99_running = 1;
- break;
- case qib_sdma_event_e40_sw_cleaned:
- break;
- case qib_sdma_event_e50_hw_cleaned:
- break;
- case qib_sdma_event_e60_hw_halted:
- break;
- case qib_sdma_event_e70_go_idle:
- break;
- case qib_sdma_event_e7220_err_halted:
- break;
- case qib_sdma_event_e7322_err_halted:
- break;
- case qib_sdma_event_e90_timer_tick:
- break;
- }
- break;
-
- case qib_sdma_state_s30_sw_clean_up_wait:
- switch (event) {
- case qib_sdma_event_e00_go_hw_down:
- sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
- break;
- case qib_sdma_event_e10_go_hw_start:
- break;
- case qib_sdma_event_e20_hw_started:
- break;
- case qib_sdma_event_e30_go_running:
- ss->go_s99_running = 1;
- break;
- case qib_sdma_event_e40_sw_cleaned:
- sdma_set_state(ppd,
- qib_sdma_state_s10_hw_start_up_wait);
- sdma_hw_start_up(ppd);
- break;
- case qib_sdma_event_e50_hw_cleaned:
- break;
- case qib_sdma_event_e60_hw_halted:
- break;
- case qib_sdma_event_e70_go_idle:
- ss->go_s99_running = 0;
- break;
- case qib_sdma_event_e7220_err_halted:
- break;
- case qib_sdma_event_e7322_err_halted:
- break;
- case qib_sdma_event_e90_timer_tick:
- break;
- }
- break;
-
- case qib_sdma_state_s40_hw_clean_up_wait:
- switch (event) {
- case qib_sdma_event_e00_go_hw_down:
- sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
- sdma_start_sw_clean_up(ppd);
- break;
- case qib_sdma_event_e10_go_hw_start:
- break;
- case qib_sdma_event_e20_hw_started:
- break;
- case qib_sdma_event_e30_go_running:
- ss->go_s99_running = 1;
- break;
- case qib_sdma_event_e40_sw_cleaned:
- break;
- case qib_sdma_event_e50_hw_cleaned:
- sdma_set_state(ppd,
- qib_sdma_state_s30_sw_clean_up_wait);
- sdma_start_sw_clean_up(ppd);
- break;
- case qib_sdma_event_e60_hw_halted:
- break;
- case qib_sdma_event_e70_go_idle:
- ss->go_s99_running = 0;
- break;
- case qib_sdma_event_e7220_err_halted:
- break;
- case qib_sdma_event_e7322_err_halted:
- break;
- case qib_sdma_event_e90_timer_tick:
- break;
- }
- break;
-
- case qib_sdma_state_s50_hw_halt_wait:
- switch (event) {
- case qib_sdma_event_e00_go_hw_down:
- sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
- sdma_start_sw_clean_up(ppd);
- break;
- case qib_sdma_event_e10_go_hw_start:
- break;
- case qib_sdma_event_e20_hw_started:
- break;
- case qib_sdma_event_e30_go_running:
- ss->go_s99_running = 1;
- break;
- case qib_sdma_event_e40_sw_cleaned:
- break;
- case qib_sdma_event_e50_hw_cleaned:
- break;
- case qib_sdma_event_e60_hw_halted:
- sdma_set_state(ppd,
- qib_sdma_state_s40_hw_clean_up_wait);
- ppd->dd->f_sdma_hw_clean_up(ppd);
- break;
- case qib_sdma_event_e70_go_idle:
- ss->go_s99_running = 0;
- break;
- case qib_sdma_event_e7220_err_halted:
- break;
- case qib_sdma_event_e7322_err_halted:
- break;
- case qib_sdma_event_e90_timer_tick:
- break;
- }
- break;
-
- case qib_sdma_state_s99_running:
- switch (event) {
- case qib_sdma_event_e00_go_hw_down:
- sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
- sdma_start_sw_clean_up(ppd);
- break;
- case qib_sdma_event_e10_go_hw_start:
- break;
- case qib_sdma_event_e20_hw_started:
- break;
- case qib_sdma_event_e30_go_running:
- break;
- case qib_sdma_event_e40_sw_cleaned:
- break;
- case qib_sdma_event_e50_hw_cleaned:
- break;
- case qib_sdma_event_e60_hw_halted:
- sdma_set_state(ppd,
- qib_sdma_state_s30_sw_clean_up_wait);
- sdma_start_sw_clean_up(ppd);
- break;
- case qib_sdma_event_e70_go_idle:
- sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
- ss->go_s99_running = 0;
- break;
- case qib_sdma_event_e7220_err_halted:
- sdma_set_state(ppd,
- qib_sdma_state_s30_sw_clean_up_wait);
- sdma_start_sw_clean_up(ppd);
- break;
- case qib_sdma_event_e7322_err_halted:
- sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
- break;
- case qib_sdma_event_e90_timer_tick:
- break;
- }
- break;
- }
-
- ss->last_event = event;
-}
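A summary of the common error-free path through the switch above, as a reading aid derived from the coded transitions (not additional driver logic):

/*
 *   s00_hw_down          --e10_go_hw_start--> s10_hw_start_up_wait
 *   s10_hw_start_up_wait --e20_hw_started---> s20_idle, or straight to
 *                                             s99_running if go_s99_running
 *                                             was already set
 *   s20_idle             --e30_go_running---> s99_running
 *   s99_running          --e60_hw_halted----> s30_sw_clean_up_wait
 *   s30_sw_clean_up_wait --e40_sw_cleaned---> s10_hw_start_up_wait (restart)
 *   most states          --e00_go_hw_down---> s00_hw_down (cleanup or
 *                                             teardown depends on the state)
 */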
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
deleted file mode 100644
index 805e37dc7621..000000000000
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ /dev/null
@@ -1,731 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/ctype.h>
-#include <rdma/ib_sysfs.h>
-
-#include "qib.h"
-#include "qib_mad.h"
-
-static struct qib_pportdata *qib_get_pportdata_kobj(struct kobject *kobj)
-{
- u32 port_num;
- struct ib_device *ibdev = ib_port_sysfs_get_ibdev_kobj(kobj, &port_num);
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
-
- return &dd->pport[port_num - 1];
-}
-
-/*
- * Get/Set heartbeat enable. OR of 1=enabled, 2=auto
- */
-static ssize_t hrtbt_enable_show(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, char *buf)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_pportdata *ppd = &dd->pport[port_num - 1];
-
- return sysfs_emit(buf, "%d\n", dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT));
-}
-
-static ssize_t hrtbt_enable_store(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr,
- const char *buf, size_t count)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_pportdata *ppd = &dd->pport[port_num - 1];
- int ret;
- u16 val;
-
- ret = kstrtou16(buf, 0, &val);
- if (ret) {
- qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
- return ret;
- }
-
- /*
-  * Set the "intentional" heartbeat enable based on either of
- * "Enable" and "Auto", as these are normally set together.
- * This bit is consulted when leaving loopback mode,
- * because entering loopback mode overrides it and automatically
- * disables heartbeat.
- */
- ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
- return ret < 0 ? ret : count;
-}
-static IB_PORT_ATTR_RW(hrtbt_enable);
-
-static ssize_t loopback_store(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, const char *buf,
- size_t count)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_pportdata *ppd = &dd->pport[port_num - 1];
- int ret = count, r;
-
- r = dd->f_set_ib_loopback(ppd, buf);
- if (r < 0)
- ret = r;
-
- return ret;
-}
-static IB_PORT_ATTR_WO(loopback);
-
-static ssize_t led_override_store(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr,
- const char *buf, size_t count)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_pportdata *ppd = &dd->pport[port_num - 1];
- int ret;
- u16 val;
-
- ret = kstrtou16(buf, 0, &val);
- if (ret) {
- qib_dev_err(dd, "attempt to set invalid LED override\n");
- return ret;
- }
-
- qib_set_led_override(ppd, val);
- return count;
-}
-static IB_PORT_ATTR_WO(led_override);
-
-static ssize_t status_show(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, char *buf)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_pportdata *ppd = &dd->pport[port_num - 1];
-
- if (!ppd->statusp)
- return -EINVAL;
-
- return sysfs_emit(buf, "0x%llx\n", (unsigned long long)*(ppd->statusp));
-}
-static IB_PORT_ATTR_RO(status);
-
-/*
- * For userland compatibility, these offsets must remain fixed.
- * They are strings for QIB_STATUS_*
- */
-static const char * const qib_status_str[] = {
- "Initted",
- "",
- "",
- "",
- "",
- "Present",
- "IB_link_up",
- "IB_configured",
- "",
- "Fatal_Hardware_Error",
- NULL,
-};
-
-static ssize_t status_str_show(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, char *buf)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_pportdata *ppd = &dd->pport[port_num - 1];
- int i, any;
- u64 s;
- ssize_t ret;
-
- if (!ppd->statusp) {
- ret = -EINVAL;
- goto bail;
- }
-
- s = *(ppd->statusp);
- *buf = '\0';
- for (any = i = 0; s && qib_status_str[i]; i++) {
- if (s & 1) {
-   /* bail if the separator would overflow the buffer */
- if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
- break;
- if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >=
- PAGE_SIZE)
- break;
- any = 1;
- }
- s >>= 1;
- }
- if (any)
- strlcat(buf, "\n", PAGE_SIZE);
-
- ret = strlen(buf);
-
-bail:
- return ret;
-}
-static IB_PORT_ATTR_RO(status_str);
-
-/* end of per-port functions */
-
-static struct attribute *port_linkcontrol_attributes[] = {
- &ib_port_attr_loopback.attr,
- &ib_port_attr_led_override.attr,
- &ib_port_attr_hrtbt_enable.attr,
- &ib_port_attr_status.attr,
- &ib_port_attr_status_str.attr,
- NULL
-};
-
-static const struct attribute_group port_linkcontrol_group = {
- .name = "linkcontrol",
- .attrs = port_linkcontrol_attributes,
-};
-
-/*
- * Start of per-port congestion control structures and support code
- */
-
-/*
- * Congestion control table size followed by table entries
- */
-static ssize_t cc_table_bin_read(struct file *filp, struct kobject *kobj,
- const struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
-{
- struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);
- int ret;
-
- if (!qib_cc_table_size || !ppd->ccti_entries_shadow)
- return -EINVAL;
-
- ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow)
- + sizeof(__be16);
-
- if (pos > ret)
- return -EINVAL;
-
- if (count > ret - pos)
- count = ret - pos;
-
- if (!count)
- return count;
-
- spin_lock(&ppd->cc_shadow_lock);
- memcpy(buf, ppd->ccti_entries_shadow, count);
- spin_unlock(&ppd->cc_shadow_lock);
-
- return count;
-}
-static const BIN_ATTR_RO(cc_table_bin, PAGE_SIZE);
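The pos/count handling above (and in cc_setting_bin_read() below) is the usual sysfs binary-attribute read contract. A hedged restatement as a hypothetical helper, with size standing for the object length the handler computed:

/* Illustrative only: the generic clamp used by the sysfs bin reads here. */
static ssize_t clamp_bin_read(loff_t pos, size_t count, size_t size)
{
	if (pos > size)
		return -EINVAL;
	if (count > size - pos)
		count = size - pos;
	return count; /* may be 0: nothing left past the end */
}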
-
-/*
- * Congestion settings: port control, control map and an array of 16
- * entries for the congestion entries - increase, timer, event log
- * trigger threshold and the minimum injection rate delay.
- */
-static ssize_t cc_setting_bin_read(struct file *filp, struct kobject *kobj,
- const struct bin_attribute *bin_attr,
- char *buf, loff_t pos, size_t count)
-{
- struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);
- int ret;
-
- if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
- return -EINVAL;
-
- ret = sizeof(struct ib_cc_congestion_setting_attr_shadow);
-
- if (pos > ret)
- return -EINVAL;
- if (count > ret - pos)
- count = ret - pos;
-
- if (!count)
- return count;
-
- spin_lock(&ppd->cc_shadow_lock);
- memcpy(buf, ppd->congestion_entries_shadow, count);
- spin_unlock(&ppd->cc_shadow_lock);
-
- return count;
-}
-static const BIN_ATTR_RO(cc_setting_bin, PAGE_SIZE);
-
-static const struct bin_attribute *const port_ccmgta_attributes[] = {
- &bin_attr_cc_setting_bin,
- &bin_attr_cc_table_bin,
- NULL,
-};
-
-static umode_t qib_ccmgta_is_bin_visible(struct kobject *kobj,
- const struct bin_attribute *attr, int n)
-{
- struct qib_pportdata *ppd = qib_get_pportdata_kobj(kobj);
-
- if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
- return 0;
- return attr->attr.mode;
-}
-
-static const struct attribute_group port_ccmgta_attribute_group = {
- .name = "CCMgtA",
- .is_bin_visible = qib_ccmgta_is_bin_visible,
- .bin_attrs_new = port_ccmgta_attributes,
-};
-
-/* Start sl2vl */
-
-struct qib_sl2vl_attr {
- struct ib_port_attribute attr;
- int sl;
-};
-
-static ssize_t sl2vl_attr_show(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, char *buf)
-{
- struct qib_sl2vl_attr *sattr =
- container_of(attr, struct qib_sl2vl_attr, attr);
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
-
- return sysfs_emit(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
-}
-
-#define QIB_SL2VL_ATTR(N) \
- static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \
- .attr = __ATTR(N, 0444, sl2vl_attr_show, NULL), \
- .sl = N, \
- }
-
-QIB_SL2VL_ATTR(0);
-QIB_SL2VL_ATTR(1);
-QIB_SL2VL_ATTR(2);
-QIB_SL2VL_ATTR(3);
-QIB_SL2VL_ATTR(4);
-QIB_SL2VL_ATTR(5);
-QIB_SL2VL_ATTR(6);
-QIB_SL2VL_ATTR(7);
-QIB_SL2VL_ATTR(8);
-QIB_SL2VL_ATTR(9);
-QIB_SL2VL_ATTR(10);
-QIB_SL2VL_ATTR(11);
-QIB_SL2VL_ATTR(12);
-QIB_SL2VL_ATTR(13);
-QIB_SL2VL_ATTR(14);
-QIB_SL2VL_ATTR(15);
-
-static struct attribute *port_sl2vl_attributes[] = {
- &qib_sl2vl_attr_0.attr.attr,
- &qib_sl2vl_attr_1.attr.attr,
- &qib_sl2vl_attr_2.attr.attr,
- &qib_sl2vl_attr_3.attr.attr,
- &qib_sl2vl_attr_4.attr.attr,
- &qib_sl2vl_attr_5.attr.attr,
- &qib_sl2vl_attr_6.attr.attr,
- &qib_sl2vl_attr_7.attr.attr,
- &qib_sl2vl_attr_8.attr.attr,
- &qib_sl2vl_attr_9.attr.attr,
- &qib_sl2vl_attr_10.attr.attr,
- &qib_sl2vl_attr_11.attr.attr,
- &qib_sl2vl_attr_12.attr.attr,
- &qib_sl2vl_attr_13.attr.attr,
- &qib_sl2vl_attr_14.attr.attr,
- &qib_sl2vl_attr_15.attr.attr,
- NULL
-};
-
-static const struct attribute_group port_sl2vl_group = {
- .name = "sl2vl",
- .attrs = port_sl2vl_attributes,
-};
-
-/* End sl2vl */
-
-/* Start diag_counters */
-
-struct qib_diagc_attr {
- struct ib_port_attribute attr;
- size_t counter;
-};
-
-static ssize_t diagc_attr_show(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, char *buf)
-{
- struct qib_diagc_attr *dattr =
- container_of(attr, struct qib_diagc_attr, attr);
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
-
- return sysfs_emit(buf, "%llu\n", *((u64 *)qibp + dattr->counter));
-}
-
-static ssize_t diagc_attr_store(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, const char *buf,
- size_t count)
-{
- struct qib_diagc_attr *dattr =
- container_of(attr, struct qib_diagc_attr, attr);
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
- u64 val;
- int ret;
-
- ret = kstrtou64(buf, 0, &val);
- if (ret)
- return ret;
- *((u64 *)qibp + dattr->counter) = val;
- return count;
-}
-
-#define QIB_DIAGC_ATTR(N) \
- static_assert(__same_type(((struct qib_ibport *)0)->rvp.n_##N, u64)); \
- static struct qib_diagc_attr qib_diagc_attr_##N = { \
- .attr = __ATTR(N, 0664, diagc_attr_show, diagc_attr_store), \
- .counter = \
- offsetof(struct qib_ibport, rvp.n_##N) / sizeof(u64) \
- }
-
-QIB_DIAGC_ATTR(rc_resends);
-QIB_DIAGC_ATTR(seq_naks);
-QIB_DIAGC_ATTR(rdma_seq);
-QIB_DIAGC_ATTR(rnr_naks);
-QIB_DIAGC_ATTR(other_naks);
-QIB_DIAGC_ATTR(rc_timeouts);
-QIB_DIAGC_ATTR(loop_pkts);
-QIB_DIAGC_ATTR(pkt_drops);
-QIB_DIAGC_ATTR(dmawait);
-QIB_DIAGC_ATTR(unaligned);
-QIB_DIAGC_ATTR(rc_dupreq);
-QIB_DIAGC_ATTR(rc_seqnak);
-QIB_DIAGC_ATTR(rc_crwaits);
-
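QIB_DIAGC_ATTR reduces each counter to a u64 index into struct qib_ibport, which is what lets a single show/store pair service every counter. A sketch of the equivalent direct read for one counter (a hypothetical function, illustrative only):

/* Sketch of the QIB_DIAGC_ATTR indexing trick; not driver code. */
static u64 diagc_read_example(struct qib_ibport *qibp)
{
	size_t idx = offsetof(struct qib_ibport, rvp.n_rc_resends) /
		     sizeof(u64);

	/* Equivalent to qibp->rvp.n_rc_resends, via the generic path */
	return *((u64 *)qibp + idx);
}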
-static u64 get_all_cpu_total(u64 __percpu *cntr)
-{
- int cpu;
- u64 counter = 0;
-
- for_each_possible_cpu(cpu)
- counter += *per_cpu_ptr(cntr, cpu);
- return counter;
-}
-
-static ssize_t qib_store_per_cpu(struct qib_devdata *dd, const char *buf,
- size_t count, u64 *zero, u64 cur)
-{
- u32 val;
- int ret;
-
- ret = kstrtou32(buf, 0, &val);
- if (ret)
- return ret;
- if (val != 0) {
- qib_dev_err(dd, "Per CPU cntrs can only be zeroed");
- return count;
- }
- *zero = cur;
- return count;
-}
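Per-CPU counters cannot be cleared in place, so the rc_acks/rc_qacks/rc_delayed_comp attributes below treat "zeroing" as snapshotting: the z_* field records the total at reset time and reads report the delta. A one-line restatement of that convention (hypothetical helper, illustrative only):

static u64 diagc_percpu_value(u64 __percpu *cntr, u64 zero_baseline)
{
	/* reads report the growth since the last "zeroing" snapshot */
	return get_all_cpu_total(cntr) - zero_baseline;
}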
-
-static ssize_t rc_acks_show(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, char *buf)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
-
- return sysfs_emit(buf, "%llu\n",
- get_all_cpu_total(qibp->rvp.rc_acks) -
- qibp->rvp.z_rc_acks);
-}
-
-static ssize_t rc_acks_store(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, const char *buf,
- size_t count)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
-
- return qib_store_per_cpu(dd, buf, count, &qibp->rvp.z_rc_acks,
- get_all_cpu_total(qibp->rvp.rc_acks));
-}
-static IB_PORT_ATTR_RW(rc_acks);
-
-static ssize_t rc_qacks_show(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, char *buf)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
-
- return sysfs_emit(buf, "%llu\n",
- get_all_cpu_total(qibp->rvp.rc_qacks) -
- qibp->rvp.z_rc_qacks);
-}
-
-static ssize_t rc_qacks_store(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, const char *buf,
- size_t count)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
-
- return qib_store_per_cpu(dd, buf, count, &qibp->rvp.z_rc_qacks,
- get_all_cpu_total(qibp->rvp.rc_qacks));
-}
-static IB_PORT_ATTR_RW(rc_qacks);
-
-static ssize_t rc_delayed_comp_show(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr, char *buf)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
-
- return sysfs_emit(buf, "%llu\n",
- get_all_cpu_total(qibp->rvp.rc_delayed_comp) -
- qibp->rvp.z_rc_delayed_comp);
-}
-
-static ssize_t rc_delayed_comp_store(struct ib_device *ibdev, u32 port_num,
- struct ib_port_attribute *attr,
- const char *buf, size_t count)
-{
- struct qib_devdata *dd = dd_from_ibdev(ibdev);
- struct qib_ibport *qibp = &dd->pport[port_num - 1].ibport_data;
-
- return qib_store_per_cpu(dd, buf, count, &qibp->rvp.z_rc_delayed_comp,
- get_all_cpu_total(qibp->rvp.rc_delayed_comp));
-}
-static IB_PORT_ATTR_RW(rc_delayed_comp);
-
-static struct attribute *port_diagc_attributes[] = {
- &qib_diagc_attr_rc_resends.attr.attr,
- &qib_diagc_attr_seq_naks.attr.attr,
- &qib_diagc_attr_rdma_seq.attr.attr,
- &qib_diagc_attr_rnr_naks.attr.attr,
- &qib_diagc_attr_other_naks.attr.attr,
- &qib_diagc_attr_rc_timeouts.attr.attr,
- &qib_diagc_attr_loop_pkts.attr.attr,
- &qib_diagc_attr_pkt_drops.attr.attr,
- &qib_diagc_attr_dmawait.attr.attr,
- &qib_diagc_attr_unaligned.attr.attr,
- &qib_diagc_attr_rc_dupreq.attr.attr,
- &qib_diagc_attr_rc_seqnak.attr.attr,
- &qib_diagc_attr_rc_crwaits.attr.attr,
- &ib_port_attr_rc_acks.attr,
- &ib_port_attr_rc_qacks.attr,
- &ib_port_attr_rc_delayed_comp.attr,
- NULL
-};
-
-static const struct attribute_group port_diagc_group = {
- .name = "diag_counters",
- .attrs = port_diagc_attributes,
-};
-
-/* End diag_counters */
-
-const struct attribute_group *qib_attr_port_groups[] = {
- &port_linkcontrol_group,
- &port_ccmgta_attribute_group,
- &port_sl2vl_group,
- &port_diagc_group,
- NULL,
-};
-
-/* end of per-port file structures and support code */
-
-/*
- * Start of per-unit (or driver, in some cases, but replicated
- * per unit) functions (these get a device *)
- */
-static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct qib_ibdev *dev =
- rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
-
- return sysfs_emit(buf, "%x\n", dd_from_dev(dev)->minrev);
-}
-static DEVICE_ATTR_RO(hw_rev);
-
-static ssize_t hca_type_show(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct qib_ibdev *dev =
- rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
-
- if (!dd->boardname)
- return -EINVAL;
- return sysfs_emit(buf, "%s\n", dd->boardname);
-}
-static DEVICE_ATTR_RO(hca_type);
-static DEVICE_ATTR(board_id, 0444, hca_type_show, NULL);
-
-static DEVICE_STRING_ATTR_RO(version, 0444, QIB_DRIVER_VERSION);
-
-static ssize_t boardversion_show(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct qib_ibdev *dev =
- rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
-
- /* The string printed here is already newline-terminated. */
- return sysfs_emit(buf, "%s", dd->boardversion);
-}
-static DEVICE_ATTR_RO(boardversion);
-
-static ssize_t localbus_info_show(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct qib_ibdev *dev =
- rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
-
- /* The string printed here is already newline-terminated. */
- return sysfs_emit(buf, "%s", dd->lbus_info);
-}
-static DEVICE_ATTR_RO(localbus_info);
-
-static ssize_t nctxts_show(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct qib_ibdev *dev =
- rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
-
- /* Return the number of user ports (contexts) available. */
- /* The calculation below deals with a special case where
- * cfgctxts is set to 1 on a single-port board. */
- return sysfs_emit(buf, "%u\n",
- (dd->first_user_ctxt > dd->cfgctxts) ?
- 0 :
- (dd->cfgctxts - dd->first_user_ctxt));
-}
-static DEVICE_ATTR_RO(nctxts);
-
-static ssize_t nfreectxts_show(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct qib_ibdev *dev =
- rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
-
- /* Return the number of free user ports (contexts) available. */
- return sysfs_emit(buf, "%u\n", dd->freectxts);
-}
-static DEVICE_ATTR_RO(nfreectxts);
-
-static ssize_t serial_show(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct qib_ibdev *dev =
- rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
- const u8 *end = memchr(dd->serial, 0, ARRAY_SIZE(dd->serial));
- int size = end ? end - dd->serial : ARRAY_SIZE(dd->serial);
-
- return sysfs_emit(buf, ".%*s\n", size, dd->serial);
-}
-static DEVICE_ATTR_RO(serial);
-
-static ssize_t chip_reset_store(struct device *device,
- struct device_attribute *attr, const char *buf,
- size_t count)
-{
- struct qib_ibdev *dev =
- rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
- int ret;
-
- if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
- ret = -EINVAL;
- goto bail;
- }
-
- ret = qib_reset_device(dd->unit);
-bail:
- return ret < 0 ? ret : count;
-}
-static DEVICE_ATTR_WO(chip_reset);
-
-/*
- * Dump the tempsense registers in decimal, to ease shell scripting.
- */
-static ssize_t tempsense_show(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct qib_ibdev *dev =
- rdma_device_to_drv_device(device, struct qib_ibdev, rdi.ibdev);
- struct qib_devdata *dd = dd_from_dev(dev);
- int i;
- u8 regvals[8];
-
- for (i = 0; i < 8; i++) {
- int ret;
-
- if (i == 6)
- continue;
- ret = dd->f_tempsense_rd(dd, i);
- if (ret < 0)
- return ret; /* return error on bad read */
- regvals[i] = ret;
- }
- return sysfs_emit(buf, "%d %d %02X %02X %d %d\n",
- (signed char)regvals[0],
- (signed char)regvals[1],
- regvals[2],
- regvals[3],
- (signed char)regvals[5],
- (signed char)regvals[7]);
-}
-static DEVICE_ATTR_RO(tempsense);
-
-/*
- * end of per-unit (or driver, in some cases, but replicated
- * per unit) functions
- */
-
-/* start of per-unit file structures and support code */
-static struct attribute *qib_attributes[] = {
- &dev_attr_hw_rev.attr,
- &dev_attr_hca_type.attr,
- &dev_attr_board_id.attr,
- &dev_attr_version.attr.attr,
- &dev_attr_nctxts.attr,
- &dev_attr_nfreectxts.attr,
- &dev_attr_serial.attr,
- &dev_attr_boardversion.attr,
- &dev_attr_tempsense.attr,
- &dev_attr_localbus_info.attr,
- &dev_attr_chip_reset.attr,
- NULL,
-};
-
-const struct attribute_group qib_attr_group = {
- .attrs = qib_attributes,
-};
diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c
deleted file mode 100644
index 97b8a2bf5c69..000000000000
--- a/drivers/infiniband/hw/qib/qib_twsi.c
+++ /dev/null
@@ -1,502 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-
-#include "qib.h"
-
-/*
- * QLogic_IB "Two Wire Serial Interface" driver.
- * Originally written for a not-quite-i2c serial eeprom, which is
- * still used on some supported boards. Later boards have added a
- * variety of other uses, most board-specific, so the bit-banging
- * part has been split off to this file, while the other parts
- * have been moved to chip-specific files.
- *
- * We have also dropped all pretense of a fully generic (e.g. pretend
- * we don't know whether '1' is the higher voltage) interface, as
- * the restrictions of the generic i2c interface (e.g. no access from
- * the driver itself) make it unsuitable for this use.
- */
-
-#define READ_CMD 1
-#define WRITE_CMD 0
-
-/**
- * i2c_wait_for_writes - wait for a write
- * @dd: the qlogic_ib device
- *
- * We use this instead of udelay directly, so we can make sure
- * that previous register writes have been flushed all the way
- * to the chip. Since we are delaying anyway, the cost doesn't
- * hurt, and it makes the bit twiddling more regular.
- */
-static void i2c_wait_for_writes(struct qib_devdata *dd)
-{
- /*
-  * An implicit read of EXTStatus is as good as an explicit
-  * read of scratch, if all we want to do is flush
- * writes.
- */
- dd->f_gpio_mod(dd, 0, 0, 0);
- rmb(); /* inlined, so prevent compiler reordering */
-}
-
-/*
- * QSFP modules are allowed to hold SCL low for 500uSec. Allow twice that
- * for "almost compliant" modules
- */
-#define SCL_WAIT_USEC 1000
-
-/* BUF_WAIT is the time the bus must be free between a STOP or ACK and
- * the next START. Should be 20, but some chips need more.
- */
-#define TWSI_BUF_WAIT_USEC 60
-
-static void scl_out(struct qib_devdata *dd, u8 bit)
-{
- u32 mask;
-
- udelay(1);
-
- mask = 1UL << dd->gpio_scl_num;
-
- /* SCL is meant to be open-drain, so never set "OUT", just DIR */
- dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);
-
- /*
-  * Allow for slow slaves with a simple
-  * delay on the falling edge, sampling on the rise.
- */
- if (!bit)
- udelay(2);
- else {
- int rise_usec;
-
- for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
- if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
- break;
- udelay(2);
- }
- if (rise_usec <= 0)
- qib_dev_err(dd, "SCL interface stuck low > %d uSec\n",
- SCL_WAIT_USEC);
- }
- i2c_wait_for_writes(dd);
-}
-
-static void sda_out(struct qib_devdata *dd, u8 bit)
-{
- u32 mask;
-
- mask = 1UL << dd->gpio_sda_num;
-
- /* SDA is meant to be open-drain, so never set "OUT", just DIR */
- dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);
-
- i2c_wait_for_writes(dd);
- udelay(2);
-}
-
-static u8 sda_in(struct qib_devdata *dd, int wait)
-{
- int bnum;
- u32 read_val, mask;
-
- bnum = dd->gpio_sda_num;
- mask = (1UL << bnum);
- /* SDA is meant to be open-drain, so never set "OUT", just DIR */
- dd->f_gpio_mod(dd, 0, 0, mask);
- read_val = dd->f_gpio_mod(dd, 0, 0, 0);
- if (wait)
- i2c_wait_for_writes(dd);
- return (read_val & mask) >> bnum;
-}
-
-/**
- * i2c_ackrcv - see if ack following write is true
- * @dd: the qlogic_ib device
- */
-static int i2c_ackrcv(struct qib_devdata *dd)
-{
- u8 ack_received;
-
- /* AT ENTRY SCL = LOW */
- /* change direction, ignore data */
- ack_received = sda_in(dd, 1);
- scl_out(dd, 1);
- ack_received = sda_in(dd, 1) == 0;
- scl_out(dd, 0);
- return ack_received;
-}
-
-static void stop_cmd(struct qib_devdata *dd);
-
-/**
- * rd_byte - read a byte, sending STOP on last, else ACK
- * @dd: the qlogic_ib device
- * @last: identifies the last read
- *
- * Returns byte shifted out of device
- */
-static int rd_byte(struct qib_devdata *dd, int last)
-{
- int bit_cntr, data;
-
- data = 0;
-
- for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
- data <<= 1;
- scl_out(dd, 1);
- data |= sda_in(dd, 0);
- scl_out(dd, 0);
- }
- if (last) {
- scl_out(dd, 1);
- stop_cmd(dd);
- } else {
- sda_out(dd, 0);
- scl_out(dd, 1);
- scl_out(dd, 0);
- sda_out(dd, 1);
- }
- return data;
-}
-
-/**
- * wr_byte - write a byte, one bit at a time
- * @dd: the qlogic_ib device
- * @data: the byte to write
- *
- * Returns 0 if we got the following ack, otherwise 1
- */
-static int wr_byte(struct qib_devdata *dd, u8 data)
-{
- int bit_cntr;
- u8 bit;
-
- for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
- bit = (data >> bit_cntr) & 1;
- sda_out(dd, bit);
- scl_out(dd, 1);
- scl_out(dd, 0);
- }
- return (!i2c_ackrcv(dd)) ? 1 : 0;
-}
-
-/*
- * issue TWSI start sequence:
- * (both clock/data high, clock high, data low while clock is high)
- */
-static void start_seq(struct qib_devdata *dd)
-{
- sda_out(dd, 1);
- scl_out(dd, 1);
- sda_out(dd, 0);
- udelay(1);
- scl_out(dd, 0);
-}
-
-/**
- * stop_seq - transmit the stop sequence
- * @dd: the qlogic_ib device
- *
- * (both clock/data low, clock high, data high while clock is high)
- */
-static void stop_seq(struct qib_devdata *dd)
-{
- scl_out(dd, 0);
- sda_out(dd, 0);
- scl_out(dd, 1);
- sda_out(dd, 1);
-}
-
-/**
- * stop_cmd - transmit the stop condition
- * @dd: the qlogic_ib device
- *
- * (both clock/data low, clock high, data high while clock is high)
- */
-static void stop_cmd(struct qib_devdata *dd)
-{
- stop_seq(dd);
- udelay(TWSI_BUF_WAIT_USEC);
-}
-
-/**
- * qib_twsi_reset - reset I2C communication
- * @dd: the qlogic_ib device
- */
-
-int qib_twsi_reset(struct qib_devdata *dd)
-{
- int clock_cycles_left = 9;
- int was_high = 0;
- u32 pins, mask;
-
- /* Both SCL and SDA should be high. If not, there
- * is something wrong.
- */
- mask = (1UL << dd->gpio_scl_num) | (1UL << dd->gpio_sda_num);
-
- /*
-  * Force the pins to a desired innocuous state.
-  * This is the default power-on state with out=0 and dir=0,
-  * so tri-stated, and they should be floating high (barring HW problems).
- */
- dd->f_gpio_mod(dd, 0, 0, mask);
-
- /*
- * Clock nine times to get all listeners into a sane state.
- * If SDA does not go high at any point, we are wedged.
- * One vendor recommends then issuing START followed by STOP.
-  * We cannot use our "normal" functions to do that, because
- * if SCL drops between them, another vendor's part will
- * wedge, dropping SDA and keeping it low forever, at the end of
- * the next transaction (even if it was not the device addressed).
- * So our START and STOP take place with SCL held high.
- */
- while (clock_cycles_left--) {
- scl_out(dd, 0);
- scl_out(dd, 1);
- /* Note if SDA is high, but keep clocking to sync slave */
- was_high |= sda_in(dd, 0);
- }
-
- if (was_high) {
- /*
- * We saw a high, which we hope means the slave is sync'd.
- * Issue START, STOP, pause for T_BUF.
- */
-
- pins = dd->f_gpio_mod(dd, 0, 0, 0);
- if ((pins & mask) != mask)
- qib_dev_err(dd, "GPIO pins not at rest: %d\n",
- pins & mask);
- /* Drop SDA to issue START */
- udelay(1); /* Guarantee .6 uSec setup */
- sda_out(dd, 0);
- udelay(1); /* Guarantee .6 uSec hold */
- /* At this point, SCL is high, SDA low. Raise SDA for STOP */
- sda_out(dd, 1);
- udelay(TWSI_BUF_WAIT_USEC);
- }
-
- return !was_high;
-}
-
-#define QIB_TWSI_START 0x100
-#define QIB_TWSI_STOP 0x200
-
-/* Write byte to TWSI, optionally prefixed with START or suffixed with
- * STOP.
- * returns 0 if OK (ACK received), else != 0
- */
-static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
-{
- int ret = 1;
-
- if (flags & QIB_TWSI_START)
- start_seq(dd);
-
- ret = wr_byte(dd, data); /* Leaves SCL low (from i2c_ackrcv()) */
-
- if (flags & QIB_TWSI_STOP)
- stop_cmd(dd);
- return ret;
-}
-
-/* Added functionality for IBA7220-based cards */
-#define QIB_TEMP_DEV 0x98
-
-/*
- * qib_twsi_blk_rd
- * Formerly called qib_eeprom_internal_read, and only used for eeprom,
- * but now the general interface for data transfer from twsi devices.
- * One vestige of its former role is that it recognizes a device
- * QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
- * which responded to all TWSI device codes, interpreting them as an
- * address within the device. On all other devices found on boards
- * handled by this driver, the device is followed by a one-byte
- * "address" which selects
- * the "register" or "offset" within the device from which data should
- * be read.
- */
-int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr,
- void *buffer, int len)
-{
- int ret;
- u8 *bp = buffer;
-
- ret = 1;
-
- if (dev == QIB_TWSI_NO_DEV) {
- /* legacy not-really-I2C */
- addr = (addr << 1) | READ_CMD;
- ret = qib_twsi_wr(dd, addr, QIB_TWSI_START);
- } else {
- /* Actual I2C */
- ret = qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START);
- if (ret) {
- stop_cmd(dd);
- ret = 1;
- goto bail;
- }
- /*
-   * The SFF spec claims we do _not_ stop after the addr,
-   * but simply issue a start with the "read" dev-addr.
-   * Since we are implicitly waiting for ACK here,
-   * we need t_buf (nominally 20uSec) before that start,
-   * and cannot rely on the delay built into the STOP.
- */
- ret = qib_twsi_wr(dd, addr, 0);
- udelay(TWSI_BUF_WAIT_USEC);
-
- if (ret) {
- qib_dev_err(dd,
- "Failed to write interface read addr %02X\n",
- addr);
- ret = 1;
- goto bail;
- }
- ret = qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START);
- }
- if (ret) {
- stop_cmd(dd);
- ret = 1;
- goto bail;
- }
-
- /*
-  * Block devices keep clocking data out as long as we ACK,
- * automatically incrementing the address. Some have "pages"
- * whose boundaries will not be crossed, but the handling
- * of these is left to the caller, who is in a better
- * position to know.
- */
- while (len-- > 0) {
- /*
- * Get and store data, sending ACK if length remaining,
- * else STOP
- */
- *bp++ = rd_byte(dd, !len);
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-/*
- * qib_twsi_blk_wr
- * Formerly called qib_eeprom_internal_write, and only used for eeprom,
- * but now the general interface for data transfer to twsi devices.
- * One vestige of its former role is that it recognizes the device code
- * QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
- * which responded to all TWSI device codes, interpreting them as
- * addresses within the device. On all other devices on boards handled by
- * this driver, the device code is followed by a one-byte "address" which
- * selects the "register" or "offset" within the device to which data
- * should be written.
- */
-int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
- const void *buffer, int len)
-{
- int sub_len;
- const u8 *bp = buffer;
- int max_wait_time, i;
- int ret = 1;
-
- while (len > 0) {
- if (dev == QIB_TWSI_NO_DEV) {
- if (qib_twsi_wr(dd, (addr << 1) | WRITE_CMD,
- QIB_TWSI_START)) {
- goto failed_write;
- }
- } else {
- /* Real I2C */
- if (qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START))
- goto failed_write;
- ret = qib_twsi_wr(dd, addr, 0);
- if (ret) {
- qib_dev_err(dd,
- "Failed to write interface write addr %02X\n",
- addr);
- goto failed_write;
- }
- }
-
- sub_len = min(len, 4);
- addr += sub_len;
- len -= sub_len;
-
- for (i = 0; i < sub_len; i++)
- if (qib_twsi_wr(dd, *bp++, 0))
- goto failed_write;
-
- stop_cmd(dd);
-
- /*
- * Wait for the write to complete by waiting for a successful
- * read (the chip replies with a zero after the write
- * cmd completes, and before it writes to the eeprom).
- * The startcmd for the read will fail the ACK until
- * the writes have completed. We do this inline to avoid
- * the debug prints that the real read routine emits
- * when the startcmd fails.
- * We also use the proper device address, so it doesn't matter
- * whether we have a real eeprom_dev; the legacy part accepts
- * any address.
- max_wait_time = 100;
- while (qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START)) {
- stop_cmd(dd);
- if (!--max_wait_time)
- goto failed_write;
- }
- /* now read (and ignore) the resulting byte */
- rd_byte(dd, 1);
- }
-
- ret = 0;
- goto bail;
-
-failed_write:
- stop_cmd(dd);
- ret = 1;
-
-bail:
- return ret;
-}
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c
deleted file mode 100644
index 397928c80f7c..000000000000
--- a/drivers/infiniband/hw/qib/qib_tx.c
+++ /dev/null
@@ -1,566 +0,0 @@
-/*
- * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/moduleparam.h>
-
-#include "qib.h"
-
-static unsigned qib_hol_timeout_ms = 3000;
-module_param_named(hol_timeout_ms, qib_hol_timeout_ms, uint, S_IRUGO);
-MODULE_PARM_DESC(hol_timeout_ms,
- "duration of user app suspension after link failure");
-
-unsigned qib_sdma_fetch_arb = 1;
-module_param_named(fetch_arb, qib_sdma_fetch_arb, uint, S_IRUGO);
-MODULE_PARM_DESC(fetch_arb, "IBA7220: change SDMA descriptor arbitration");
-
-/**
- * qib_disarm_piobufs - cancel a range of PIO buffers
- * @dd: the qlogic_ib device
- * @first: the first PIO buffer to cancel
- * @cnt: the number of PIO buffers to cancel
- *
- * Cancel a range of PIO buffers. Used at user process close,
- * in case it died while writing to a PIO buffer.
- */
-void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
-{
- unsigned long flags;
- unsigned i;
- unsigned last;
-
- last = first + cnt;
- spin_lock_irqsave(&dd->pioavail_lock, flags);
- for (i = first; i < last; i++) {
- __clear_bit(i, dd->pio_need_disarm);
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
- }
- spin_unlock_irqrestore(&dd->pioavail_lock, flags);
-}
-
-/*
- * This is called by a user process when it sees the DISARM_BUFS event
- * bit is set.
- */
-int qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd)
-{
- struct qib_devdata *dd = rcd->dd;
- unsigned i;
- unsigned last;
-
- last = rcd->pio_base + rcd->piocnt;
- /*
- * Don't need uctxt_lock here, since user has called in to us.
- * Clear at start in case more interrupts set bits while we
- * are disarming.
- */
- if (rcd->user_event_mask) {
- /*
- * subctxt_cnt is 0 if not shared, so do base
- * separately, first, then remaining subctxt, if any
- */
- clear_bit(_QIB_EVENT_DISARM_BUFS_BIT, &rcd->user_event_mask[0]);
- for (i = 1; i < rcd->subctxt_cnt; i++)
- clear_bit(_QIB_EVENT_DISARM_BUFS_BIT,
- &rcd->user_event_mask[i]);
- }
- spin_lock_irq(&dd->pioavail_lock);
- for (i = rcd->pio_base; i < last; i++) {
- if (__test_and_clear_bit(i, dd->pio_need_disarm))
- dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
- }
- spin_unlock_irq(&dd->pioavail_lock);
- return 0;
-}
-
-static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
-{
- struct qib_pportdata *ppd;
- unsigned pidx;
-
- for (pidx = 0; pidx < dd->num_pports; pidx++) {
- ppd = dd->pport + pidx;
- if (i >= ppd->sdma_state.first_sendbuf &&
- i < ppd->sdma_state.last_sendbuf)
- return ppd;
- }
- return NULL;
-}
-
-/*
- * Return true if the send buffer is being used by a user context.
- * Sets _QIB_EVENT_DISARM_BUFS_BIT in user_event_mask as a side effect
- */
-static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
-{
- struct qib_ctxtdata *rcd;
- unsigned ctxt;
- int ret = 0;
-
- spin_lock(&dd->uctxt_lock);
- for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
- rcd = dd->rcd[ctxt];
- if (!rcd || bufn < rcd->pio_base ||
- bufn >= rcd->pio_base + rcd->piocnt)
- continue;
- if (rcd->user_event_mask) {
- int i;
- /*
- * subctxt_cnt is 0 if not shared, so do base
- * separately, first, then remaining subctxt, if any
- */
- set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
- &rcd->user_event_mask[0]);
- for (i = 1; i < rcd->subctxt_cnt; i++)
- set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
- &rcd->user_event_mask[i]);
- }
- ret = 1;
- break;
- }
- spin_unlock(&dd->uctxt_lock);
-
- return ret;
-}
-
-/*
- * Disarm a set of send buffers. If the buffer might be actively being
- * written to, mark the buffer to be disarmed later when it is not being
- * written to.
- *
- * This should only be called from the IRQ error handler.
- */
-void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
- unsigned cnt)
-{
- struct qib_pportdata *ppd, *pppd[QIB_MAX_IB_PORTS];
- unsigned i;
- unsigned long flags;
-
- for (i = 0; i < dd->num_pports; i++)
- pppd[i] = NULL;
-
- for (i = 0; i < cnt; i++) {
- if (!test_bit(i, mask))
- continue;
- /*
- * If the buffer is owned by the DMA hardware,
- * reset the DMA engine.
- */
- ppd = is_sdma_buf(dd, i);
- if (ppd) {
- pppd[ppd->port] = ppd;
- continue;
- }
- /*
- * If the kernel is writing the buffer or the buffer is
- * owned by a user process, we can't clear it yet.
- */
- spin_lock_irqsave(&dd->pioavail_lock, flags);
- if (test_bit(i, dd->pio_writing) ||
- (!test_bit(i << 1, dd->pioavailkernel) &&
- find_ctxt(dd, i))) {
- __set_bit(i, dd->pio_need_disarm);
- } else {
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
- }
- spin_unlock_irqrestore(&dd->pioavail_lock, flags);
- }
-
- /* do cancel_sends once per port that had sdma piobufs in error */
- for (i = 0; i < dd->num_pports; i++)
- if (pppd[i])
- qib_cancel_sends(pppd[i]);
-}
-
-/**
- * update_send_bufs - update shadow copy of the PIO availability map
- * @dd: the qlogic_ib device
- *
- * called whenever our local copy indicates we have run out of send buffers
- */
-static void update_send_bufs(struct qib_devdata *dd)
-{
- unsigned long flags;
- unsigned i;
- const unsigned piobregs = dd->pioavregs;
-
- /*
- * If the generation (check) bits have changed, then we update the
- * busy bit for the corresponding PIO buffer. This algorithm will
- * modify positions to the value they already have in some cases
- * (i.e., no change), but it's faster than changing only the bits
- * that have changed.
- *
- * We would like to do this atomically, to avoid spinlocks in the
- * critical send path, but that's not really possible, given the
- * type of changes, and that this routine could be called on
- * multiple CPUs simultaneously, so we lock in this routine only,
- * to avoid conflicting updates; all we change is the shadow, and
- * it's a single 64 bit memory location, so by definition the update
- * is atomic in terms of what other CPUs can see in testing the
- * bits. The spin_lock overhead isn't too bad, since it only
- * happens when all buffers are in use, so only CPU overhead, not
- * latency or bandwidth is affected.
- */
- if (!dd->pioavailregs_dma)
- return;
- spin_lock_irqsave(&dd->pioavail_lock, flags);
- for (i = 0; i < piobregs; i++) {
- u64 pchbusy, pchg, piov, pnew;
-
- piov = le64_to_cpu(dd->pioavailregs_dma[i]);
- pchg = dd->pioavailkernel[i] &
- ~(dd->pioavailshadow[i] ^ piov);
- pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT;
- if (pchg && (pchbusy & dd->pioavailshadow[i])) {
- pnew = dd->pioavailshadow[i] & ~pchbusy;
- pnew |= piov & pchbusy;
- dd->pioavailshadow[i] = pnew;
- }
- }
- spin_unlock_irqrestore(&dd->pioavail_lock, flags);
-}
-
-/*
- * Debugging code and stats updates if no pio buffers available.
- */
-static noinline void no_send_bufs(struct qib_devdata *dd)
-{
- dd->upd_pio_shadow = 1;
-
- /* not atomic, but if we lose a stat count once in a while, that's OK */
- qib_stats.sps_nopiobufs++;
-}
-
-/*
- * Common code for normal driver send buffer allocation and reserved
- * allocation.
- *
- * Do appropriate marking as busy, etc.
- * Returns buffer pointer if one is found, otherwise NULL.
- */
-u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
- u32 first, u32 last)
-{
- unsigned i, j, updated = 0;
- unsigned nbufs;
- unsigned long flags;
- unsigned long *shadow = dd->pioavailshadow;
- u32 __iomem *buf;
-
- if (!(dd->flags & QIB_PRESENT))
- return NULL;
-
- nbufs = last - first + 1; /* number in range to check */
- if (dd->upd_pio_shadow) {
-update_shadow:
- /*
- * Minor optimization. If we had no buffers on the last call,
- * start out by doing the update; continue and do the scan even
- * if no buffers were updated, to be paranoid.
- */
- update_send_bufs(dd);
- updated++;
- }
- i = first;
- /*
- * While test_and_set_bit() is atomic, we do that and then the
- * change_bit(), and the pair is not. See if this is the cause
- * of the remaining armlaunch errors.
- */
- spin_lock_irqsave(&dd->pioavail_lock, flags);
- if (dd->last_pio >= first && dd->last_pio <= last)
- i = dd->last_pio + 1;
- if (!first)
- /* adjust to min possible */
- nbufs = last - dd->min_kernel_pio + 1;
- for (j = 0; j < nbufs; j++, i++) {
- if (i > last)
- i = !first ? dd->min_kernel_pio : first;
- if (__test_and_set_bit((2 * i) + 1, shadow))
- continue;
- /* flip generation bit */
- __change_bit(2 * i, shadow);
- /* remember that the buffer can be written to now */
- __set_bit(i, dd->pio_writing);
- if (!first && first != last) /* first == last on VL15, avoid */
- dd->last_pio = i;
- break;
- }
- spin_unlock_irqrestore(&dd->pioavail_lock, flags);
-
- if (j == nbufs) {
- if (!updated)
- /*
- * First time through; shadow exhausted, but there may be
- * buffers available; try an update and then rescan.
- */
- goto update_shadow;
- no_send_bufs(dd);
- buf = NULL;
- } else {
- if (i < dd->piobcnt2k)
- buf = (u32 __iomem *)(dd->pio2kbase +
- i * dd->palign);
- else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base)
- buf = (u32 __iomem *)(dd->pio4kbase +
- (i - dd->piobcnt2k) * dd->align4k);
- else
- buf = (u32 __iomem *)(dd->piovl15base +
- (i - (dd->piobcnt2k + dd->piobcnt4k)) *
- dd->align4k);
- if (pbufnum)
- *pbufnum = i;
- dd->upd_pio_shadow = 0;
- }
-
- return buf;
-}
-
-/*
- * Record that the caller is finished writing to the buffer so we don't
- * disarm it while it is being written and disarm it now if needed.
- */
-void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dd->pioavail_lock, flags);
- __clear_bit(n, dd->pio_writing);
- if (__test_and_clear_bit(n, dd->pio_need_disarm))
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
- spin_unlock_irqrestore(&dd->pioavail_lock, flags);
-}
-
-/**
- * qib_chg_pioavailkernel - change which send buffers are available for kernel
- * @dd: the qlogic_ib device
- * @start: the starting send buffer number
- * @len: the number of send buffers
- * @avail: true if the buffers are available for kernel use, false otherwise
- * @rcd: the context pointer
- */
-void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
- unsigned len, u32 avail, struct qib_ctxtdata *rcd)
-{
- unsigned long flags;
- unsigned end;
- unsigned ostart = start;
-
- /* There are two bits per send buffer (busy and generation) */
- start *= 2;
- end = start + len * 2;
-
- spin_lock_irqsave(&dd->pioavail_lock, flags);
- /* Set or clear the busy bit in the shadow. */
- while (start < end) {
- if (avail) {
- unsigned long dma;
- int i;
-
- /*
- * The BUSY bit will never be set, because we disarm
- * the user buffers before we hand them back to the
- * kernel. We do have to make sure the generation
- * bit is set correctly in shadow, since it could
- * have changed many times while allocated to user.
- * We can't use the bitmap functions on the full
- * dma array because it is always little-endian, so
- * we have to flip to host-order first.
- * BITS_PER_LONG is slightly wrong, since it's
- * always 64 bits per register in the chip...
- * We only work on 64-bit kernels, so that's OK.
- */
- i = start / BITS_PER_LONG;
- __clear_bit(QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT + start,
- dd->pioavailshadow);
- dma = (unsigned long)
- le64_to_cpu(dd->pioavailregs_dma[i]);
- if (test_bit((QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
- start) % BITS_PER_LONG, &dma))
- __set_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT +
- start, dd->pioavailshadow);
- else
- __clear_bit(QLOGIC_IB_SENDPIOAVAIL_CHECK_SHIFT
- + start, dd->pioavailshadow);
- __set_bit(start, dd->pioavailkernel);
- if ((start >> 1) < dd->min_kernel_pio)
- dd->min_kernel_pio = start >> 1;
- } else {
- __set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
- dd->pioavailshadow);
- __clear_bit(start, dd->pioavailkernel);
- if ((start >> 1) > dd->min_kernel_pio)
- dd->min_kernel_pio = start >> 1;
- }
- start += 2;
- }
-
- if (dd->min_kernel_pio > 0 && dd->last_pio < dd->min_kernel_pio - 1)
- dd->last_pio = dd->min_kernel_pio - 1;
- spin_unlock_irqrestore(&dd->pioavail_lock, flags);
-
- dd->f_txchk_change(dd, ostart, len, avail, rcd);
-}
-
-/*
- * Flush all sends that might be in the ready-to-send state, as well as any
- * that are in the process of being sent. Used whenever we need to be
- * sure the send side is idle. Cleans up all buffer state by canceling
- * all pio buffers, and issuing an abort, which cleans up anything in the
- * launch fifo. The cancel is superfluous on some chip versions, but
- * it's safer to always do it.
- * PIOAvail bits are updated by the chip as if a normal send had happened.
- */
-void qib_cancel_sends(struct qib_pportdata *ppd)
-{
- struct qib_devdata *dd = ppd->dd;
- struct qib_ctxtdata *rcd;
- unsigned long flags;
- unsigned ctxt;
- unsigned i;
- unsigned last;
-
- /*
- * Tell PSM to disarm buffers again before trying to reuse them.
- * We need to be sure the rcd doesn't change out from under us
- * while we do so. We hold the two locks sequentially. We might
- * needlessly set some need_disarm bits as a result, if the
- * context is closed after we release the uctxt_lock, but that's
- * fairly benign, and safer than nesting the locks.
- */
- for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
- spin_lock_irqsave(&dd->uctxt_lock, flags);
- rcd = dd->rcd[ctxt];
- if (rcd && rcd->ppd == ppd) {
- last = rcd->pio_base + rcd->piocnt;
- if (rcd->user_event_mask) {
- /*
- * subctxt_cnt is 0 if not shared, so do base
- * separately, first, then remaining subctxt,
- * if any
- */
- set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
- &rcd->user_event_mask[0]);
- for (i = 1; i < rcd->subctxt_cnt; i++)
- set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
- &rcd->user_event_mask[i]);
- }
- i = rcd->pio_base;
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- spin_lock_irqsave(&dd->pioavail_lock, flags);
- for (; i < last; i++)
- __set_bit(i, dd->pio_need_disarm);
- spin_unlock_irqrestore(&dd->pioavail_lock, flags);
- } else
- spin_unlock_irqrestore(&dd->uctxt_lock, flags);
- }
-
- if (!(dd->flags & QIB_HAS_SEND_DMA))
- dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
- QIB_SENDCTRL_FLUSH);
-}
-
-/*
- * Force an update of the in-memory copy of the pioavail registers, when
- * needed for any of a variety of reasons.
- * If already off, this routine is a nop, on the assumption that the
- * caller (or set of callers) will "do the right thing".
- * This is a per-device operation, so just the first port.
- */
-void qib_force_pio_avail_update(struct qib_devdata *dd)
-{
- dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
-}
-
-void qib_hol_down(struct qib_pportdata *ppd)
-{
- /*
- * Cancel sends when the link goes DOWN so that we aren't doing it
- * at INIT when we might be trying to send SMI packets.
- */
- if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
- qib_cancel_sends(ppd);
-}
-
-/*
- * Link is at INIT.
- * We start the HoL timer so we can detect stuck packets blocking SMP replies.
- * Timer may already be running, so use mod_timer, not add_timer.
- */
-void qib_hol_init(struct qib_pportdata *ppd)
-{
- if (ppd->hol_state != QIB_HOL_INIT) {
- ppd->hol_state = QIB_HOL_INIT;
- mod_timer(&ppd->hol_timer,
- jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
- }
-}
-
-/*
- * Link is up, continue any user processes, and ensure timer
- * is a nop, if running. Let timer keep running, if set; it
- * will nop when it sees the link is up.
- */
-void qib_hol_up(struct qib_pportdata *ppd)
-{
- ppd->hol_state = QIB_HOL_UP;
-}
-
-/*
- * This is only called via the timer.
- */
-void qib_hol_event(struct timer_list *t)
-{
- struct qib_pportdata *ppd = timer_container_of(ppd, t, hol_timer);
-
- /* If hardware error, etc, skip. */
- if (!(ppd->dd->flags & QIB_INITTED))
- return;
-
- if (ppd->hol_state != QIB_HOL_UP) {
- /*
- * Try to flush sends in case a stuck packet is blocking
- * SMP replies.
- */
- qib_hol_down(ppd);
- mod_timer(&ppd->hol_timer,
- jiffies + msecs_to_jiffies(qib_hol_timeout_ms));
- }
-}
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
deleted file mode 100644
index 8e2bda77d8b9..000000000000
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ /dev/null
@@ -1,521 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "qib.h"
-
-/* cut down ridiculously long IB macro names */
-#define OP(x) IB_OPCODE_UC_##x
-
-/**
- * qib_make_uc_req - construct a request packet (SEND, RDMA write)
- * @qp: a pointer to the QP
- * @flags: unused
- *
- * Assumes the s_lock is held.
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
-{
- struct qib_qp_priv *priv = qp->priv;
- struct ib_other_headers *ohdr;
- struct rvt_swqe *wqe;
- u32 hwords;
- u32 bth0;
- u32 len;
- u32 pmtu = qp->pmtu;
- int ret = 0;
-
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
- if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
- goto bail;
- /* We are in the error state, flush the work request. */
- if (qp->s_last == READ_ONCE(qp->s_head))
- goto bail;
- /* If DMAs are in progress, we can't flush immediately. */
- if (atomic_read(&priv->s_dma_busy)) {
- qp->s_flags |= RVT_S_WAIT_DMA;
- goto bail;
- }
- wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
- goto done;
- }
-
- ohdr = &priv->s_hdr->u.oth;
- if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
- ohdr = &priv->s_hdr->u.l.oth;
-
- /* header size in 32-bit words LRH+BTH = (8+12)/4. */
- hwords = 5;
- bth0 = 0;
-
- /* Get the next send request. */
- wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
- qp->s_wqe = NULL;
- switch (qp->s_state) {
- default:
- if (!(ib_rvt_state_ops[qp->state] &
- RVT_PROCESS_NEXT_SEND_OK))
- goto bail;
- /* Check if send work queue is empty. */
- if (qp->s_cur == READ_ONCE(qp->s_head))
- goto bail;
- /*
- * Start a new request.
- */
- qp->s_psn = wqe->psn;
- qp->s_sge.sge = wqe->sg_list[0];
- qp->s_sge.sg_list = wqe->sg_list + 1;
- qp->s_sge.num_sge = wqe->wr.num_sge;
- qp->s_sge.total_len = wqe->length;
- len = wqe->length;
- qp->s_len = len;
- switch (wqe->wr.opcode) {
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_IMM:
- if (len > pmtu) {
- qp->s_state = OP(SEND_FIRST);
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_SEND)
- qp->s_state = OP(SEND_ONLY);
- else {
- qp->s_state =
- OP(SEND_ONLY_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- }
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- qp->s_wqe = wqe;
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
-
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- ohdr->u.rc.reth.vaddr =
- cpu_to_be64(wqe->rdma_wr.remote_addr);
- ohdr->u.rc.reth.rkey =
- cpu_to_be32(wqe->rdma_wr.rkey);
- ohdr->u.rc.reth.length = cpu_to_be32(len);
- hwords += sizeof(struct ib_reth) / 4;
- if (len > pmtu) {
- qp->s_state = OP(RDMA_WRITE_FIRST);
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
- qp->s_state = OP(RDMA_WRITE_ONLY);
- else {
- qp->s_state =
- OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
- /* Immediate data comes after the RETH */
- ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- }
- qp->s_wqe = wqe;
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
-
- default:
- goto bail;
- }
- break;
-
- case OP(SEND_FIRST):
- qp->s_state = OP(SEND_MIDDLE);
- fallthrough;
- case OP(SEND_MIDDLE):
- len = qp->s_len;
- if (len > pmtu) {
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_SEND)
- qp->s_state = OP(SEND_LAST);
- else {
- qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- }
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- qp->s_wqe = wqe;
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
-
- case OP(RDMA_WRITE_FIRST):
- qp->s_state = OP(RDMA_WRITE_MIDDLE);
- fallthrough;
- case OP(RDMA_WRITE_MIDDLE):
- len = qp->s_len;
- if (len > pmtu) {
- len = pmtu;
- break;
- }
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
- qp->s_state = OP(RDMA_WRITE_LAST);
- else {
- qp->s_state =
- OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
- /* Immediate data comes after the BTH */
- ohdr->u.imm_data = wqe->wr.ex.imm_data;
- hwords += 1;
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- }
- qp->s_wqe = wqe;
- if (++qp->s_cur >= qp->s_size)
- qp->s_cur = 0;
- break;
- }
- qp->s_len -= len;
- qp->s_hdrwords = hwords;
- qp->s_cur_sge = &qp->s_sge;
- qp->s_cur_size = len;
- qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
- qp->s_psn++ & QIB_PSN_MASK);
-done:
- return 1;
-bail:
- qp->s_flags &= ~RVT_S_BUSY;
- return ret;
-}
-
-/**
- * qib_uc_rcv - handle an incoming UC packet
- * @ibp: the port the packet came in on
- * @hdr: the header of the packet
- * @has_grh: true if the packet has a GRH
- * @data: the packet data
- * @tlen: the length of the packet
- * @qp: the QP for this packet.
- *
- * This is called from qib_qp_rcv() to process an incoming UC packet
- * for the given QP.
- * Called at interrupt level.
- */
-void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
-{
- struct ib_other_headers *ohdr;
- u32 opcode;
- u32 hdrsize;
- u32 psn;
- u32 pad;
- struct ib_wc wc;
- u32 pmtu = qp->pmtu;
- struct ib_reth *reth;
- int ret;
-
- /* Check for GRH */
- if (!has_grh) {
- ohdr = &hdr->u.oth;
- hdrsize = 8 + 12; /* LRH + BTH */
- } else {
- ohdr = &hdr->u.l.oth;
- hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
- }
-
- opcode = be32_to_cpu(ohdr->bth[0]);
- if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
- return;
-
- psn = be32_to_cpu(ohdr->bth[2]);
- opcode >>= 24;
-
- /* Compare the PSN versus the expected PSN. */
- if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
- /*
- * Handle a sequence error.
- * Silently drop any current message.
- */
- qp->r_psn = psn;
-inv:
- if (qp->r_state == OP(SEND_FIRST) ||
- qp->r_state == OP(SEND_MIDDLE)) {
- set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
- qp->r_sge.num_sge = 0;
- } else
- rvt_put_ss(&qp->r_sge);
- qp->r_state = OP(SEND_LAST);
- switch (opcode) {
- case OP(SEND_FIRST):
- case OP(SEND_ONLY):
- case OP(SEND_ONLY_WITH_IMMEDIATE):
- goto send_first;
-
- case OP(RDMA_WRITE_FIRST):
- case OP(RDMA_WRITE_ONLY):
- case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
- goto rdma_first;
-
- default:
- goto drop;
- }
- }
-
- /* Check for opcode sequence errors. */
- switch (qp->r_state) {
- case OP(SEND_FIRST):
- case OP(SEND_MIDDLE):
- if (opcode == OP(SEND_MIDDLE) ||
- opcode == OP(SEND_LAST) ||
- opcode == OP(SEND_LAST_WITH_IMMEDIATE))
- break;
- goto inv;
-
- case OP(RDMA_WRITE_FIRST):
- case OP(RDMA_WRITE_MIDDLE):
- if (opcode == OP(RDMA_WRITE_MIDDLE) ||
- opcode == OP(RDMA_WRITE_LAST) ||
- opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
- break;
- goto inv;
-
- default:
- if (opcode == OP(SEND_FIRST) ||
- opcode == OP(SEND_ONLY) ||
- opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
- opcode == OP(RDMA_WRITE_FIRST) ||
- opcode == OP(RDMA_WRITE_ONLY) ||
- opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
- break;
- goto inv;
- }
-
- if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
- rvt_comm_est(qp);
-
- /* OK, process the packet. */
- switch (opcode) {
- case OP(SEND_FIRST):
- case OP(SEND_ONLY):
- case OP(SEND_ONLY_WITH_IMMEDIATE):
-send_first:
- if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
- qp->r_sge = qp->s_rdma_read_sge;
- else {
- ret = rvt_get_rwqe(qp, false);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto drop;
- /*
- * qp->s_rdma_read_sge will be the owner
- * of the mr references.
- */
- qp->s_rdma_read_sge = qp->r_sge;
- }
- qp->r_rcv_len = 0;
- if (opcode == OP(SEND_ONLY))
- goto no_immediate_data;
- else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
- goto send_last_imm;
- fallthrough;
- case OP(SEND_MIDDLE):
- /* Check for invalid length: PMTU or posted rwqe len. */
- if (unlikely(tlen != (hdrsize + pmtu + 4)))
- goto rewind;
- qp->r_rcv_len += pmtu;
- if (unlikely(qp->r_rcv_len > qp->r_len))
- goto rewind;
- rvt_copy_sge(qp, &qp->r_sge, data, pmtu, false, false);
- break;
-
- case OP(SEND_LAST_WITH_IMMEDIATE):
-send_last_imm:
- wc.ex.imm_data = ohdr->u.imm_data;
- hdrsize += 4;
- wc.wc_flags = IB_WC_WITH_IMM;
- goto send_last;
- case OP(SEND_LAST):
-no_immediate_data:
- wc.ex.imm_data = 0;
- wc.wc_flags = 0;
-send_last:
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /* Check for invalid length. */
- /* XXX LAST len should be >= 1 */
- if (unlikely(tlen < (hdrsize + pad + 4)))
- goto rewind;
- /* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
- wc.byte_len = tlen + qp->r_rcv_len;
- if (unlikely(wc.byte_len > qp->r_len))
- goto rewind;
- wc.opcode = IB_WC_RECV;
- rvt_copy_sge(qp, &qp->r_sge, data, tlen, false, false);
- rvt_put_ss(&qp->s_rdma_read_sge);
-last_imm:
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.qp = &qp->ibqp;
- wc.src_qp = qp->remote_qpn;
- wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
- wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
- /* zero fields that are N/A */
- wc.vendor_err = 0;
- wc.pkey_index = 0;
- wc.dlid_path_bits = 0;
- wc.port_num = 0;
- /* Signal completion event if the solicited bit is set. */
- rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
- break;
-
- case OP(RDMA_WRITE_FIRST):
- case OP(RDMA_WRITE_ONLY):
- case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
-rdma_first:
- if (unlikely(!(qp->qp_access_flags &
- IB_ACCESS_REMOTE_WRITE))) {
- goto drop;
- }
- reth = &ohdr->u.rc.reth;
- hdrsize += sizeof(*reth);
- qp->r_len = be32_to_cpu(reth->length);
- qp->r_rcv_len = 0;
- qp->r_sge.sg_list = NULL;
- if (qp->r_len != 0) {
- u32 rkey = be32_to_cpu(reth->rkey);
- u64 vaddr = be64_to_cpu(reth->vaddr);
- int ok;
-
- /* Check rkey */
- ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
- vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
- if (unlikely(!ok))
- goto drop;
- qp->r_sge.num_sge = 1;
- } else {
- qp->r_sge.num_sge = 0;
- qp->r_sge.sge.mr = NULL;
- qp->r_sge.sge.vaddr = NULL;
- qp->r_sge.sge.length = 0;
- qp->r_sge.sge.sge_length = 0;
- }
- if (opcode == OP(RDMA_WRITE_ONLY))
- goto rdma_last;
- else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
- wc.ex.imm_data = ohdr->u.rc.imm_data;
- goto rdma_last_imm;
- }
- fallthrough;
- case OP(RDMA_WRITE_MIDDLE):
- /* Check for invalid length: PMTU or posted rwqe len. */
- if (unlikely(tlen != (hdrsize + pmtu + 4)))
- goto drop;
- qp->r_rcv_len += pmtu;
- if (unlikely(qp->r_rcv_len > qp->r_len))
- goto drop;
- rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
- break;
-
- case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
- wc.ex.imm_data = ohdr->u.imm_data;
-rdma_last_imm:
- hdrsize += 4;
- wc.wc_flags = IB_WC_WITH_IMM;
-
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /* Check for invalid length. */
- /* XXX LAST len should be >= 1 */
- if (unlikely(tlen < (hdrsize + pad + 4)))
- goto drop;
- /* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
- if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
- goto drop;
- if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
- rvt_put_ss(&qp->s_rdma_read_sge);
- else {
- ret = rvt_get_rwqe(qp, true);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto drop;
- }
- wc.byte_len = qp->r_len;
- wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
- rvt_put_ss(&qp->r_sge);
- goto last_imm;
-
- case OP(RDMA_WRITE_LAST):
-rdma_last:
- /* Get the number of bytes the message was padded by. */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- /* Check for invalid length. */
- /* XXX LAST len should be >= 1 */
- if (unlikely(tlen < (hdrsize + pad + 4)))
- goto drop;
- /* Don't count the CRC. */
- tlen -= (hdrsize + pad + 4);
- if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
- goto drop;
- rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
- rvt_put_ss(&qp->r_sge);
- break;
-
- default:
- /* Drop packet for unknown opcodes. */
- goto drop;
- }
- qp->r_psn++;
- qp->r_state = opcode;
- return;
-
-rewind:
- set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
- qp->r_sge.num_sge = 0;
-drop:
- ibp->rvp.n_pkt_drops++;
- return;
-
-op_err:
- rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- return;
-
-}
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
deleted file mode 100644
index 81eda94bd279..000000000000
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ /dev/null
@@ -1,583 +0,0 @@
-/*
- * Copyright (c) 2012 - 2019 Intel Corporation. All rights reserved.
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_smi.h>
-#include <rdma/ib_verbs.h>
-
-#include "qib.h"
-#include "qib_mad.h"
-
-/**
- * qib_ud_loopback - handle send on loopback QPs
- * @sqp: the sending QP
- * @swqe: the send work request
- *
- * This is called from qib_make_ud_req() to forward a WQE addressed
- * to the same HCA.
- * Note that the receive interrupt handler may be calling qib_ud_rcv()
- * while this is being called.
- */
-static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
-{
- struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_devdata *dd = ppd->dd;
- struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
- struct rvt_qp *qp;
- struct rdma_ah_attr *ah_attr;
- unsigned long flags;
- struct rvt_sge_state ssge;
- struct rvt_sge *sge;
- struct ib_wc wc;
- u32 length;
- enum ib_qp_type sqptype, dqptype;
-
- rcu_read_lock();
- qp = rvt_lookup_qpn(rdi, &ibp->rvp, rvt_get_swqe_remote_qpn(swqe));
- if (!qp) {
- ibp->rvp.n_pkt_drops++;
- goto drop;
- }
-
- sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
- IB_QPT_UD : sqp->ibqp.qp_type;
- dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
- IB_QPT_UD : qp->ibqp.qp_type;
-
- if (dqptype != sqptype ||
- !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
- ibp->rvp.n_pkt_drops++;
- goto drop;
- }
-
- ah_attr = rvt_get_swqe_ah_attr(swqe);
- ppd = ppd_from_ibp(ibp);
-
- if (qp->ibqp.qp_num > 1) {
- u16 pkey1;
- u16 pkey2;
- u16 lid;
-
- pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
- pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
- if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
- lid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
- ((1 << ppd->lmc) - 1));
- qib_bad_pkey(ibp, pkey1,
- rdma_ah_get_sl(ah_attr),
- sqp->ibqp.qp_num, qp->ibqp.qp_num,
- cpu_to_be16(lid),
- cpu_to_be16(rdma_ah_get_dlid(ah_attr)));
- goto drop;
- }
- }
-
- /*
- * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
- * Qkeys with the high order bit set mean use the
- * qkey from the QP context instead of the WR (see 10.2.5).
- */
- if (qp->ibqp.qp_num) {
- u32 qkey;
-
- qkey = (int)rvt_get_swqe_remote_qkey(swqe) < 0 ?
- sqp->qkey : rvt_get_swqe_remote_qkey(swqe);
- if (unlikely(qkey != qp->qkey))
- goto drop;
- }
-
- /*
- * A GRH is expected to precede the data even if not
- * present on the wire.
- */
- length = swqe->length;
- memset(&wc, 0, sizeof(wc));
- wc.byte_len = length + sizeof(struct ib_grh);
-
- if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = swqe->wr.ex.imm_data;
- }
-
- spin_lock_irqsave(&qp->r_lock, flags);
-
- /*
- * Get the next work request entry to find where to put the data.
- */
- if (qp->r_flags & RVT_R_REUSE_SGE)
- qp->r_flags &= ~RVT_R_REUSE_SGE;
- else {
- int ret;
-
- ret = rvt_get_rwqe(qp, false);
- if (ret < 0) {
- rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- goto bail_unlock;
- }
- if (!ret) {
- if (qp->ibqp.qp_num == 0)
- ibp->rvp.n_vl15_dropped++;
- goto bail_unlock;
- }
- }
- /* Silently drop packets which are too big. */
- if (unlikely(wc.byte_len > qp->r_len)) {
- qp->r_flags |= RVT_R_REUSE_SGE;
- ibp->rvp.n_pkt_drops++;
- goto bail_unlock;
- }
-
- if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
- struct ib_grh grh;
- const struct ib_global_route *grd = rdma_ah_read_grh(ah_attr);
-
- qib_make_grh(ibp, &grh, grd, 0, 0);
- rvt_copy_sge(qp, &qp->r_sge, &grh,
- sizeof(grh), true, false);
- wc.wc_flags |= IB_WC_GRH;
- } else
- rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
- ssge.sg_list = swqe->sg_list + 1;
- ssge.sge = *swqe->sg_list;
- ssge.num_sge = swqe->wr.num_sge;
- sge = &ssge.sge;
- while (length) {
- u32 len = rvt_get_sge_length(sge, length);
-
- rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (--ssge.num_sge)
- *sge = *ssge.sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= RVT_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- length -= len;
- }
- rvt_put_ss(&qp->r_sge);
- if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
- goto bail_unlock;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = IB_WC_RECV;
- wc.qp = &qp->ibqp;
- wc.src_qp = sqp->ibqp.qp_num;
- wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
- rvt_get_swqe_pkey_index(swqe) : 0;
- wc.slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
- ((1 << ppd->lmc) - 1));
- wc.sl = rdma_ah_get_sl(ah_attr);
- wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
- wc.port_num = qp->port_num;
- /* Signal completion event if the solicited bit is set. */
- rvt_recv_cq(qp, &wc, swqe->wr.send_flags & IB_SEND_SOLICITED);
- ibp->rvp.n_loop_pkts++;
-bail_unlock:
- spin_unlock_irqrestore(&qp->r_lock, flags);
-drop:
- rcu_read_unlock();
-}
-
-/**
- * qib_make_ud_req - construct a UD request packet
- * @qp: the QP
- * @flags: flags to modify and pass back to caller
- *
- * Assumes the s_lock is held.
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
-{
- struct qib_qp_priv *priv = qp->priv;
- struct ib_other_headers *ohdr;
- struct rdma_ah_attr *ah_attr;
- struct qib_pportdata *ppd;
- struct qib_ibport *ibp;
- struct rvt_swqe *wqe;
- u32 nwords;
- u32 extra_bytes;
- u32 bth0;
- u16 lrh0;
- u16 lid;
- int ret = 0;
- int next_cur;
-
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
- if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
- goto bail;
- /* We are in the error state, flush the work request. */
- if (qp->s_last == READ_ONCE(qp->s_head))
- goto bail;
- /* If DMAs are in progress, we can't flush immediately. */
- if (atomic_read(&priv->s_dma_busy)) {
- qp->s_flags |= RVT_S_WAIT_DMA;
- goto bail;
- }
- wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
- goto done;
- }
-
- /* see post_one_send() */
- if (qp->s_cur == READ_ONCE(qp->s_head))
- goto bail;
-
- wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
- next_cur = qp->s_cur + 1;
- if (next_cur >= qp->s_size)
- next_cur = 0;
-
- /* Construct the header. */
- ibp = to_iport(qp->ibqp.device, qp->port_num);
- ppd = ppd_from_ibp(ibp);
- ah_attr = rvt_get_swqe_ah_attr(wqe);
- if (rdma_ah_get_dlid(ah_attr) >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
- if (rdma_ah_get_dlid(ah_attr) !=
- be16_to_cpu(IB_LID_PERMISSIVE))
- this_cpu_inc(ibp->pmastats->n_multicast_xmit);
- else
- this_cpu_inc(ibp->pmastats->n_unicast_xmit);
- } else {
- this_cpu_inc(ibp->pmastats->n_unicast_xmit);
- lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1);
- if (unlikely(lid == ppd->lid)) {
- unsigned long tflags = *flags;
- /*
- * If DMAs are in progress, we can't generate
- * a completion for the loopback packet since
- * it would be out of order.
- * XXX Instead of waiting, we could queue a
- * zero length descriptor so we get a callback.
- */
- if (atomic_read(&priv->s_dma_busy)) {
- qp->s_flags |= RVT_S_WAIT_DMA;
- goto bail;
- }
- qp->s_cur = next_cur;
- spin_unlock_irqrestore(&qp->s_lock, tflags);
- qib_ud_loopback(qp, wqe);
- spin_lock_irqsave(&qp->s_lock, tflags);
- *flags = tflags;
- rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
- goto done;
- }
- }
-
- qp->s_cur = next_cur;
- extra_bytes = -wqe->length & 3;
- nwords = (wqe->length + extra_bytes) >> 2;
-
- /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
- qp->s_hdrwords = 7;
- qp->s_cur_size = wqe->length;
- qp->s_cur_sge = &qp->s_sge;
- qp->s_srate = rdma_ah_get_static_rate(ah_attr);
- qp->s_wqe = wqe;
- qp->s_sge.sge = wqe->sg_list[0];
- qp->s_sge.sg_list = wqe->sg_list + 1;
- qp->s_sge.num_sge = wqe->wr.num_sge;
- qp->s_sge.total_len = wqe->length;
-
- if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
- /* Header size in 32-bit words. */
- qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
- rdma_ah_read_grh(ah_attr),
- qp->s_hdrwords, nwords);
- lrh0 = QIB_LRH_GRH;
- ohdr = &priv->s_hdr->u.l.oth;
- /*
- * Don't worry about sending to locally attached multicast
- * QPs; what happens is left unspecified by the spec.
- */
- } else {
- /* Header size in 32-bit words. */
- lrh0 = QIB_LRH_BTH;
- ohdr = &priv->s_hdr->u.oth;
- }
- if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
- qp->s_hdrwords++;
- ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
- bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
- } else
- bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
- lrh0 |= rdma_ah_get_sl(ah_attr) << 4;
- if (qp->ibqp.qp_type == IB_QPT_SMI)
- lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
- else
- lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(ah_attr)] << 12;
- priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
- priv->s_hdr->lrh[1] =
- cpu_to_be16(rdma_ah_get_dlid(ah_attr)); /* DEST LID */
- priv->s_hdr->lrh[2] =
- cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
- lid = ppd->lid;
- if (lid) {
- lid |= rdma_ah_get_path_bits(ah_attr) &
- ((1 << ppd->lmc) - 1);
- priv->s_hdr->lrh[3] = cpu_to_be16(lid);
- } else
- priv->s_hdr->lrh[3] = IB_LID_PERMISSIVE;
- if (wqe->wr.send_flags & IB_SEND_SOLICITED)
- bth0 |= IB_BTH_SOLICITED;
- bth0 |= extra_bytes << 20;
- bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
- qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
- rvt_get_swqe_pkey_index(wqe) : qp->s_pkey_index);
- ohdr->bth[0] = cpu_to_be32(bth0);
- /*
- * Use the multicast QP if the destination LID is a multicast LID.
- */
- ohdr->bth[1] = rdma_ah_get_dlid(ah_attr) >=
- be16_to_cpu(IB_MULTICAST_LID_BASE) &&
- rdma_ah_get_dlid(ah_attr) != be16_to_cpu(IB_LID_PERMISSIVE) ?
- cpu_to_be32(QIB_MULTICAST_QPN) :
- cpu_to_be32(rvt_get_swqe_remote_qpn(wqe));
- ohdr->bth[2] = cpu_to_be32(wqe->psn & QIB_PSN_MASK);
- /*
- * Qkeys with the high order bit set mean use the
- * qkey from the QP context instead of the WR (see 10.2.5).
- */
- ohdr->u.ud.deth[0] =
- cpu_to_be32((int)rvt_get_swqe_remote_qkey(wqe) < 0 ? qp->qkey :
- rvt_get_swqe_remote_qkey(wqe));
- ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
-
-done:
- return 1;
-bail:
- qp->s_flags &= ~RVT_S_BUSY;
- return ret;
-}
-
-static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
-{
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_devdata *dd = ppd->dd;
- unsigned ctxt = ppd->hw_pidx;
- unsigned i;
-
- pkey &= 0x7fff; /* remove limited/full membership bit */
-
- for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
- if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
- return i;
-
- /*
- * Should not get here; this means the hardware failed to validate pkeys.
- * Punt and return index 0.
- */
- return 0;
-}
-
-/**
- * qib_ud_rcv - receive an incoming UD packet
- * @ibp: the port the packet came in on
- * @hdr: the packet header
- * @has_grh: true if the packet has a GRH
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP the packet came on
- *
- * This is called from qib_qp_rcv() to process an incoming UD packet
- * for the given QP.
- * Called at interrupt level.
- */
-void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
-{
- struct ib_other_headers *ohdr;
- int opcode;
- u32 hdrsize;
- u32 pad;
- struct ib_wc wc;
- u32 qkey;
- u32 src_qp;
- u16 dlid;
-
- /* Check for GRH */
- if (!has_grh) {
- ohdr = &hdr->u.oth;
- hdrsize = 8 + 12 + 8; /* LRH + BTH + DETH */
- } else {
- ohdr = &hdr->u.l.oth;
- hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
- }
- qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
- src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;
-
- /*
- * Get the number of bytes the message was padded by
- * and drop incomplete packets.
- */
- pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
- if (unlikely(tlen < (hdrsize + pad + 4)))
- goto drop;
-
- tlen -= hdrsize + pad + 4;
-
- /*
- * Check that the permissive LID is only used on QP0
- * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
- */
- if (qp->ibqp.qp_num) {
- if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
- hdr->lrh[3] == IB_LID_PERMISSIVE))
- goto drop;
- if (qp->ibqp.qp_num > 1) {
- u16 pkey1, pkey2;
-
- pkey1 = be32_to_cpu(ohdr->bth[0]);
- pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
- if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
- qib_bad_pkey(ibp,
- pkey1,
- (be16_to_cpu(hdr->lrh[0]) >> 4) &
- 0xF,
- src_qp, qp->ibqp.qp_num,
- hdr->lrh[3], hdr->lrh[1]);
- return;
- }
- }
- if (unlikely(qkey != qp->qkey))
- return;
-
- /* Drop invalid MAD packets (see 13.5.3.1). */
- if (unlikely(qp->ibqp.qp_num == 1 &&
- (tlen != 256 ||
- (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
- goto drop;
- } else {
- struct ib_smp *smp;
-
- /* Drop invalid MAD packets (see 13.5.3.1). */
- if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
- goto drop;
- smp = (struct ib_smp *) data;
- if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
- hdr->lrh[3] == IB_LID_PERMISSIVE) &&
- smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
- goto drop;
- }
-
- /*
- * The opcode is in the low byte when it's in network order
- * (top byte when in host order).
- */
- opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
- if (qp->ibqp.qp_num > 1 &&
- opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
- wc.ex.imm_data = ohdr->u.ud.imm_data;
- wc.wc_flags = IB_WC_WITH_IMM;
- } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
- wc.ex.imm_data = 0;
- wc.wc_flags = 0;
- } else
- goto drop;
-
- /*
- * A GRH is expected to precede the data even if not
- * present on the wire.
- */
- wc.byte_len = tlen + sizeof(struct ib_grh);
-
- /*
- * Get the next work request entry to find where to put the data.
- */
- if (qp->r_flags & RVT_R_REUSE_SGE)
- qp->r_flags &= ~RVT_R_REUSE_SGE;
- else {
- int ret;
-
- ret = rvt_get_rwqe(qp, false);
- if (ret < 0) {
- rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- return;
- }
- if (!ret) {
- if (qp->ibqp.qp_num == 0)
- ibp->rvp.n_vl15_dropped++;
- return;
- }
- }
- /* Silently drop packets which are too big. */
- if (unlikely(wc.byte_len > qp->r_len)) {
- qp->r_flags |= RVT_R_REUSE_SGE;
- goto drop;
- }
- if (has_grh) {
- rvt_copy_sge(qp, &qp->r_sge, &hdr->u.l.grh,
- sizeof(struct ib_grh), true, false);
- wc.wc_flags |= IB_WC_GRH;
- } else
- rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
- rvt_copy_sge(qp, &qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
- true, false);
- rvt_put_ss(&qp->r_sge);
- if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
- return;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.opcode = IB_WC_RECV;
- wc.vendor_err = 0;
- wc.qp = &qp->ibqp;
- wc.src_qp = src_qp;
- wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
- qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
- wc.slid = be16_to_cpu(hdr->lrh[3]);
- wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
- dlid = be16_to_cpu(hdr->lrh[1]);
- /*
- * Save the LMC lower bits if the destination LID is a unicast LID.
- */
- wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 :
- dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
- wc.port_num = qp->port_num;
- /* Signal completion event if the solicited bit is set. */
- rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
- return;
-
-drop:
- ibp->rvp.n_pkt_drops++;
-}
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
deleted file mode 100644
index 1bb7507325bc..000000000000
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/mm.h>
-#include <linux/sched/signal.h>
-#include <linux/device.h>
-
-#include "qib.h"
-
-static void __qib_release_user_pages(struct page **p, size_t num_pages,
- int dirty)
-{
- unpin_user_pages_dirty_lock(p, num_pages, dirty);
-}
-
-/*
- * qib_map_page - a safety wrapper around dma_map_page()
- *
- * A dma_addr of all 0's is interpreted by the chip as "disabled".
- * Unfortunately, it can also be a valid dma_addr returned on some
- * architectures.
- *
- * The powerpc iommu assigns dma_addrs in ascending order, so we don't
- * have to bother with retries or mapping a dummy page to ensure we
- * don't just get the same mapping again.
- *
- * I'm sure we won't be so lucky with other IOMMUs, so FIXME.
- */
-int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr)
-{
- dma_addr_t phys;
-
- phys = dma_map_page(&hwdev->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(&hwdev->dev, phys))
- return -ENOMEM;
-
- if (!phys) {
- dma_unmap_page(&hwdev->dev, phys, PAGE_SIZE, DMA_FROM_DEVICE);
- phys = dma_map_page(&hwdev->dev, page, 0, PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&hwdev->dev, phys))
- return -ENOMEM;
- /*
- * FIXME: If we get 0 again, we should keep this page,
- * map another, then free the 0 page.
- */
- }
- *daddr = phys;
- return 0;
-}
-
-/**
- * qib_get_user_pages - lock user pages into memory
- * @start_page: the start page
- * @num_pages: the number of pages
- * @p: the output page structures
- *
- * This function takes a given start page (a page-aligned user virtual
- * address) and pins it and the following specified number of pages. For
- * now, num_pages is always 1, but that will probably change at some point
- * (because the caller is doing expected sends on a single virtually
- * contiguous buffer, so we can do all pages at once).
- */
-int qib_get_user_pages(unsigned long start_page, size_t num_pages,
- struct page **p)
-{
- unsigned long locked, lock_limit;
- size_t got;
- int ret;
-
- lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- locked = atomic64_add_return(num_pages, &current->mm->pinned_vm);
-
- if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
- ret = -ENOMEM;
- goto bail;
- }
-
- mmap_read_lock(current->mm);
- for (got = 0; got < num_pages; got += ret) {
- ret = pin_user_pages(start_page + got * PAGE_SIZE,
- num_pages - got,
- FOLL_LONGTERM | FOLL_WRITE,
- p + got);
- if (ret < 0) {
- mmap_read_unlock(current->mm);
- goto bail_release;
- }
- }
- mmap_read_unlock(current->mm);
-
- return 0;
-bail_release:
- __qib_release_user_pages(p, got, 0);
-bail:
- atomic64_sub(num_pages, &current->mm->pinned_vm);
- return ret;
-}
-
-void qib_release_user_pages(struct page **p, size_t num_pages)
-{
- __qib_release_user_pages(p, num_pages, 1);
-
- /* during close after signal, mm can be NULL */
- if (current->mm)
- atomic64_sub(num_pages, &current->mm->pinned_vm);
-}
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
deleted file mode 100644
index 336eb15a721f..000000000000
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ /dev/null
@@ -1,1470 +0,0 @@
-/*
- * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/dmapool.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/highmem.h>
-#include <linux/io.h>
-#include <linux/uio.h>
-#include <linux/rbtree.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-
-#include "qib.h"
-#include "qib_user_sdma.h"
-
-/* minimum size of header */
-#define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
-/* expected size of headers (for dma_pool) */
-#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
-/* attempt to drain the queue for 5 seconds (250 iterations of a 20 ms sleep) */
-#define QIB_USER_SDMA_DRAIN_TIMEOUT 250
-
-/*
- * track how many times a process opens this driver.
- */
-static struct rb_root qib_user_sdma_rb_root = RB_ROOT;
-
-struct qib_user_sdma_rb_node {
- struct rb_node node;
- int refcount;
- pid_t pid;
-};
-
-struct qib_user_sdma_pkt {
- struct list_head list; /* list element */
-
- u8 tiddma; /* if this is NEW tid-sdma */
- u8 largepkt; /* this is large pkt from kmalloc */
- u16 frag_size; /* frag size used by PSM */
- u16 index; /* last header index or push index */
- u16 naddr; /* dimension of addr (1..3) ... */
- u16 addrlimit; /* addr array size */
- u16 tidsmidx; /* current tidsm index */
- u16 tidsmcount; /* tidsm array item count */
- u16 payload_size; /* payload size so far for header */
- u32 bytes_togo; /* bytes for processing */
- u32 counter; /* sdma pkts queued counter for this entry */
- struct qib_tid_session_member *tidsm; /* tid session member array */
- struct qib_user_sdma_queue *pq; /* which pq this pkt belongs to */
- u64 added; /* global descq number of entries */
-
- struct {
- u16 offset; /* offset for kvaddr, addr */
- u16 length; /* length in page */
- u16 first_desc; /* first desc */
- u16 last_desc; /* last desc */
- u16 put_page; /* should we put_page? */
- u16 dma_mapped; /* is page dma_mapped? */
- u16 dma_length; /* for dma_unmap_page() */
- u16 padding;
- struct page *page; /* may be NULL (coherent mem) */
- void *kvaddr; /* FIXME: only for pio hack */
- dma_addr_t addr;
- } addr[4]; /* max pages, any more and we coalesce */
-};
-
-struct qib_user_sdma_queue {
- /*
- * pkts sent to the dma engine are queued on this
- * list head. the elements of this list are
- * struct qib_user_sdma_pkt...
- */
- struct list_head sent;
-
- /*
- * Because the above list will be accessed by both process
- * context and the interrupt handler, we need a spinlock for it.
- */
- spinlock_t sent_lock ____cacheline_aligned_in_smp;
-
- /* headers with expected length are allocated from here... */
- char header_cache_name[64];
- struct dma_pool *header_cache;
-
- /* packets are allocated from the slab cache... */
- char pkt_slab_name[64];
- struct kmem_cache *pkt_slab;
-
- /* as packets go on the queued queue, they are counted... */
- u32 counter;
- u32 sent_counter;
- /* pending packets, not sending yet */
- u32 num_pending;
- /* sending packets, not complete yet */
- u32 num_sending;
- /* global descq number of entry of last sending packet */
- u64 added;
-
- /* dma page table */
- struct rb_root dma_pages_root;
-
- struct qib_user_sdma_rb_node *sdma_rb_node;
-
- /* protect everything above... */
- struct mutex lock;
-};
-
-static struct qib_user_sdma_rb_node *
-qib_user_sdma_rb_search(struct rb_root *root, pid_t pid)
-{
- struct qib_user_sdma_rb_node *sdma_rb_node;
- struct rb_node *node = root->rb_node;
-
- while (node) {
- sdma_rb_node = rb_entry(node, struct qib_user_sdma_rb_node,
- node);
- if (pid < sdma_rb_node->pid)
- node = node->rb_left;
- else if (pid > sdma_rb_node->pid)
- node = node->rb_right;
- else
- return sdma_rb_node;
- }
- return NULL;
-}
-
-static int
-qib_user_sdma_rb_insert(struct rb_root *root, struct qib_user_sdma_rb_node *new)
-{
- struct rb_node **node = &(root->rb_node);
- struct rb_node *parent = NULL;
- struct qib_user_sdma_rb_node *got;
-
- while (*node) {
- got = rb_entry(*node, struct qib_user_sdma_rb_node, node);
- parent = *node;
- if (new->pid < got->pid)
- node = &((*node)->rb_left);
- else if (new->pid > got->pid)
- node = &((*node)->rb_right);
- else
- return 0;
- }
-
- rb_link_node(&new->node, parent, node);
- rb_insert_color(&new->node, root);
- return 1;
-}
-
-struct qib_user_sdma_queue *
-qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
-{
- struct qib_user_sdma_queue *pq =
- kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);
- struct qib_user_sdma_rb_node *sdma_rb_node;
-
- if (!pq)
- goto done;
-
- pq->counter = 0;
- pq->sent_counter = 0;
- pq->num_pending = 0;
- pq->num_sending = 0;
- pq->added = 0;
- pq->sdma_rb_node = NULL;
-
- INIT_LIST_HEAD(&pq->sent);
- spin_lock_init(&pq->sent_lock);
- mutex_init(&pq->lock);
-
- snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
- "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
- pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
- sizeof(struct qib_user_sdma_pkt),
- 0, 0, NULL);
-
- if (!pq->pkt_slab)
- goto err_kfree;
-
- snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
- "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
- pq->header_cache = dma_pool_create(pq->header_cache_name,
- dev,
- QIB_USER_SDMA_EXP_HEADER_LENGTH,
- 4, 0);
- if (!pq->header_cache)
- goto err_slab;
-
- pq->dma_pages_root = RB_ROOT;
-
- sdma_rb_node = qib_user_sdma_rb_search(&qib_user_sdma_rb_root,
- current->pid);
- if (sdma_rb_node) {
- sdma_rb_node->refcount++;
- } else {
- sdma_rb_node = kmalloc(sizeof(
- struct qib_user_sdma_rb_node), GFP_KERNEL);
- if (!sdma_rb_node)
- goto err_rb;
-
- sdma_rb_node->refcount = 1;
- sdma_rb_node->pid = current->pid;
-
- qib_user_sdma_rb_insert(&qib_user_sdma_rb_root, sdma_rb_node);
- }
- pq->sdma_rb_node = sdma_rb_node;
-
- goto done;
-
-err_rb:
- dma_pool_destroy(pq->header_cache);
-err_slab:
- kmem_cache_destroy(pq->pkt_slab);
-err_kfree:
- kfree(pq);
- pq = NULL;
-
-done:
- return pq;
-}
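
A hedged lifecycle sketch for the queue object created above; in the driver this presumably happens from the character-device context setup and teardown paths, but the wrapper shown here is hypothetical.

static int example_queue_lifecycle(struct qib_devdata *dd, int unit,
				   int ctxt, int sctxt)
{
	struct qib_user_sdma_queue *pq =
		qib_user_sdma_queue_create(&dd->pcidev->dev,
					   unit, ctxt, sctxt);

	if (!pq)
		return -ENOMEM;
	/* ... qib_user_sdma_writev() calls happen here ... */
	qib_user_sdma_queue_destroy(pq);	/* drops the per-pid rb node ref */
	return 0;
}
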
-
-static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
- int i, u16 offset, u16 len,
- u16 first_desc, u16 last_desc,
- u16 put_page, u16 dma_mapped,
- struct page *page, void *kvaddr,
- dma_addr_t dma_addr, u16 dma_length)
-{
- pkt->addr[i].offset = offset;
- pkt->addr[i].length = len;
- pkt->addr[i].first_desc = first_desc;
- pkt->addr[i].last_desc = last_desc;
- pkt->addr[i].put_page = put_page;
- pkt->addr[i].dma_mapped = dma_mapped;
- pkt->addr[i].page = page;
- pkt->addr[i].kvaddr = kvaddr;
- pkt->addr[i].addr = dma_addr;
- pkt->addr[i].dma_length = dma_length;
-}
-
-static void *qib_user_sdma_alloc_header(struct qib_user_sdma_queue *pq,
- size_t len, dma_addr_t *dma_addr)
-{
- void *hdr;
-
- if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
- hdr = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
- dma_addr);
- else
- hdr = NULL;
-
- if (!hdr) {
- hdr = kmalloc(len, GFP_KERNEL);
- if (!hdr)
- return NULL;
-
- *dma_addr = 0;
- }
-
- return hdr;
-}
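
Note the sentinel: a header from the dma_pool comes back with a real bus address, while the kmalloc() fallback sets *dma_addr to 0. The free side must key off that, as in this hedged sketch (the helper name is hypothetical; the real logic lives in qib_user_sdma_free_pkt_frag() and the free_pbc error path below).

static void example_free_header(struct qib_user_sdma_queue *pq,
				void *hdr, dma_addr_t dma_addr)
{
	if (dma_addr)			/* came from the dma_pool */
		dma_pool_free(pq->header_cache, hdr, dma_addr);
	else				/* came from kmalloc() */
		kfree(hdr);
}
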
-
-static int qib_user_sdma_page_to_frags(const struct qib_devdata *dd,
- struct qib_user_sdma_queue *pq,
- struct qib_user_sdma_pkt *pkt,
- struct page *page, u16 put,
- u16 offset, u16 len, void *kvaddr)
-{
- __le16 *pbc16;
- void *pbcvaddr;
- struct qib_message_header *hdr;
- u16 newlen, pbclen, lastdesc, dma_mapped;
- u32 vcto;
- union qib_seqnum seqnum;
- dma_addr_t pbcdaddr;
- dma_addr_t dma_addr =
- dma_map_page(&dd->pcidev->dev,
- page, offset, len, DMA_TO_DEVICE);
- int ret = 0;
-
- if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
- /*
- * dma mapping error, pkt has not managed
- * this page yet, return the page here so
- * the caller can ignore this page.
- */
- if (put) {
- unpin_user_page(page);
- } else {
- /* coalesce case */
- __free_page(page);
- }
- ret = -ENOMEM;
- goto done;
- }
- offset = 0;
- dma_mapped = 1;
-
-next_fragment:
-
- /*
- * In tid-sdma, the transfer length is restricted by
- * the receiver side's current tid page length.
- */
- if (pkt->tiddma && len > pkt->tidsm[pkt->tidsmidx].length)
- newlen = pkt->tidsm[pkt->tidsmidx].length;
- else
- newlen = len;
-
- /*
- * Then the transfer length is restricted by the MTU.
- * The last descriptor flag is set when:
- * 1. the current packet reaches frag_size length;
- * 2. the current tid page is done, if tid-sdma;
- * 3. there are no more bytes to go, if plain sdma.
- */
- lastdesc = 0;
- if ((pkt->payload_size + newlen) >= pkt->frag_size) {
- newlen = pkt->frag_size - pkt->payload_size;
- lastdesc = 1;
- } else if (pkt->tiddma) {
- if (newlen == pkt->tidsm[pkt->tidsmidx].length)
- lastdesc = 1;
- } else {
- if (newlen == pkt->bytes_togo)
- lastdesc = 1;
- }
-
- /* fill the next fragment in this page */
- qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
- offset, newlen, /* offset, len */
- 0, lastdesc, /* first last desc */
- put, dma_mapped, /* put page, dma mapped */
- page, kvaddr, /* struct page, virt addr */
- dma_addr, len); /* dma addr, dma length */
- pkt->bytes_togo -= newlen;
- pkt->payload_size += newlen;
- pkt->naddr++;
- if (pkt->naddr == pkt->addrlimit) {
- ret = -EFAULT;
- goto done;
- }
-
- /* If there are no more bytes to go (lastdesc == 1). */
- if (pkt->bytes_togo == 0) {
- /* The packet is done; the header is not dma mapped yet,
- * so it must have come from kmalloc. */
- if (!pkt->addr[pkt->index].addr) {
- pkt->addr[pkt->index].addr =
- dma_map_single(&dd->pcidev->dev,
- pkt->addr[pkt->index].kvaddr,
- pkt->addr[pkt->index].dma_length,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&dd->pcidev->dev,
- pkt->addr[pkt->index].addr)) {
- ret = -ENOMEM;
- goto done;
- }
- pkt->addr[pkt->index].dma_mapped = 1;
- }
-
- goto done;
- }
-
- /* If tid-sdma, advance tid info. */
- if (pkt->tiddma) {
- pkt->tidsm[pkt->tidsmidx].length -= newlen;
- if (pkt->tidsm[pkt->tidsmidx].length) {
- pkt->tidsm[pkt->tidsmidx].offset += newlen;
- } else {
- pkt->tidsmidx++;
- if (pkt->tidsmidx == pkt->tidsmcount) {
- ret = -EFAULT;
- goto done;
- }
- }
- }
-
- /*
- * If this is NOT the last descriptor (newlen == len),
- * the current packet is not done yet, but the current
- * send-side page is done.
- */
- if (lastdesc == 0)
- goto done;
-
- /*
- * When running this driver under PSM with a message size
- * that fits into one transfer unit, it is not possible
- * to reach this line; if we do, it is a bug.
- */
-
- /*
- * Since the current packet is done and there are more
- * bytes to go, we need to create a new sdma header by
- * copying the previous sdma header, then modify both.
- */
- pbclen = pkt->addr[pkt->index].length;
- pbcvaddr = qib_user_sdma_alloc_header(pq, pbclen, &pbcdaddr);
- if (!pbcvaddr) {
- ret = -ENOMEM;
- goto done;
- }
- /* Copy the previous sdma header to new sdma header */
- pbc16 = (__le16 *)pkt->addr[pkt->index].kvaddr;
- memcpy(pbcvaddr, pbc16, pbclen);
-
- /* Modify the previous sdma header */
- hdr = (struct qib_message_header *)&pbc16[4];
-
- /* New pbc length */
- pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->bytes_togo>>2));
-
- /* New packet length */
- hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));
-
- if (pkt->tiddma) {
- /* turn on the header suppression */
- hdr->iph.pkt_flags =
- cpu_to_le16(le16_to_cpu(hdr->iph.pkt_flags)|0x2);
- /* turn off ACK_REQ: 0x04 and EXPECTED_DONE: 0x20 */
- hdr->flags &= ~(0x04|0x20);
- } else {
- /* turn off extra bytes: 20-21 bits */
- hdr->bth[0] = cpu_to_be32(be32_to_cpu(hdr->bth[0])&0xFFCFFFFF);
- /* turn off ACK_REQ: 0x04 */
- hdr->flags &= ~(0x04);
- }
-
- /* New kdeth checksum */
- vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
- hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
- be16_to_cpu(hdr->lrh[2]) -
- ((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
- le16_to_cpu(hdr->iph.pkt_flags));
-
- /* The packet is done; the header is not dma mapped yet,
- * so it must have come from kmalloc. */
- if (!pkt->addr[pkt->index].addr) {
- pkt->addr[pkt->index].addr =
- dma_map_single(&dd->pcidev->dev,
- pkt->addr[pkt->index].kvaddr,
- pkt->addr[pkt->index].dma_length,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&dd->pcidev->dev,
- pkt->addr[pkt->index].addr)) {
- ret = -ENOMEM;
- goto done;
- }
- pkt->addr[pkt->index].dma_mapped = 1;
- }
-
- /* Modify the new sdma header */
- pbc16 = (__le16 *)pbcvaddr;
- hdr = (struct qib_message_header *)&pbc16[4];
-
- /* New pbc length */
- pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->payload_size>>2));
-
- /* New packet length */
- hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));
-
- if (pkt->tiddma) {
- /* Set new tid and offset for new sdma header */
- hdr->iph.ver_ctxt_tid_offset = cpu_to_le32(
- (le32_to_cpu(hdr->iph.ver_ctxt_tid_offset)&0xFF000000) +
- (pkt->tidsm[pkt->tidsmidx].tid<<QLOGIC_IB_I_TID_SHIFT) +
- (pkt->tidsm[pkt->tidsmidx].offset>>2));
- } else {
- /* Middle protocol new packet offset */
- hdr->uwords[2] += pkt->payload_size;
- }
-
- /* New kdeth checksum */
- vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
- hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
- be16_to_cpu(hdr->lrh[2]) -
- ((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
- le16_to_cpu(hdr->iph.pkt_flags));
-
- /* Next sequence number in new sdma header */
- seqnum.val = be32_to_cpu(hdr->bth[2]);
- if (pkt->tiddma)
- seqnum.seq++;
- else
- seqnum.pkt++;
- hdr->bth[2] = cpu_to_be32(seqnum.val);
-
- /* Init new sdma header. */
- qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
- 0, pbclen, /* offset, len */
- 1, 0, /* first last desc */
- 0, 0, /* put page, dma mapped */
- NULL, pbcvaddr, /* struct page, virt addr */
- pbcdaddr, pbclen); /* dma addr, dma length */
- pkt->index = pkt->naddr;
- pkt->payload_size = 0;
- pkt->naddr++;
- if (pkt->naddr == pkt->addrlimit) {
- ret = -EFAULT;
- goto done;
- }
-
- /* Prepare for next fragment in this page */
- if (newlen != len) {
- if (dma_mapped) {
- put = 0;
- dma_mapped = 0;
- page = NULL;
- kvaddr = NULL;
- }
- len -= newlen;
- offset += newlen;
-
- goto next_fragment;
- }
-
-done:
- return ret;
-}
-
-/* we have too many pages in the iovec, coalesce to a single page */
-static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
- struct qib_user_sdma_queue *pq,
- struct qib_user_sdma_pkt *pkt,
- const struct iovec *iov,
- unsigned long niov)
-{
- int ret = 0;
- struct page *page = alloc_page(GFP_KERNEL);
- void *mpage_save;
- char *mpage;
- int i;
- int len = 0;
-
- if (!page) {
- ret = -ENOMEM;
- goto done;
- }
-
- mpage = page_address(page);
- mpage_save = mpage;
- for (i = 0; i < niov; i++) {
- int cfur;
-
- cfur = copy_from_user(mpage,
- iov[i].iov_base, iov[i].iov_len);
- if (cfur) {
- ret = -EFAULT;
- goto page_free;
- }
-
- mpage += iov[i].iov_len;
- len += iov[i].iov_len;
- }
-
- ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
- page, 0, 0, len, mpage_save);
- goto done;
-
-page_free:
- __free_page(page);
-done:
- return ret;
-}
-
-/*
- * How many pages in this iovec element?
- */
-static size_t qib_user_sdma_num_pages(const struct iovec *iov)
-{
- const unsigned long addr = (unsigned long) iov->iov_base;
- const unsigned long len = iov->iov_len;
- const unsigned long spage = addr & PAGE_MASK;
- const unsigned long epage = (addr + len - 1) & PAGE_MASK;
-
- return 1 + ((epage - spage) >> PAGE_SHIFT);
-}
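
A worked example of the arithmetic above, assuming PAGE_SIZE == 4096: iov_base == 0x1ff0 with iov_len == 0x20 ends at byte 0x200f, so spage == 0x1000 and epage == 0x2000, and the function returns 1 + (0x1000 >> 12) == 2 -- a 32-byte buffer that straddles a page boundary still needs two pages.
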
-
-static void qib_user_sdma_free_pkt_frag(struct device *dev,
- struct qib_user_sdma_queue *pq,
- struct qib_user_sdma_pkt *pkt,
- int frag)
-{
- const int i = frag;
-
- if (pkt->addr[i].page) {
- /* only user data has page */
- if (pkt->addr[i].dma_mapped)
- dma_unmap_page(dev,
- pkt->addr[i].addr,
- pkt->addr[i].dma_length,
- DMA_TO_DEVICE);
-
- if (pkt->addr[i].put_page)
- unpin_user_page(pkt->addr[i].page);
- else
- __free_page(pkt->addr[i].page);
- } else if (pkt->addr[i].kvaddr) {
- /* for headers */
- if (pkt->addr[i].dma_mapped) {
- /* from kmalloc & dma mapped */
- dma_unmap_single(dev,
- pkt->addr[i].addr,
- pkt->addr[i].dma_length,
- DMA_TO_DEVICE);
- kfree(pkt->addr[i].kvaddr);
- } else if (pkt->addr[i].addr) {
- /* free coherent mem from cache... */
- dma_pool_free(pq->header_cache,
- pkt->addr[i].kvaddr, pkt->addr[i].addr);
- } else {
- /* from kmalloc but not dma mapped */
- kfree(pkt->addr[i].kvaddr);
- }
- }
-}
-
-/* pin the iovec pages and add them to the packet; returns 0 or -errno */
-static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
- struct qib_user_sdma_queue *pq,
- struct qib_user_sdma_pkt *pkt,
- unsigned long addr, int tlen, size_t npages)
-{
- struct page *pages[8];
- int i, j;
- int ret = 0;
-
- while (npages) {
- if (npages > 8)
- j = 8;
- else
- j = npages;
-
- ret = pin_user_pages_fast(addr, j, FOLL_LONGTERM, pages);
- if (ret != j) {
- i = 0;
- j = ret;
- ret = -ENOMEM;
- goto free_pages;
- }
-
- for (i = 0; i < j; i++) {
- /* map the pages... */
- unsigned long fofs = addr & ~PAGE_MASK;
- int flen = ((fofs + tlen) > PAGE_SIZE) ?
- (PAGE_SIZE - fofs) : tlen;
-
- ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
- pages[i], 1, fofs, flen, NULL);
- if (ret < 0) {
- /* the current page has been taken
- * care of inside the above call.
- */
- i++;
- goto free_pages;
- }
-
- addr += flen;
- tlen -= flen;
- }
-
- npages -= j;
- }
-
- goto done;
-
- /* if error, return all pages not managed by pkt */
-free_pages:
- while (i < j)
- unpin_user_page(pages[i++]);
-
-done:
- return ret;
-}
-
-static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
- struct qib_user_sdma_queue *pq,
- struct qib_user_sdma_pkt *pkt,
- const struct iovec *iov,
- unsigned long niov)
-{
- int ret = 0;
- unsigned long idx;
-
- for (idx = 0; idx < niov; idx++) {
- const size_t npages = qib_user_sdma_num_pages(iov + idx);
- const unsigned long addr = (unsigned long) iov[idx].iov_base;
-
- ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
- iov[idx].iov_len, npages);
- if (ret < 0)
- goto free_pkt;
- }
-
- goto done;
-
-free_pkt:
- /* we need to ignore the first entry here */
- for (idx = 1; idx < pkt->naddr; idx++)
- qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
-
- /* we need to dma unmap the first entry to restore it to its
- * original state, so that the caller can free the memory on
- * error; the caller does not know whether it was dma mapped. */
- if (pkt->addr[0].dma_mapped) {
- dma_unmap_single(&dd->pcidev->dev,
- pkt->addr[0].addr,
- pkt->addr[0].dma_length,
- DMA_TO_DEVICE);
- pkt->addr[0].addr = 0;
- pkt->addr[0].dma_mapped = 0;
- }
-
-done:
- return ret;
-}
-
-static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
- struct qib_user_sdma_queue *pq,
- struct qib_user_sdma_pkt *pkt,
- const struct iovec *iov,
- unsigned long niov, int npages)
-{
- int ret = 0;
-
- if (pkt->frag_size == pkt->bytes_togo &&
- npages >= ARRAY_SIZE(pkt->addr))
- ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov);
- else
- ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
-
- return ret;
-}
-
-/* free a packet list; callers that need the last packet's counter snapshot it first */
-static void qib_user_sdma_free_pkt_list(struct device *dev,
- struct qib_user_sdma_queue *pq,
- struct list_head *list)
-{
- struct qib_user_sdma_pkt *pkt, *pkt_next;
-
- list_for_each_entry_safe(pkt, pkt_next, list, list) {
- int i;
-
- for (i = 0; i < pkt->naddr; i++)
- qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);
-
- if (pkt->largepkt)
- kfree(pkt);
- else
- kmem_cache_free(pq->pkt_slab, pkt);
- }
- INIT_LIST_HEAD(list);
-}
-
-/*
- * copy headers, coalesce etc -- pq->lock must be held
- *
- * we queue all the packets onto list, returning the number
- * of iovec entries consumed. list must be empty initially,
- * since we clean it if there is an error...
- */
-static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
- struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq,
- const struct iovec *iov,
- unsigned long niov,
- struct list_head *list,
- int *maxpkts, int *ndesc)
-{
- unsigned long idx = 0;
- int ret = 0;
- int npkts = 0;
- __le32 *pbc;
- dma_addr_t dma_addr;
- struct qib_user_sdma_pkt *pkt = NULL;
- size_t len;
- size_t nw;
- u32 counter = pq->counter;
- u16 frag_size;
-
- while (idx < niov && npkts < *maxpkts) {
- const unsigned long addr = (unsigned long) iov[idx].iov_base;
- const unsigned long idx_save = idx;
- unsigned pktnw;
- unsigned pktnwc;
- int nfrags = 0;
- size_t npages = 0;
- size_t bytes_togo = 0;
- int tiddma = 0;
- int cfur;
-
- len = iov[idx].iov_len;
- nw = len >> 2;
-
- if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
- len > PAGE_SIZE || len & 3 || addr & 3) {
- ret = -EINVAL;
- goto free_list;
- }
-
- pbc = qib_user_sdma_alloc_header(pq, len, &dma_addr);
- if (!pbc) {
- ret = -ENOMEM;
- goto free_list;
- }
-
- cfur = copy_from_user(pbc, iov[idx].iov_base, len);
- if (cfur) {
- ret = -EFAULT;
- goto free_pbc;
- }
-
- /*
- * This assignment is a bit strange; it's because
- * the pbc counts the number of 32 bit words in the full
- * packet _except_ the first word of the pbc itself...
- */
- pktnwc = nw - 1;
-
- /*
- * The pktnw computation yields the number of 32 bit words
- * that the caller has indicated in the PBC. Note that
- * this is one less than the total number of words that
- * go to the send DMA engine, as the first 32 bit word
- * of the PBC itself is not counted. Armed with this count,
- * we can verify that the packet is consistent with the
- * iovec lengths.
- */
- pktnw = le32_to_cpu(*pbc) & 0xFFFF;
- if (pktnw < pktnwc) {
- ret = -EINVAL;
- goto free_pbc;
- }
-
- idx++;
- while (pktnwc < pktnw && idx < niov) {
- const size_t slen = iov[idx].iov_len;
- const unsigned long faddr =
- (unsigned long) iov[idx].iov_base;
-
- if (slen & 3 || faddr & 3 || !slen) {
- ret = -EINVAL;
- goto free_pbc;
- }
-
- npages += qib_user_sdma_num_pages(&iov[idx]);
-
- if (check_add_overflow(bytes_togo, slen, &bytes_togo) ||
- bytes_togo > type_max(typeof(pkt->bytes_togo))) {
- ret = -EINVAL;
- goto free_pbc;
- }
- pktnwc += slen >> 2;
- idx++;
- nfrags++;
- }
-
- if (pktnwc != pktnw) {
- ret = -EINVAL;
- goto free_pbc;
- }
-
- frag_size = ((le32_to_cpu(*pbc))>>16) & 0xFFFF;
- if (((frag_size ? frag_size : bytes_togo) + len) >
- ppd->ibmaxlen) {
- ret = -EINVAL;
- goto free_pbc;
- }
-
- if (frag_size) {
- size_t tidsmsize, n, pktsize, sz, addrlimit;
-
- n = npages*((2*PAGE_SIZE/frag_size)+1);
- pktsize = struct_size(pkt, addr, n);
-
- /*
- * Determine if this is tid-sdma or just sdma.
- */
- tiddma = (((le32_to_cpu(pbc[7])>>
- QLOGIC_IB_I_TID_SHIFT)&
- QLOGIC_IB_I_TID_MASK) !=
- QLOGIC_IB_I_TID_MASK);
-
- if (tiddma)
- tidsmsize = iov[idx].iov_len;
- else
- tidsmsize = 0;
-
- if (check_add_overflow(pktsize, tidsmsize, &sz)) {
- ret = -EINVAL;
- goto free_pbc;
- }
- pkt = kmalloc(sz, GFP_KERNEL);
- if (!pkt) {
- ret = -ENOMEM;
- goto free_pbc;
- }
- pkt->largepkt = 1;
- pkt->frag_size = frag_size;
- if (check_add_overflow(n, ARRAY_SIZE(pkt->addr),
- &addrlimit) ||
- addrlimit > type_max(typeof(pkt->addrlimit))) {
- ret = -EINVAL;
- goto free_pkt;
- }
- pkt->addrlimit = addrlimit;
-
- if (tiddma) {
- char *tidsm = (char *)pkt + pktsize;
-
- cfur = copy_from_user(tidsm,
- iov[idx].iov_base, tidsmsize);
- if (cfur) {
- ret = -EFAULT;
- goto free_pkt;
- }
- pkt->tidsm =
- (struct qib_tid_session_member *)tidsm;
- pkt->tidsmcount = tidsmsize/
- sizeof(struct qib_tid_session_member);
- pkt->tidsmidx = 0;
- idx++;
- }
-
- /*
- * The pbc 'fill1' field is borrowed to pass the frag size;
- * we need to clear it after picking up the frag size, since
- * the hardware requires this field to be zero.
- */
- *pbc = cpu_to_le32(le32_to_cpu(*pbc) & 0x0000FFFF);
- } else {
- pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
- if (!pkt) {
- ret = -ENOMEM;
- goto free_pbc;
- }
- pkt->largepkt = 0;
- pkt->frag_size = bytes_togo;
- pkt->addrlimit = ARRAY_SIZE(pkt->addr);
- }
- pkt->bytes_togo = bytes_togo;
- pkt->payload_size = 0;
- pkt->counter = counter;
- pkt->tiddma = tiddma;
-
- /* setup the first header */
- qib_user_sdma_init_frag(pkt, 0, /* index */
- 0, len, /* offset, len */
- 1, 0, /* first last desc */
- 0, 0, /* put page, dma mapped */
- NULL, pbc, /* struct page, virt addr */
- dma_addr, len); /* dma addr, dma length */
- pkt->index = 0;
- pkt->naddr = 1;
-
- if (nfrags) {
- ret = qib_user_sdma_init_payload(dd, pq, pkt,
- iov + idx_save + 1,
- nfrags, npages);
- if (ret < 0)
- goto free_pkt;
- } else {
- /* since there is no payload, mark the
- * header as the last desc. */
- pkt->addr[0].last_desc = 1;
-
- if (dma_addr == 0) {
- /*
- * the header is not dma mapped yet.
- * it should be from kmalloc.
- */
- dma_addr = dma_map_single(&dd->pcidev->dev,
- pbc, len, DMA_TO_DEVICE);
- if (dma_mapping_error(&dd->pcidev->dev,
- dma_addr)) {
- ret = -ENOMEM;
- goto free_pkt;
- }
- pkt->addr[0].addr = dma_addr;
- pkt->addr[0].dma_mapped = 1;
- }
- }
-
- counter++;
- npkts++;
- pkt->pq = pq;
- pkt->index = 0; /* reset index for push on hw */
- *ndesc += pkt->naddr;
-
- list_add_tail(&pkt->list, list);
- }
-
- *maxpkts = npkts;
- ret = idx;
- goto done;
-
-free_pkt:
- if (pkt->largepkt)
- kfree(pkt);
- else
- kmem_cache_free(pq->pkt_slab, pkt);
-free_pbc:
- if (dma_addr)
- dma_pool_free(pq->header_cache, pbc, dma_addr);
- else
- kfree(pbc);
-free_list:
- qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
-done:
- return ret;
-}
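
A worked example of the PBC word-count validation above (values illustrative): a 64-byte header iovec gives nw == 16, so pktnwc starts at 15 because the first PBC word is never counted. If the low halfword of the PBC claims pktnw == 1039, the payload iovecs must contribute exactly (1039 - 15) * 4 == 4096 bytes; anything else leaves pktnwc != pktnw and the packet is rejected with -EINVAL.
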
-
-static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
- u32 c)
-{
- pq->sent_counter = c;
-}
-
-/* try to clean out queue -- needs pq->lock */
-static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq)
-{
- struct qib_devdata *dd = ppd->dd;
- struct list_head free_list;
- struct qib_user_sdma_pkt *pkt;
- struct qib_user_sdma_pkt *pkt_prev;
- unsigned long flags;
- int ret = 0;
-
- if (!pq->num_sending)
- return 0;
-
- INIT_LIST_HEAD(&free_list);
-
- /*
- * We need this spin lock here because the interrupt handler
- * might modify this list in qib_user_sdma_send_desc(); also,
- * we must not be interrupted while holding it, otherwise we
- * would deadlock.
- */
- spin_lock_irqsave(&pq->sent_lock, flags);
- list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
- s64 descd = ppd->sdma_descq_removed - pkt->added;
-
- if (descd < 0)
- break;
-
- list_move_tail(&pkt->list, &free_list);
-
- /* one more packet cleaned */
- ret++;
- pq->num_sending--;
- }
- spin_unlock_irqrestore(&pq->sent_lock, flags);
-
- if (!list_empty(&free_list)) {
- u32 counter;
-
- pkt = list_entry(free_list.prev,
- struct qib_user_sdma_pkt, list);
- counter = pkt->counter;
-
- qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
- qib_user_sdma_set_complete_counter(pq, counter);
- }
-
- return ret;
-}
-
-void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
-{
- if (!pq)
- return;
-
- pq->sdma_rb_node->refcount--;
- if (pq->sdma_rb_node->refcount == 0) {
- rb_erase(&pq->sdma_rb_node->node, &qib_user_sdma_rb_root);
- kfree(pq->sdma_rb_node);
- }
- dma_pool_destroy(pq->header_cache);
- kmem_cache_destroy(pq->pkt_slab);
- kfree(pq);
-}
-
-/* clean descriptor queue, returns > 0 if some elements cleaned */
-static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
-{
- int ret;
- unsigned long flags;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- ret = qib_sdma_make_progress(ppd);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-
- return ret;
-}
-
-/* we're in close, drain packets so that we can clean up successfully... */
-void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq)
-{
- struct qib_devdata *dd = ppd->dd;
- unsigned long flags;
- int i;
-
- if (!pq)
- return;
-
- for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
- mutex_lock(&pq->lock);
- if (!pq->num_pending && !pq->num_sending) {
- mutex_unlock(&pq->lock);
- break;
- }
- qib_user_sdma_hwqueue_clean(ppd);
- qib_user_sdma_queue_clean(ppd, pq);
- mutex_unlock(&pq->lock);
- msleep(20);
- }
-
- if (pq->num_pending || pq->num_sending) {
- struct qib_user_sdma_pkt *pkt;
- struct qib_user_sdma_pkt *pkt_prev;
- struct list_head free_list;
-
- mutex_lock(&pq->lock);
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- /*
- * Since we hold sdma_lock, it is safe without sent_lock.
- */
- if (pq->num_pending) {
- list_for_each_entry_safe(pkt, pkt_prev,
- &ppd->sdma_userpending, list) {
- if (pkt->pq == pq) {
- list_move_tail(&pkt->list, &pq->sent);
- pq->num_pending--;
- pq->num_sending++;
- }
- }
- }
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
-
- qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
- INIT_LIST_HEAD(&free_list);
- list_splice_init(&pq->sent, &free_list);
- pq->num_sending = 0;
- qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
- mutex_unlock(&pq->lock);
- }
-}
-
-static inline __le64 qib_sdma_make_desc0(u8 gen,
- u64 addr, u64 dwlen, u64 dwoffset)
-{
- return cpu_to_le64(/* SDmaPhyAddr[31:0] */
- ((addr & 0xfffffffcULL) << 32) |
- /* SDmaGeneration[1:0] */
- ((gen & 3ULL) << 30) |
- /* SDmaDwordCount[10:0] */
- ((dwlen & 0x7ffULL) << 16) |
- /* SDmaBufOffset[12:2] */
- (dwoffset & 0x7ffULL));
-}
-
-static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
-{
- return descq | cpu_to_le64(1ULL << 12);
-}
-
-static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
-{
- /* last */ /* dma head */
- return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
-}
-
-static inline __le64 qib_sdma_make_desc1(u64 addr)
-{
- /* SDmaPhyAddr[47:32] */
- return cpu_to_le64(addr >> 32);
-}
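
A hedged decode sketch for the descriptor built by the three helpers above; it exists purely to document the bit positions and is not part of the driver (take the __le64 through le64_to_cpu() first).

static inline void example_decode_desc0(u64 d, u32 *addr_lo, u8 *gen,
					u16 *dwlen, u16 *dwoffset)
{
	*addr_lo  = (d >> 32) & 0xfffffffc;	/* SDmaPhyAddr[31:2] */
	*gen      = (d >> 30) & 3;		/* SDmaGeneration[1:0] */
	*dwlen    = (d >> 16) & 0x7ff;		/* SDmaDwordCount[10:0] */
	*dwoffset = d & 0x7ff;			/* SDmaBufOffset[12:2] */
}
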
-
-static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
- struct qib_user_sdma_pkt *pkt, int idx,
- unsigned ofs, u16 tail, u8 gen)
-{
- const u64 addr = (u64) pkt->addr[idx].addr +
- (u64) pkt->addr[idx].offset;
- const u64 dwlen = (u64) pkt->addr[idx].length / 4;
- __le64 *descqp;
- __le64 descq0;
-
- descqp = &ppd->sdma_descq[tail].qw[0];
-
- descq0 = qib_sdma_make_desc0(gen, addr, dwlen, ofs);
- if (pkt->addr[idx].first_desc)
- descq0 = qib_sdma_make_first_desc0(descq0);
- if (pkt->addr[idx].last_desc) {
- descq0 = qib_sdma_make_last_desc0(descq0);
- if (ppd->sdma_intrequest) {
- descq0 |= cpu_to_le64(1ULL << 15);
- ppd->sdma_intrequest = 0;
- }
- }
-
- descqp[0] = descq0;
- descqp[1] = qib_sdma_make_desc1(addr);
-}
-
-void qib_user_sdma_send_desc(struct qib_pportdata *ppd,
- struct list_head *pktlist)
-{
- struct qib_devdata *dd = ppd->dd;
- u16 nfree, nsent;
- u16 tail, tail_c;
- u8 gen, gen_c;
-
- nfree = qib_sdma_descq_freecnt(ppd);
- if (!nfree)
- return;
-
-retry:
- nsent = 0;
- tail_c = tail = ppd->sdma_descq_tail;
- gen_c = gen = ppd->sdma_generation;
- while (!list_empty(pktlist)) {
- struct qib_user_sdma_pkt *pkt =
- list_entry(pktlist->next, struct qib_user_sdma_pkt,
- list);
- int i, j, c = 0;
- unsigned ofs = 0;
- u16 dtail = tail;
-
- for (i = pkt->index; i < pkt->naddr && nfree; i++) {
- qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail, gen);
- ofs += pkt->addr[i].length >> 2;
-
- if (++tail == ppd->sdma_descq_cnt) {
- tail = 0;
- ++gen;
- ppd->sdma_intrequest = 1;
- } else if (tail == (ppd->sdma_descq_cnt>>1)) {
- ppd->sdma_intrequest = 1;
- }
- nfree--;
- if (pkt->addr[i].last_desc == 0)
- continue;
-
- /*
- * If the packet is >= 2KB mtu equivalent, we
- * have to use the large buffers, and have to
- * mark each descriptor as part of a large
- * buffer packet.
- */
- if (ofs > dd->piosize2kmax_dwords) {
- for (j = pkt->index; j <= i; j++) {
- ppd->sdma_descq[dtail].qw[0] |=
- cpu_to_le64(1ULL << 14);
- if (++dtail == ppd->sdma_descq_cnt)
- dtail = 0;
- }
- }
- c += i + 1 - pkt->index;
- pkt->index = i + 1; /* index for next first */
- tail_c = dtail = tail;
- gen_c = gen;
- ofs = 0; /* reset for next packet */
- }
-
- ppd->sdma_descq_added += c;
- nsent += c;
- if (pkt->index == pkt->naddr) {
- pkt->added = ppd->sdma_descq_added;
- pkt->pq->added = pkt->added;
- pkt->pq->num_pending--;
- spin_lock(&pkt->pq->sent_lock);
- pkt->pq->num_sending++;
- list_move_tail(&pkt->list, &pkt->pq->sent);
- spin_unlock(&pkt->pq->sent_lock);
- }
- if (!nfree || (nsent<<2) > ppd->sdma_descq_cnt)
- break;
- }
-
- /* advance the tail on the chip if necessary */
- if (ppd->sdma_descq_tail != tail_c) {
- ppd->sdma_generation = gen_c;
- dd->f_sdma_update_tail(ppd, tail_c);
- }
-
- if (nfree && !list_empty(pktlist))
- goto retry;
-}
-
-/* pq->lock must be held, get packets on the wire... */
-static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq,
- struct list_head *pktlist, int count)
-{
- unsigned long flags;
-
- if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
- return -ECOMM;
-
- /* non-blocking mode */
- if (pq->sdma_rb_node->refcount > 1) {
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- if (unlikely(!__qib_sdma_running(ppd))) {
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- return -ECOMM;
- }
- pq->num_pending += count;
- list_splice_tail_init(pktlist, &ppd->sdma_userpending);
- qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- return 0;
- }
-
- /* In this case, descriptors from this process are not
- * linked to the ppd pending queue, and the interrupt handler
- * won't touch this process's state, so it is OK to modify it
- * directly without the sdma lock.
- */
-
- pq->num_pending += count;
- /*
- * Blocking mode for a single-rail process: we must
- * release/reacquire sdma_lock to give other processes
- * a chance to make progress. This is important for
- * performance.
- */
- do {
- spin_lock_irqsave(&ppd->sdma_lock, flags);
- if (unlikely(!__qib_sdma_running(ppd))) {
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- return -ECOMM;
- }
- qib_user_sdma_send_desc(ppd, pktlist);
- if (!list_empty(pktlist))
- qib_sdma_make_progress(ppd);
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
- } while (!list_empty(pktlist));
-
- return 0;
-}
-
-int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
- struct qib_user_sdma_queue *pq,
- const struct iovec *iov,
- unsigned long dim)
-{
- struct qib_devdata *dd = rcd->dd;
- struct qib_pportdata *ppd = rcd->ppd;
- int ret = 0;
- struct list_head list;
- int npkts = 0;
-
- INIT_LIST_HEAD(&list);
-
- mutex_lock(&pq->lock);
-
- /* why not -ECOMM like qib_user_sdma_push_pkts() below? */
- if (!qib_sdma_running(ppd))
- goto done_unlock;
-
- /* if I have packets not complete yet */
- if (pq->added > ppd->sdma_descq_removed)
- qib_user_sdma_hwqueue_clean(ppd);
- /* if I have complete packets to be freed */
- if (pq->num_sending)
- qib_user_sdma_queue_clean(ppd, pq);
-
- while (dim) {
- int mxp = 1;
- int ndesc = 0;
-
- ret = qib_user_sdma_queue_pkts(dd, ppd, pq,
- iov, dim, &list, &mxp, &ndesc);
- if (ret < 0)
- goto done_unlock;
- else {
- dim -= ret;
- iov += ret;
- }
-
- /* force packets onto the sdma hw queue... */
- if (!list_empty(&list)) {
- /*
- * Lazily clean hw queue.
- */
- if (qib_sdma_descq_freecnt(ppd) < ndesc) {
- qib_user_sdma_hwqueue_clean(ppd);
- if (pq->num_sending)
- qib_user_sdma_queue_clean(ppd, pq);
- }
-
- ret = qib_user_sdma_push_pkts(ppd, pq, &list, mxp);
- if (ret < 0)
- goto done_unlock;
- else {
- npkts += mxp;
- pq->counter += mxp;
- }
- }
- }
-
-done_unlock:
- if (!list_empty(&list))
- qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
- mutex_unlock(&pq->lock);
-
- return (ret < 0) ? ret : npkts;
-}
-
-int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq)
-{
- int ret = 0;
-
- mutex_lock(&pq->lock);
- qib_user_sdma_hwqueue_clean(ppd);
- ret = qib_user_sdma_queue_clean(ppd, pq);
- mutex_unlock(&pq->lock);
-
- return ret;
-}
-
-u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
-{
- return pq ? pq->sent_counter : 0;
-}
-
-u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
-{
- return pq ? pq->counter : 0;
-}
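
The two counters above are meant to be consumed as a pair; a hedged sketch of how a caller could size the outstanding backlog (wrap-safe unsigned subtraction, helper name hypothetical):

static inline u32 example_sdma_outstanding(struct qib_user_sdma_queue *pq)
{
	return qib_user_sdma_inflight_counter(pq) -
	       qib_user_sdma_complete_counter(pq);
}
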
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.h b/drivers/infiniband/hw/qib/qib_user_sdma.h
deleted file mode 100644
index ce8cbaf6a5c2..000000000000
--- a/drivers/infiniband/hw/qib/qib_user_sdma.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/device.h>
-
-struct qib_user_sdma_queue;
-
-struct qib_user_sdma_queue *
-qib_user_sdma_queue_create(struct device *dev, int unit, int port, int sport);
-void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq);
-
-int qib_user_sdma_writev(struct qib_ctxtdata *pd,
- struct qib_user_sdma_queue *pq,
- const struct iovec *iov,
- unsigned long dim);
-
-int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq);
-
-void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq);
-
-u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq);
-u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
deleted file mode 100644
index bab657f93084..000000000000
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ /dev/null
@@ -1,1705 +0,0 @@
-/*
- * Copyright (c) 2012 - 2018 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_mad.h>
-#include <rdma/ib_user_verbs.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/utsname.h>
-#include <linux/rculist.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <rdma/rdma_vt.h>
-
-#include "qib.h"
-#include "qib_common.h"
-
-static unsigned int ib_qib_qp_table_size = 256;
-module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
-MODULE_PARM_DESC(qp_table_size, "QP table size");
-
-static unsigned int qib_lkey_table_size = 16;
-module_param_named(lkey_table_size, qib_lkey_table_size, uint,
- S_IRUGO);
-MODULE_PARM_DESC(lkey_table_size,
- "LKEY table size in bits (2^n, 1 <= n <= 23)");
-
-static unsigned int ib_qib_max_pds = 0xFFFF;
-module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
-MODULE_PARM_DESC(max_pds,
- "Maximum number of protection domains to support");
-
-static unsigned int ib_qib_max_ahs = 0xFFFF;
-module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
-MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
-
-unsigned int ib_qib_max_cqes = 0x2FFFF;
-module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
-MODULE_PARM_DESC(max_cqes,
- "Maximum number of completion queue entries to support");
-
-unsigned int ib_qib_max_cqs = 0x1FFFF;
-module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
-MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
-
-unsigned int ib_qib_max_qp_wrs = 0x3FFF;
-module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
-MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
-
-unsigned int ib_qib_max_qps = 16384;
-module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
-MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
-
-unsigned int ib_qib_max_sges = 0x60;
-module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
-MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
-
-unsigned int ib_qib_max_mcast_grps = 16384;
-module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
-MODULE_PARM_DESC(max_mcast_grps,
- "Maximum number of multicast groups to support");
-
-unsigned int ib_qib_max_mcast_qp_attached = 16;
-module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
- uint, S_IRUGO);
-MODULE_PARM_DESC(max_mcast_qp_attached,
- "Maximum number of attached QPs to support");
-
-unsigned int ib_qib_max_srqs = 1024;
-module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
-MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
-
-unsigned int ib_qib_max_srq_sges = 128;
-module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
-MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
-
-unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
-module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
-MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support");
-
-static unsigned int ib_qib_disable_sma;
-module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(disable_sma, "Disable the SMA");
-
-/*
- * Translate ib_wr_opcode into ib_wc_opcode.
- */
-const enum ib_wc_opcode ib_qib_wc_opcode[] = {
- [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
- [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
- [IB_WR_SEND] = IB_WC_SEND,
- [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
- [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
- [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
- [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
-};
-
-/*
- * System image GUID.
- */
-__be64 ib_qib_sys_image_guid;
-
-/*
- * Count the number of DMA descriptors needed to send length bytes of data.
- * Don't modify the qib_sge_state to get the count.
- * Return zero if any of the segments is not aligned.
- */
-static u32 qib_count_sge(struct rvt_sge_state *ss, u32 length)
-{
- struct rvt_sge *sg_list = ss->sg_list;
- struct rvt_sge sge = ss->sge;
- u8 num_sge = ss->num_sge;
- u32 ndesc = 1; /* count the header */
-
- while (length) {
- u32 len = rvt_get_sge_length(&sge, length);
-
- if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
- (len != length && (len & (sizeof(u32) - 1)))) {
- ndesc = 0;
- break;
- }
- ndesc++;
- sge.vaddr += len;
- sge.length -= len;
- sge.sge_length -= len;
- if (sge.sge_length == 0) {
- if (--num_sge)
- sge = *sg_list++;
- } else if (sge.length == 0 && sge.mr->lkey) {
- if (++sge.n >= RVT_SEGSZ) {
- if (++sge.m >= sge.mr->mapsz)
- break;
- sge.n = 0;
- }
- sge.vaddr =
- sge.mr->map[sge.m]->segs[sge.n].vaddr;
- sge.length =
- sge.mr->map[sge.m]->segs[sge.n].length;
- }
- length -= len;
- }
- return ndesc;
-}
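
A worked consequence of the alignment rule above: an SGE whose vaddr ends in 0x2 (not 4-byte aligned), or a non-final segment whose length is not a multiple of 4, makes qib_count_sge() return 0; qib_verbs_send_dma() below then treats the request as undescribable by SDMA descriptors and falls back to copying the header and payload into a single bounce buffer.
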
-
-/*
- * Copy from the SGEs to the data buffer.
- */
-static void qib_copy_from_sge(void *data, struct rvt_sge_state *ss, u32 length)
-{
- struct rvt_sge *sge = &ss->sge;
-
- while (length) {
- u32 len = rvt_get_sge_length(sge, length);
-
- memcpy(data, sge->vaddr, len);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (--ss->num_sge)
- *sge = *ss->sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= RVT_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- data += len;
- length -= len;
- }
-}
-
-/**
- * qib_qp_rcv - processing an incoming packet on a QP
- * @rcd: the context pointer
- * @hdr: the packet header
- * @has_grh: true if the packet has a GRH
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP the packet came on
- *
- * This is called from qib_ib_rcv() to process an incoming packet
- * for the given QP.
- * Called at interrupt level.
- */
-static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
-{
- struct qib_ibport *ibp = &rcd->ppd->ibport_data;
-
- spin_lock(&qp->r_lock);
-
- /* Check for valid receive state. */
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
- ibp->rvp.n_pkt_drops++;
- goto unlock;
- }
-
- switch (qp->ibqp.qp_type) {
- case IB_QPT_SMI:
- case IB_QPT_GSI:
- if (ib_qib_disable_sma)
- break;
- fallthrough;
- case IB_QPT_UD:
- qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
- break;
-
- case IB_QPT_RC:
- qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
- break;
-
- case IB_QPT_UC:
- qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
- break;
-
- default:
- break;
- }
-
-unlock:
- spin_unlock(&qp->r_lock);
-}
-
-/**
- * qib_ib_rcv - process an incoming packet
- * @rcd: the context pointer
- * @rhdr: the header of the packet
- * @data: the packet payload
- * @tlen: the packet length
- *
- * This is called from qib_kreceive() to process an incoming packet at
- * interrupt level. Tlen is the length of the header + data + CRC in bytes.
- */
-void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
-{
- struct qib_pportdata *ppd = rcd->ppd;
- struct qib_ibport *ibp = &ppd->ibport_data;
- struct ib_header *hdr = rhdr;
- struct qib_devdata *dd = ppd->dd;
- struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
- struct ib_other_headers *ohdr;
- struct rvt_qp *qp;
- u32 qp_num;
- int lnh;
- u8 opcode;
- u16 lid;
-
- /* 24 == LRH+BTH+CRC */
- if (unlikely(tlen < 24))
- goto drop;
-
- /* Check for a valid destination LID (see ch. 7.11.1). */
- lid = be16_to_cpu(hdr->lrh[1]);
- if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) {
- lid &= ~((1 << ppd->lmc) - 1);
- if (unlikely(lid != ppd->lid))
- goto drop;
- }
-
- /* Check for GRH */
- lnh = be16_to_cpu(hdr->lrh[0]) & 3;
- if (lnh == QIB_LRH_BTH)
- ohdr = &hdr->u.oth;
- else if (lnh == QIB_LRH_GRH) {
- u32 vtf;
-
- ohdr = &hdr->u.l.oth;
- if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
- goto drop;
- vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
- if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
- goto drop;
- } else
- goto drop;
-
- opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
-#ifdef CONFIG_DEBUG_FS
- rcd->opstats->stats[opcode].n_bytes += tlen;
- rcd->opstats->stats[opcode].n_packets++;
-#endif
-
- /* Get the destination QP number. */
- qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- if (qp_num == QIB_MULTICAST_QPN) {
- struct rvt_mcast *mcast;
- struct rvt_mcast_qp *p;
-
- if (lnh != QIB_LRH_GRH)
- goto drop;
- mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid, lid);
- if (mcast == NULL)
- goto drop;
- this_cpu_inc(ibp->pmastats->n_multicast_rcv);
- rcu_read_lock();
- list_for_each_entry_rcu(p, &mcast->qp_list, list)
- qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
- rcu_read_unlock();
- /*
- * Notify rvt_multicast_detach() if it is waiting for us
- * to finish.
- */
- if (atomic_dec_return(&mcast->refcount) <= 1)
- wake_up(&mcast->wait);
- } else {
- rcu_read_lock();
- qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
- if (!qp) {
- rcu_read_unlock();
- goto drop;
- }
- this_cpu_inc(ibp->pmastats->n_unicast_rcv);
- qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
- rcu_read_unlock();
- }
- return;
-
-drop:
- ibp->rvp.n_pkt_drops++;
-}
-
-/*
- * This is called from a timer to check for QPs
- * which need kernel memory in order to send a packet.
- */
-static void mem_timer(struct timer_list *t)
-{
- struct qib_ibdev *dev = timer_container_of(dev, t, mem_timer);
- struct list_head *list = &dev->memwait;
- struct rvt_qp *qp = NULL;
- struct qib_qp_priv *priv = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->rdi.pending_lock, flags);
- if (!list_empty(list)) {
- priv = list_entry(list->next, struct qib_qp_priv, iowait);
- qp = priv->owner;
- list_del_init(&priv->iowait);
- rvt_get_qp(qp);
- if (!list_empty(list))
- mod_timer(&dev->mem_timer, jiffies + 1);
- }
- spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
-
- if (qp) {
- spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & RVT_S_WAIT_KMEM) {
- qp->s_flags &= ~RVT_S_WAIT_KMEM;
- qib_schedule_send(qp);
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
- rvt_put_qp(qp);
- }
-}
-
-#ifdef __LITTLE_ENDIAN
-static inline u32 get_upper_bits(u32 data, u32 shift)
-{
- return data >> shift;
-}
-
-static inline u32 set_upper_bits(u32 data, u32 shift)
-{
- return data << shift;
-}
-
-static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
-{
- data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
- data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
- return data;
-}
-#else
-static inline u32 get_upper_bits(u32 data, u32 shift)
-{
- return data << shift;
-}
-
-static inline u32 set_upper_bits(u32 data, u32 shift)
-{
- return data >> shift;
-}
-
-static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
-{
- data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
- data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
- return data;
-}
-#endif
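
A worked example of the helpers above on a little-endian build: clear_upper_bytes(0xAABBCCDD, 2, 0) shifts left by 16 to 0xCCDD0000 and back right by 16, yielding 0x0000CCDD -- the low two bytes kept at byte offset 0. With off == 1 the final shift is only 8 bits and the result is 0x00CCDD00, the same two bytes positioned one byte higher so they can be OR-ed into a partially filled dword.
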
-
-static void qib_copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
- u32 length, unsigned flush_wc)
-{
- u32 extra = 0;
- u32 data = 0;
- u32 last;
-
- while (1) {
- u32 len = rvt_get_sge_length(&ss->sge, length);
- u32 off;
-
- /* If the source address is not aligned, try to align it. */
- off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
- if (off) {
- u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
- ~(sizeof(u32) - 1));
- u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
- u32 y;
-
- y = sizeof(u32) - off;
- if (len > y)
- len = y;
- if (len + extra >= sizeof(u32)) {
- data |= set_upper_bits(v, extra *
- BITS_PER_BYTE);
- len = sizeof(u32) - extra;
- if (len == length) {
- last = data;
- break;
- }
- __raw_writel(data, piobuf);
- piobuf++;
- extra = 0;
- data = 0;
- } else {
- /* Clear unused upper bytes */
- data |= clear_upper_bytes(v, len, extra);
- if (len == length) {
- last = data;
- break;
- }
- extra += len;
- }
- } else if (extra) {
- /* Source address is aligned. */
- u32 *addr = (u32 *) ss->sge.vaddr;
- int shift = extra * BITS_PER_BYTE;
- int ushift = 32 - shift;
- u32 l = len;
-
- while (l >= sizeof(u32)) {
- u32 v = *addr;
-
- data |= set_upper_bits(v, shift);
- __raw_writel(data, piobuf);
- data = get_upper_bits(v, ushift);
- piobuf++;
- addr++;
- l -= sizeof(u32);
- }
- /*
- * We still have 'l' bytes left over (less than a dword).
- */
- if (l) {
- u32 v = *addr;
-
- if (l + extra >= sizeof(u32)) {
- data |= set_upper_bits(v, shift);
- len -= l + extra - sizeof(u32);
- if (len == length) {
- last = data;
- break;
- }
- __raw_writel(data, piobuf);
- piobuf++;
- extra = 0;
- data = 0;
- } else {
- /* Clear unused upper bytes */
- data |= clear_upper_bytes(v, l, extra);
- if (len == length) {
- last = data;
- break;
- }
- extra += l;
- }
- } else if (len == length) {
- last = data;
- break;
- }
- } else if (len == length) {
- u32 w;
-
- /*
- * Need to round up for the last dword in the
- * packet.
- */
- w = (len + 3) >> 2;
- qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
- piobuf += w - 1;
- last = ((u32 *) ss->sge.vaddr)[w - 1];
- break;
- } else {
- u32 w = len >> 2;
-
- qib_pio_copy(piobuf, ss->sge.vaddr, w);
- piobuf += w;
-
- extra = len & (sizeof(u32) - 1);
- if (extra) {
- u32 v = ((u32 *) ss->sge.vaddr)[w];
-
- /* Clear unused upper bytes */
- data = clear_upper_bytes(v, extra, 0);
- }
- }
- rvt_update_sge(ss, len, false);
- length -= len;
- }
- /* Update address before sending packet. */
- rvt_update_sge(ss, length, false);
- if (flush_wc) {
- /* must flush early everything before trigger word */
- qib_flush_wc();
- __raw_writel(last, piobuf);
- /* be sure trigger word is written */
- qib_flush_wc();
- } else
- __raw_writel(last, piobuf);
-}
-
-static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
- struct rvt_qp *qp)
-{
- struct qib_qp_priv *priv = qp->priv;
- struct qib_verbs_txreq *tx;
- unsigned long flags;
-
- spin_lock_irqsave(&qp->s_lock, flags);
- spin_lock(&dev->rdi.pending_lock);
-
- if (!list_empty(&dev->txreq_free)) {
- struct list_head *l = dev->txreq_free.next;
-
- list_del(l);
- spin_unlock(&dev->rdi.pending_lock);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
- } else {
- if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK &&
- list_empty(&priv->iowait)) {
- dev->n_txwait++;
- qp->s_flags |= RVT_S_WAIT_TX;
- list_add_tail(&priv->iowait, &dev->txwait);
- }
- qp->s_flags &= ~RVT_S_BUSY;
- spin_unlock(&dev->rdi.pending_lock);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- tx = ERR_PTR(-EBUSY);
- }
- return tx;
-}
-
-static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
- struct rvt_qp *qp)
-{
- struct qib_verbs_txreq *tx;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->rdi.pending_lock, flags);
- /* assume the list is non-empty (the common case) */
- if (likely(!list_empty(&dev->txreq_free))) {
- struct list_head *l = dev->txreq_free.next;
-
- list_del(l);
- spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
- tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
- } else {
- /* call slow path to get the extra lock */
- spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
- tx = __get_txreq(dev, qp);
- }
- return tx;
-}
-
-void qib_put_txreq(struct qib_verbs_txreq *tx)
-{
- struct qib_ibdev *dev;
- struct rvt_qp *qp;
- struct qib_qp_priv *priv;
- unsigned long flags;
-
- qp = tx->qp;
- dev = to_idev(qp->ibqp.device);
-
- if (tx->mr) {
- rvt_put_mr(tx->mr);
- tx->mr = NULL;
- }
- if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
- tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
- dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
- tx->txreq.addr, tx->hdr_dwords << 2,
- DMA_TO_DEVICE);
- kfree(tx->align_buf);
- }
-
- spin_lock_irqsave(&dev->rdi.pending_lock, flags);
-
- /* Put struct back on free list */
- list_add(&tx->txreq.list, &dev->txreq_free);
-
- if (!list_empty(&dev->txwait)) {
- /* Wake up first QP wanting a free struct */
- priv = list_entry(dev->txwait.next, struct qib_qp_priv,
- iowait);
- qp = priv->owner;
- list_del_init(&priv->iowait);
- rvt_get_qp(qp);
- spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
-
- spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & RVT_S_WAIT_TX) {
- qp->s_flags &= ~RVT_S_WAIT_TX;
- qib_schedule_send(qp);
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- rvt_put_qp(qp);
- } else
- spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
-}
-
-/*
- * This is called when there are send DMA descriptors that might be
- * available.
- *
- * This is called with ppd->sdma_lock held.
- */
-void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
-{
- struct rvt_qp *qp;
- struct qib_qp_priv *qpp, *nqpp;
- struct rvt_qp *qps[20];
- struct qib_ibdev *dev;
- unsigned i, n;
-
- n = 0;
- dev = &ppd->dd->verbs_dev;
- spin_lock(&dev->rdi.pending_lock);
-
- /* Search wait list for first QP wanting DMA descriptors. */
- list_for_each_entry_safe(qpp, nqpp, &dev->dmawait, iowait) {
- qp = qpp->owner;
- if (qp->port_num != ppd->port)
- continue;
- if (n == ARRAY_SIZE(qps))
- break;
- if (qpp->s_tx->txreq.sg_count > avail)
- break;
- avail -= qpp->s_tx->txreq.sg_count;
- list_del_init(&qpp->iowait);
- rvt_get_qp(qp);
- qps[n++] = qp;
- }
-
- spin_unlock(&dev->rdi.pending_lock);
-
- for (i = 0; i < n; i++) {
- qp = qps[i];
- spin_lock(&qp->s_lock);
- if (qp->s_flags & RVT_S_WAIT_DMA_DESC) {
- qp->s_flags &= ~RVT_S_WAIT_DMA_DESC;
- qib_schedule_send(qp);
- }
- spin_unlock(&qp->s_lock);
- rvt_put_qp(qp);
- }
-}
-
-/*
- * This is called with ppd->sdma_lock held.
- */
-static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
-{
- struct qib_verbs_txreq *tx =
- container_of(cookie, struct qib_verbs_txreq, txreq);
- struct rvt_qp *qp = tx->qp;
- struct qib_qp_priv *priv = qp->priv;
-
- spin_lock(&qp->s_lock);
- if (tx->wqe)
- rvt_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
- else if (qp->ibqp.qp_type == IB_QPT_RC) {
- struct ib_header *hdr;
-
- if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
- hdr = &tx->align_buf->hdr;
- else {
- struct qib_ibdev *dev = to_idev(qp->ibqp.device);
-
- hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
- }
- qib_rc_send_complete(qp, hdr);
- }
- if (atomic_dec_and_test(&priv->s_dma_busy)) {
- if (qp->state == IB_QPS_RESET)
- wake_up(&priv->wait_dma);
- else if (qp->s_flags & RVT_S_WAIT_DMA) {
- qp->s_flags &= ~RVT_S_WAIT_DMA;
- qib_schedule_send(qp);
- }
- }
- spin_unlock(&qp->s_lock);
-
- qib_put_txreq(tx);
-}
-
-static int wait_kmem(struct qib_ibdev *dev, struct rvt_qp *qp)
-{
- struct qib_qp_priv *priv = qp->priv;
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
- spin_lock(&dev->rdi.pending_lock);
- if (list_empty(&priv->iowait)) {
- if (list_empty(&dev->memwait))
- mod_timer(&dev->mem_timer, jiffies + 1);
- qp->s_flags |= RVT_S_WAIT_KMEM;
- list_add_tail(&priv->iowait, &dev->memwait);
- }
- spin_unlock(&dev->rdi.pending_lock);
- qp->s_flags &= ~RVT_S_BUSY;
- ret = -EBUSY;
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- return ret;
-}
-
-static int qib_verbs_send_dma(struct rvt_qp *qp, struct ib_header *hdr,
- u32 hdrwords, struct rvt_sge_state *ss, u32 len,
- u32 plen, u32 dwords)
-{
- struct qib_qp_priv *priv = qp->priv;
- struct qib_ibdev *dev = to_idev(qp->ibqp.device);
- struct qib_devdata *dd = dd_from_dev(dev);
- struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_verbs_txreq *tx;
- struct qib_pio_header *phdr;
- u32 control;
- u32 ndesc;
- int ret;
-
- tx = priv->s_tx;
- if (tx) {
- priv->s_tx = NULL;
- /* resend previously constructed packet */
- ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
- goto bail;
- }
-
- tx = get_txreq(dev, qp);
- if (IS_ERR(tx))
- goto bail_tx;
-
- control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
- be16_to_cpu(hdr->lrh[0]) >> 12);
- tx->qp = qp;
- tx->wqe = qp->s_wqe;
- tx->mr = qp->s_rdma_mr;
- if (qp->s_rdma_mr)
- qp->s_rdma_mr = NULL;
- tx->txreq.callback = sdma_complete;
- if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
- tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
- else
- tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
- if (plen + 1 > dd->piosize2kmax_dwords)
- tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;
-
- if (len) {
- /*
- * Don't try to DMA if it takes more descriptors than
- * the queue holds.
- */
- ndesc = qib_count_sge(ss, len);
- if (ndesc >= ppd->sdma_descq_cnt)
- ndesc = 0;
- } else
- ndesc = 1;
- if (ndesc) {
- phdr = &dev->pio_hdrs[tx->hdr_inx];
- phdr->pbc[0] = cpu_to_le32(plen);
- phdr->pbc[1] = cpu_to_le32(control);
- memcpy(&phdr->hdr, hdr, hdrwords << 2);
- tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
- tx->txreq.sg_count = ndesc;
- tx->txreq.addr = dev->pio_hdrs_phys +
- tx->hdr_inx * sizeof(struct qib_pio_header);
- tx->hdr_dwords = hdrwords + 2; /* add PBC length */
- ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
- goto bail;
- }
-
- /* Allocate a buffer and copy the header and payload to it. */
- tx->hdr_dwords = plen + 1;
- phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
- if (!phdr)
- goto err_tx;
- phdr->pbc[0] = cpu_to_le32(plen);
- phdr->pbc[1] = cpu_to_le32(control);
- memcpy(&phdr->hdr, hdr, hdrwords << 2);
- qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);
-
- tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
- tx->hdr_dwords << 2, DMA_TO_DEVICE);
- if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
- goto map_err;
- tx->align_buf = phdr;
- tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
- tx->txreq.sg_count = 1;
- ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
- goto unaligned;
-
-map_err:
- kfree(phdr);
-err_tx:
- qib_put_txreq(tx);
- ret = wait_kmem(dev, qp);
-unaligned:
- ibp->rvp.n_unaligned++;
-bail:
- return ret;
-bail_tx:
- ret = PTR_ERR(tx);
- goto bail;
-}
-
-/*
- * If we are now in the error state, return zero to flush the
- * send work request.
- */
-static int no_bufs_available(struct rvt_qp *qp)
-{
- struct qib_qp_priv *priv = qp->priv;
- struct qib_ibdev *dev = to_idev(qp->ibqp.device);
- struct qib_devdata *dd;
- unsigned long flags;
- int ret = 0;
-
- /*
- * Note that as soon as the PIO-avail interrupt is enabled (and
- * possibly before f_wantpiobuf_intr() returns), qib_ib_piobufavail()
- * could be called. Therefore, put the QP on the I/O wait list
- * before enabling the interrupt.
- */
- spin_lock_irqsave(&qp->s_lock, flags);
- if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
- spin_lock(&dev->rdi.pending_lock);
- if (list_empty(&priv->iowait)) {
- dev->n_piowait++;
- qp->s_flags |= RVT_S_WAIT_PIO;
- list_add_tail(&priv->iowait, &dev->piowait);
- dd = dd_from_dev(dev);
- dd->f_wantpiobuf_intr(dd, 1);
- }
- spin_unlock(&dev->rdi.pending_lock);
- qp->s_flags &= ~RVT_S_BUSY;
- ret = -EBUSY;
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
- return ret;
-}
-
-static int qib_verbs_send_pio(struct rvt_qp *qp, struct ib_header *ibhdr,
- u32 hdrwords, struct rvt_sge_state *ss, u32 len,
- u32 plen, u32 dwords)
-{
- struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
- struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
- u32 *hdr = (u32 *) ibhdr;
- u32 __iomem *piobuf_orig;
- u32 __iomem *piobuf;
- u64 pbc;
- unsigned long flags;
- unsigned flush_wc;
- u32 control;
- u32 pbufn;
-
- control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
- be16_to_cpu(ibhdr->lrh[0]) >> 12);
- pbc = ((u64) control << 32) | plen;
- piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
- if (unlikely(piobuf == NULL))
- return no_bufs_available(qp);
-
- /*
- * Write the PBC.
- * We have to flush after the PBC for correctness on some CPUs,
- * or the WC buffer can be written out of order.
- */
- writeq(pbc, piobuf);
- piobuf_orig = piobuf;
- piobuf += 2;
-
- flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
- if (len == 0) {
- /*
- * If there is just the header portion, we must flush before
- * writing the last word of the header for correctness, and
- * again after the last header word (the trigger word).
- */
- if (flush_wc) {
- qib_flush_wc();
- qib_pio_copy(piobuf, hdr, hdrwords - 1);
- qib_flush_wc();
- __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
- qib_flush_wc();
- } else
- qib_pio_copy(piobuf, hdr, hdrwords);
- goto done;
- }
-
- if (flush_wc)
- qib_flush_wc();
- qib_pio_copy(piobuf, hdr, hdrwords);
- piobuf += hdrwords;
-
- /* The common case is aligned and contained in one segment. */
- if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
- !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
- u32 *addr = (u32 *) ss->sge.vaddr;
-
- /* Update address before sending packet. */
- rvt_update_sge(ss, len, false);
- if (flush_wc) {
- qib_pio_copy(piobuf, addr, dwords - 1);
- /* must flush everything before the trigger word */
- qib_flush_wc();
- __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
- /* be sure trigger word is written */
- qib_flush_wc();
- } else
- qib_pio_copy(piobuf, addr, dwords);
- goto done;
- }
- qib_copy_io(piobuf, ss, len, flush_wc);
-done:
- if (dd->flags & QIB_USE_SPCL_TRIG) {
- u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
-
- qib_flush_wc();
- __raw_writel(0xaebecede, piobuf_orig + spcl_off);
- }
- qib_sendbuf_done(dd, pbufn);
- if (qp->s_rdma_mr) {
- rvt_put_mr(qp->s_rdma_mr);
- qp->s_rdma_mr = NULL;
- }
- if (qp->s_wqe) {
- spin_lock_irqsave(&qp->s_lock, flags);
- rvt_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- } else if (qp->ibqp.qp_type == IB_QPT_RC) {
- spin_lock_irqsave(&qp->s_lock, flags);
- qib_rc_send_complete(qp, ibhdr);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- }
- return 0;
-}
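-
-/*
- * A minimal standalone sketch of the PBC packing used above: the
- * control word fills the high 32 bits and the dword length the low
- * 32 bits of the 64-bit PBC. The sample values are assumptions.
- */
-#include <stdio.h>
-#include <stdint.h>
-
-int main(void)
-{
- uint32_t control = 0x00200000; /* hypothetical f_setpbc_control() value */
- uint32_t plen = 265; /* packet length in dwords, including the PBC */
- uint64_t pbc = ((uint64_t)control << 32) | plen;
-
- printf("pbc=%#llx len=%u ctrl=%#x\n", (unsigned long long)pbc,
- (unsigned)(pbc & 0xffffffffu), (unsigned)(pbc >> 32));
- return 0;
-}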
-
-/**
- * qib_verbs_send - send a packet
- * @qp: the QP to send on
- * @hdr: the packet header
- * @hdrwords: the number of 32-bit words in the header
- * @ss: the SGE to send
- * @len: the length of the packet in bytes
- *
- * Return zero if the packet was sent or queued OK.
- * Return non-zero and clear the RVT_S_BUSY flag in qp->s_flags otherwise.
- */
-int qib_verbs_send(struct rvt_qp *qp, struct ib_header *hdr,
- u32 hdrwords, struct rvt_sge_state *ss, u32 len)
-{
- struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
- u32 plen;
- int ret;
- u32 dwords = (len + 3) >> 2;
-
- /*
- * Calculate the send buffer trigger address.
- * The +1 accounts for the PBC control dword that follows the PBC length.
- */
- plen = hdrwords + dwords + 1;
-
- /*
- * VL15 packets (IB_QPT_SMI) will always use PIO, so we
- * can defer SDMA restart until link goes ACTIVE without
- * worrying about just how we got there.
- */
- if (qp->ibqp.qp_type == IB_QPT_SMI ||
- !(dd->flags & QIB_HAS_SEND_DMA))
- ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
- plen, dwords);
- else
- ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
- plen, dwords);
-
- return ret;
-}
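-
-/*
- * A minimal standalone sketch of the length arithmetic above: the
- * payload is rounded up to 32-bit dwords and the PBC control dword
- * adds one. The sample sizes are illustrative assumptions.
- */
-#include <stdio.h>
-
-int main(void)
-{
- unsigned hdrwords = 7; /* header length in 32-bit words */
- unsigned len = 1025; /* payload length in bytes */
- unsigned dwords = (len + 3) >> 2; /* round payload up to dwords */
- unsigned plen = hdrwords + dwords + 1; /* +1 for the PBC control dword */
-
- printf("dwords=%u plen=%u\n", dwords, plen); /* dwords=257 plen=265 */
- return 0;
-}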
-
-int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
- u64 *rwords, u64 *spkts, u64 *rpkts,
- u64 *xmit_wait)
-{
- int ret;
- struct qib_devdata *dd = ppd->dd;
-
- if (!(dd->flags & QIB_PRESENT)) {
- /* no hardware, freeze, etc. */
- ret = -EINVAL;
- goto bail;
- }
- *swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
- *rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
- *spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
- *rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
- *xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * qib_get_counters - get various chip counters
- * @ppd: the qlogic_ib device
- * @cntrs: counters are placed here
- *
- * Return the counters needed by recv_pma_get_portcounters().
- */
-int qib_get_counters(struct qib_pportdata *ppd,
- struct qib_verbs_counters *cntrs)
-{
- int ret;
-
- if (!(ppd->dd->flags & QIB_PRESENT)) {
- /* no hardware, freeze, etc. */
- ret = -EINVAL;
- goto bail;
- }
- cntrs->symbol_error_counter =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
- cntrs->link_error_recovery_counter =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
- /*
- * The link downed counter counts when the other side downs the
- * connection. We add in the number of times we downed the link
- * due to local link integrity errors to compensate.
- */
- cntrs->link_downed_counter =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
- cntrs->port_rcv_errors =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
- cntrs->port_rcv_errors +=
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
- cntrs->port_rcv_errors +=
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
- cntrs->port_rcv_remphys_errors =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
- cntrs->port_xmit_discards =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
- cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
- QIBPORTCNTR_WORDSEND);
- cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
- QIBPORTCNTR_WORDRCV);
- cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
- QIBPORTCNTR_PKTSEND);
- cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
- QIBPORTCNTR_PKTRCV);
- cntrs->local_link_integrity_errors =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
- cntrs->excessive_buffer_overrun_errors =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
- cntrs->vl15_dropped =
- ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-/**
- * qib_ib_piobufavail - callback when a PIO buffer is available
- * @dd: the device pointer
- *
- * This is called from qib_intr() at interrupt level when a PIO buffer is
- * available after qib_verbs_send() returned an error indicating that no
- * buffers were available. Disable the interrupt if there are no more QPs waiting.
- */
-void qib_ib_piobufavail(struct qib_devdata *dd)
-{
- struct qib_ibdev *dev = &dd->verbs_dev;
- struct list_head *list;
- struct rvt_qp *qps[5];
- struct rvt_qp *qp;
- unsigned long flags;
- unsigned i, n;
- struct qib_qp_priv *priv;
-
- list = &dev->piowait;
- n = 0;
-
- /*
- * Note: checking that the piowait list is empty and clearing
- * the buffer available interrupt needs to be atomic or we
- * could end up with QPs on the wait list with the interrupt
- * disabled.
- */
- spin_lock_irqsave(&dev->rdi.pending_lock, flags);
- while (!list_empty(list)) {
- if (n == ARRAY_SIZE(qps))
- goto full;
- priv = list_entry(list->next, struct qib_qp_priv, iowait);
- qp = priv->owner;
- list_del_init(&priv->iowait);
- rvt_get_qp(qp);
- qps[n++] = qp;
- }
- dd->f_wantpiobuf_intr(dd, 0);
-full:
- spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
-
- for (i = 0; i < n; i++) {
- qp = qps[i];
-
- spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & RVT_S_WAIT_PIO) {
- qp->s_flags &= ~RVT_S_WAIT_PIO;
- qib_schedule_send(qp);
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
-
- /* Notify qib_destroy_qp() if it is waiting. */
- rvt_put_qp(qp);
- }
-}
-
-static int qib_query_port(struct rvt_dev_info *rdi, u32 port_num,
- struct ib_port_attr *props)
-{
- struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
- struct qib_devdata *dd = dd_from_dev(ibdev);
- struct qib_pportdata *ppd = &dd->pport[port_num - 1];
- enum ib_mtu mtu;
- u16 lid = ppd->lid;
-
- /* props is zeroed by the caller, so avoid zeroing it here */
- props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
- props->lmc = ppd->lmc;
- props->state = dd->f_iblink_state(ppd->lastibcstat);
- props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
- props->gid_tbl_len = QIB_GUIDS_PER_PORT;
- props->active_width = ppd->link_width_active;
- /* See rate_show() */
- props->active_speed = ppd->link_speed_active;
- props->max_vl_num = qib_num_vls(ppd->vls_supported);
-
- props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
- switch (ppd->ibmtu) {
- case 4096:
- mtu = IB_MTU_4096;
- break;
- case 2048:
- mtu = IB_MTU_2048;
- break;
- case 1024:
- mtu = IB_MTU_1024;
- break;
- case 512:
- mtu = IB_MTU_512;
- break;
- case 256:
- mtu = IB_MTU_256;
- break;
- default:
- mtu = IB_MTU_2048;
- }
- props->active_mtu = mtu;
-
- return 0;
-}
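-
-/*
- * A minimal standalone sketch of the byte-count to IB MTU enum mapping
- * above; the enum values mirror those in include/rdma/ib_verbs.h, and
- * the fallback to 2048 matches the switch's default arm.
- */
-#include <stdio.h>
-
-enum mtu_demo { MTU_256 = 1, MTU_512, MTU_1024, MTU_2048, MTU_4096 };
-
-static enum mtu_demo mtu_to_enum(unsigned bytes)
-{
- switch (bytes) {
- case 4096: return MTU_4096;
- case 2048: return MTU_2048;
- case 1024: return MTU_1024;
- case 512: return MTU_512;
- case 256: return MTU_256;
- default: return MTU_2048; /* same fallback as qib_query_port() */
- }
-}
-
-int main(void)
-{
- printf("%d %d %d\n", mtu_to_enum(4096), mtu_to_enum(256), mtu_to_enum(999));
- return 0;
-}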
-
-static int qib_modify_device(struct ib_device *device,
- int device_modify_mask,
- struct ib_device_modify *device_modify)
-{
- struct qib_devdata *dd = dd_from_ibdev(device);
- unsigned i;
- int ret;
-
- if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
- IB_DEVICE_MODIFY_NODE_DESC)) {
- ret = -EOPNOTSUPP;
- goto bail;
- }
-
- if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
- memcpy(device->node_desc, device_modify->node_desc,
- IB_DEVICE_NODE_DESC_MAX);
- for (i = 0; i < dd->num_pports; i++) {
- struct qib_ibport *ibp = &dd->pport[i].ibport_data;
-
- qib_node_desc_chg(ibp);
- }
- }
-
- if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
- ib_qib_sys_image_guid =
- cpu_to_be64(device_modify->sys_image_guid);
- for (i = 0; i < dd->num_pports; i++) {
- struct qib_ibport *ibp = &dd->pport[i].ibport_data;
-
- qib_sys_guid_chg(ibp);
- }
- }
-
- ret = 0;
-
-bail:
- return ret;
-}
-
-static int qib_shut_down_port(struct rvt_dev_info *rdi, u32 port_num)
-{
- struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi);
- struct qib_devdata *dd = dd_from_dev(ibdev);
- struct qib_pportdata *ppd = &dd->pport[port_num - 1];
-
- qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
-
- return 0;
-}
-
-static int qib_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
- int guid_index, __be64 *guid)
-{
- struct qib_ibport *ibp = container_of(rvp, struct qib_ibport, rvp);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
-
- if (guid_index == 0)
- *guid = ppd->guid;
- else if (guid_index < QIB_GUIDS_PER_PORT)
- *guid = ibp->guids[guid_index - 1];
- else
- return -EINVAL;
-
- return 0;
-}
-
-int qib_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
-{
- if (rdma_ah_get_sl(ah_attr) > 15)
- return -EINVAL;
-
- if (rdma_ah_get_dlid(ah_attr) == 0)
- return -EINVAL;
- if (rdma_ah_get_dlid(ah_attr) >=
- be16_to_cpu(IB_MULTICAST_LID_BASE) &&
- rdma_ah_get_dlid(ah_attr) !=
- be16_to_cpu(IB_LID_PERMISSIVE) &&
- !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
- return -EINVAL;
-
- return 0;
-}
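-
-/*
- * A minimal standalone sketch of the AH sanity rules enforced above:
- * the SL must fit in 4 bits, the DLID must be non-zero, and a
- * multicast DLID requires a GRH. The constants below restate
- * IB_MULTICAST_LID_BASE and IB_LID_PERMISSIVE in host byte order.
- */
-#include <stdio.h>
-
-#define MCAST_LID_BASE 0xC000u
-#define LID_PERMISSIVE 0xFFFFu
-
-static int check_ah_demo(unsigned sl, unsigned dlid, int has_grh)
-{
- if (sl > 15)
- return -1;
- if (dlid == 0)
- return -1;
- if (dlid >= MCAST_LID_BASE && dlid != LID_PERMISSIVE && !has_grh)
- return -1;
- return 0;
-}
-
-int main(void)
-{
- printf("%d\n", check_ah_demo(0, 0x0001, 0)); /* 0: unicast is fine */
- printf("%d\n", check_ah_demo(0, 0xC001, 0)); /* -1: multicast w/o GRH */
- printf("%d\n", check_ah_demo(0, 0xC001, 1)); /* 0: multicast with GRH */
- return 0;
-}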
-
-static void qib_notify_new_ah(struct ib_device *ibdev,
- struct rdma_ah_attr *ah_attr,
- struct rvt_ah *ah)
-{
- struct qib_ibport *ibp;
- struct qib_pportdata *ppd;
-
- /*
- * Do not trust reading anything from rvt_ah at this point, as it is
- * not done being set up. We can, however, modify the fields we need to set.
- */
-
- ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
- ppd = ppd_from_ibp(ibp);
- ah->vl = ibp->sl_to_vl[rdma_ah_get_sl(&ah->attr)];
- ah->log_pmtu = ilog2(ppd->ibmtu);
-}
-
-struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
-{
- struct rdma_ah_attr attr;
- struct ib_ah *ah = ERR_PTR(-EINVAL);
- struct rvt_qp *qp0;
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_devdata *dd = dd_from_ppd(ppd);
- u32 port_num = ppd->port;
-
- memset(&attr, 0, sizeof(attr));
- attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num);
- rdma_ah_set_dlid(&attr, dlid);
- rdma_ah_set_port_num(&attr, port_num);
- rcu_read_lock();
- qp0 = rcu_dereference(ibp->rvp.qp[0]);
- if (qp0)
- ah = rdma_create_ah(qp0->ibqp.pd, &attr, 0);
- rcu_read_unlock();
- return ah;
-}
-
-/**
- * qib_get_npkeys - return the size of the PKEY table for context 0
- * @dd: the qlogic_ib device
- */
-unsigned qib_get_npkeys(struct qib_devdata *dd)
-{
- return ARRAY_SIZE(dd->rcd[0]->pkeys);
-}
-
-/*
- * Return the indexed PKEY from the port PKEY table.
- * No need to validate rcd[ctxt]; the port is set up if we are here.
- */
-unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
-{
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_devdata *dd = ppd->dd;
- unsigned ctxt = ppd->hw_pidx;
- unsigned ret;
-
- /* dd->rcd is NULL after mini_init or some init failures */
- if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
- ret = 0;
- else
- ret = dd->rcd[ctxt]->pkeys[index];
-
- return ret;
-}
-
-static void init_ibport(struct qib_pportdata *ppd)
-{
- struct qib_verbs_counters cntrs;
- struct qib_ibport *ibp = &ppd->ibport_data;
-
- spin_lock_init(&ibp->rvp.lock);
- /* Set the prefix to the default value (see ch. 4.1.1) */
- ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
- ibp->rvp.sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
- ibp->rvp.port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
- IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
- IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
- IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
- IB_PORT_OTHER_LOCAL_CHANGES_SUP;
- if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
- ibp->rvp.port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
- ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
- ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
- ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
- ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
- ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
-
- /* Snapshot current HW counters to "clear" them. */
- qib_get_counters(ppd, &cntrs);
- ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
- ibp->z_link_error_recovery_counter =
- cntrs.link_error_recovery_counter;
- ibp->z_link_downed_counter = cntrs.link_downed_counter;
- ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
- ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
- ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
- ibp->z_port_xmit_data = cntrs.port_xmit_data;
- ibp->z_port_rcv_data = cntrs.port_rcv_data;
- ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
- ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
- ibp->z_local_link_integrity_errors =
- cntrs.local_link_integrity_errors;
- ibp->z_excessive_buffer_overrun_errors =
- cntrs.excessive_buffer_overrun_errors;
- ibp->z_vl15_dropped = cntrs.vl15_dropped;
- RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
- RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
-}
-
-/**
- * qib_fill_device_attr - Fill in rvt dev info device attributes.
- * @dd: the device data structure
- */
-static void qib_fill_device_attr(struct qib_devdata *dd)
-{
- struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
-
- memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));
-
- rdi->dparms.props.max_pd = ib_qib_max_pds;
- rdi->dparms.props.max_ah = ib_qib_max_ahs;
- rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
- IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
- IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
- IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
- rdi->dparms.props.page_size_cap = PAGE_SIZE;
- rdi->dparms.props.vendor_id =
- QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
- rdi->dparms.props.vendor_part_id = dd->deviceid;
- rdi->dparms.props.hw_ver = dd->minrev;
- rdi->dparms.props.sys_image_guid = ib_qib_sys_image_guid;
- rdi->dparms.props.max_mr_size = ~0ULL;
- rdi->dparms.props.max_qp = ib_qib_max_qps;
- rdi->dparms.props.max_qp_wr = ib_qib_max_qp_wrs;
- rdi->dparms.props.max_send_sge = ib_qib_max_sges;
- rdi->dparms.props.max_recv_sge = ib_qib_max_sges;
- rdi->dparms.props.max_sge_rd = ib_qib_max_sges;
- rdi->dparms.props.max_cq = ib_qib_max_cqs;
- rdi->dparms.props.max_cqe = ib_qib_max_cqes;
- rdi->dparms.props.max_ah = ib_qib_max_ahs;
- rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
- rdi->dparms.props.max_qp_init_rd_atom = 255;
- rdi->dparms.props.max_srq = ib_qib_max_srqs;
- rdi->dparms.props.max_srq_wr = ib_qib_max_srq_wrs;
- rdi->dparms.props.max_srq_sge = ib_qib_max_srq_sges;
- rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
- rdi->dparms.props.max_pkeys = qib_get_npkeys(dd);
- rdi->dparms.props.max_mcast_grp = ib_qib_max_mcast_grps;
- rdi->dparms.props.max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
- rdi->dparms.props.max_total_mcast_qp_attach =
- rdi->dparms.props.max_mcast_qp_attach *
- rdi->dparms.props.max_mcast_grp;
- /* post send table */
- dd->verbs_dev.rdi.post_parms = qib_post_parms;
-
- /* opcode translation table */
- dd->verbs_dev.rdi.wc_opcode = ib_qib_wc_opcode;
-}
-
-static const struct ib_device_ops qib_dev_ops = {
- .owner = THIS_MODULE,
- .driver_id = RDMA_DRIVER_QIB,
-
- .port_groups = qib_attr_port_groups,
- .device_group = &qib_attr_group,
- .modify_device = qib_modify_device,
- .process_mad = qib_process_mad,
-};
-
-/**
- * qib_register_ib_device - register our device with the infiniband core
- * @dd: the device data structure
- * Return 0 on success or a negative errno on failure.
- */
-int qib_register_ib_device(struct qib_devdata *dd)
-{
- struct qib_ibdev *dev = &dd->verbs_dev;
- struct ib_device *ibdev = &dev->rdi.ibdev;
- struct qib_pportdata *ppd = dd->pport;
- unsigned i, ctxt;
- int ret;
-
- for (i = 0; i < dd->num_pports; i++)
- init_ibport(ppd + i);
-
- /* Only need to initialize non-zero fields. */
- timer_setup(&dev->mem_timer, mem_timer, 0);
-
- INIT_LIST_HEAD(&dev->piowait);
- INIT_LIST_HEAD(&dev->dmawait);
- INIT_LIST_HEAD(&dev->txwait);
- INIT_LIST_HEAD(&dev->memwait);
- INIT_LIST_HEAD(&dev->txreq_free);
-
- if (ppd->sdma_descq_cnt) {
- dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
- ppd->sdma_descq_cnt *
- sizeof(struct qib_pio_header),
- &dev->pio_hdrs_phys,
- GFP_KERNEL);
- if (!dev->pio_hdrs) {
- ret = -ENOMEM;
- goto err_hdrs;
- }
- }
-
- for (i = 0; i < ppd->sdma_descq_cnt; i++) {
- struct qib_verbs_txreq *tx;
-
- tx = kzalloc(sizeof(*tx), GFP_KERNEL);
- if (!tx) {
- ret = -ENOMEM;
- goto err_tx;
- }
- tx->hdr_inx = i;
- list_add(&tx->txreq.list, &dev->txreq_free);
- }
-
- /*
- * The system image GUID is supposed to be the same for all
- * IB HCAs in a single system but since there can be other
- * device types in the system, we can't be sure this is unique.
- */
- if (!ib_qib_sys_image_guid)
- ib_qib_sys_image_guid = ppd->guid;
-
- ibdev->node_guid = ppd->guid;
- ibdev->phys_port_cnt = dd->num_pports;
- ibdev->dev.parent = &dd->pcidev->dev;
-
- snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
- "Intel Infiniband HCA %.42s", init_utsname()->nodename);
-
- /*
- * Fill in rvt info object.
- */
- dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
- dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
- dd->verbs_dev.rdi.driver_f.setup_wqe = qib_check_send_wqe;
- dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
- dd->verbs_dev.rdi.driver_f.alloc_qpn = qib_alloc_qpn;
- dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qib_qp_priv_alloc;
- dd->verbs_dev.rdi.driver_f.qp_priv_free = qib_qp_priv_free;
- dd->verbs_dev.rdi.driver_f.free_all_qps = qib_free_all_qps;
- dd->verbs_dev.rdi.driver_f.notify_qp_reset = qib_notify_qp_reset;
- dd->verbs_dev.rdi.driver_f.do_send = qib_do_send;
- dd->verbs_dev.rdi.driver_f.schedule_send = qib_schedule_send;
- dd->verbs_dev.rdi.driver_f.quiesce_qp = qib_quiesce_qp;
- dd->verbs_dev.rdi.driver_f.stop_send_queue = qib_stop_send_queue;
- dd->verbs_dev.rdi.driver_f.flush_qp_waiters = qib_flush_qp_waiters;
- dd->verbs_dev.rdi.driver_f.notify_error_qp = qib_notify_error_qp;
- dd->verbs_dev.rdi.driver_f.notify_restart_rc = qib_restart_rc;
- dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = qib_mtu_to_path_mtu;
- dd->verbs_dev.rdi.driver_f.mtu_from_qp = qib_mtu_from_qp;
- dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = qib_get_pmtu_from_attr;
- dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _qib_schedule_send;
- dd->verbs_dev.rdi.driver_f.query_port_state = qib_query_port;
- dd->verbs_dev.rdi.driver_f.shut_down_port = qib_shut_down_port;
- dd->verbs_dev.rdi.driver_f.cap_mask_chg = qib_cap_mask_chg;
- dd->verbs_dev.rdi.driver_f.notify_create_mad_agent =
- qib_notify_create_mad_agent;
- dd->verbs_dev.rdi.driver_f.notify_free_mad_agent =
- qib_notify_free_mad_agent;
-
- dd->verbs_dev.rdi.dparms.max_rdma_atomic = QIB_MAX_RDMA_ATOMIC;
- dd->verbs_dev.rdi.driver_f.get_guid_be = qib_get_guid_be;
- dd->verbs_dev.rdi.dparms.lkey_table_size = qib_lkey_table_size;
- dd->verbs_dev.rdi.dparms.qp_table_size = ib_qib_qp_table_size;
- dd->verbs_dev.rdi.dparms.qpn_start = 1;
- dd->verbs_dev.rdi.dparms.qpn_res_start = QIB_KD_QP;
- dd->verbs_dev.rdi.dparms.qpn_res_end = QIB_KD_QP; /* Reserve one QP */
- dd->verbs_dev.rdi.dparms.qpn_inc = 1;
- dd->verbs_dev.rdi.dparms.qos_shift = 1;
- dd->verbs_dev.rdi.dparms.psn_mask = QIB_PSN_MASK;
- dd->verbs_dev.rdi.dparms.psn_shift = QIB_PSN_SHIFT;
- dd->verbs_dev.rdi.dparms.psn_modify_mask = QIB_PSN_MASK;
- dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
- dd->verbs_dev.rdi.dparms.npkeys = qib_get_npkeys(dd);
- dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id;
- dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_IBA_IB;
- dd->verbs_dev.rdi.dparms.max_mad_size = IB_MGMT_MAD_SIZE;
- dd->verbs_dev.rdi.dparms.sge_copy_mode = RVT_SGE_COPY_MEMCPY;
-
- qib_fill_device_attr(dd);
-
- ppd = dd->pport;
- for (i = 0; i < dd->num_pports; i++, ppd++) {
- ctxt = ppd->hw_pidx;
- rvt_init_port(&dd->verbs_dev.rdi,
- &ppd->ibport_data.rvp,
- i,
- dd->rcd[ctxt]->pkeys);
- }
-
- ib_set_device_ops(ibdev, &qib_dev_ops);
- ret = rvt_register_device(&dd->verbs_dev.rdi);
- if (ret)
- goto err_tx;
-
- return ret;
-
-err_tx:
- while (!list_empty(&dev->txreq_free)) {
- struct list_head *l = dev->txreq_free.next;
- struct qib_verbs_txreq *tx;
-
- list_del(l);
- tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
- kfree(tx);
- }
- if (ppd->sdma_descq_cnt)
- dma_free_coherent(&dd->pcidev->dev,
- ppd->sdma_descq_cnt *
- sizeof(struct qib_pio_header),
- dev->pio_hdrs, dev->pio_hdrs_phys);
-err_hdrs:
- qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
- return ret;
-}
-
-void qib_unregister_ib_device(struct qib_devdata *dd)
-{
- struct qib_ibdev *dev = &dd->verbs_dev;
-
- rvt_unregister_device(&dd->verbs_dev.rdi);
-
- if (!list_empty(&dev->piowait))
- qib_dev_err(dd, "piowait list not empty!\n");
- if (!list_empty(&dev->dmawait))
- qib_dev_err(dd, "dmawait list not empty!\n");
- if (!list_empty(&dev->txwait))
- qib_dev_err(dd, "txwait list not empty!\n");
- if (!list_empty(&dev->memwait))
- qib_dev_err(dd, "memwait list not empty!\n");
-
- timer_delete_sync(&dev->mem_timer);
- while (!list_empty(&dev->txreq_free)) {
- struct list_head *l = dev->txreq_free.next;
- struct qib_verbs_txreq *tx;
-
- list_del(l);
- tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
- kfree(tx);
- }
- if (dd->pport->sdma_descq_cnt)
- dma_free_coherent(&dd->pcidev->dev,
- dd->pport->sdma_descq_cnt *
- sizeof(struct qib_pio_header),
- dev->pio_hdrs, dev->pio_hdrs_phys);
-}
-
-/**
- * _qib_schedule_send - schedule progress
- * @qp: the qp
- *
- * This schedules progress without regard to the s_flags.
- *
- * It is only used in the post-send path, which does not
- * hold the s_lock.
- */
-bool _qib_schedule_send(struct rvt_qp *qp)
-{
- struct qib_ibport *ibp =
- to_iport(qp->ibqp.device, qp->port_num);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_qp_priv *priv = qp->priv;
-
- return queue_work(ppd->qib_wq, &priv->s_work);
-}
-
-/**
- * qib_schedule_send - schedule progress
- * @qp: the qp
- *
- * This schedules qp progress. The s_lock
- * should be held.
- */
-bool qib_schedule_send(struct rvt_qp *qp)
-{
- if (qib_send_ok(qp))
- return _qib_schedule_send(qp);
- return false;
-}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
deleted file mode 100644
index 408fe1ba74b9..000000000000
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ /dev/null
@@ -1,398 +0,0 @@
-/*
- * Copyright (c) 2012 - 2018 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef QIB_VERBS_H
-#define QIB_VERBS_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/kref.h>
-#include <linux/workqueue.h>
-#include <linux/kthread.h>
-#include <linux/completion.h>
-#include <rdma/ib_pack.h>
-#include <rdma/ib_user_verbs.h>
-#include <rdma/ib_hdrs.h>
-#include <rdma/rdmavt_qp.h>
-#include <rdma/rdmavt_cq.h>
-
-struct qib_ctxtdata;
-struct qib_pportdata;
-struct qib_devdata;
-struct qib_verbs_txreq;
-
-#define QIB_MAX_RDMA_ATOMIC 16
-#define QIB_GUIDS_PER_PORT 5
-#define QIB_PSN_SHIFT 8
-
-/*
- * Increment this value if any changes that break userspace ABI
- * compatibility are made.
- */
-#define QIB_UVERBS_ABI_VERSION 2
-
-/* IB Performance Manager status values */
-#define IB_PMA_SAMPLE_STATUS_DONE 0x00
-#define IB_PMA_SAMPLE_STATUS_STARTED 0x01
-#define IB_PMA_SAMPLE_STATUS_RUNNING 0x02
-
-/* Mandatory IB performance counter select values. */
-#define IB_PMA_PORT_XMIT_DATA cpu_to_be16(0x0001)
-#define IB_PMA_PORT_RCV_DATA cpu_to_be16(0x0002)
-#define IB_PMA_PORT_XMIT_PKTS cpu_to_be16(0x0003)
-#define IB_PMA_PORT_RCV_PKTS cpu_to_be16(0x0004)
-#define IB_PMA_PORT_XMIT_WAIT cpu_to_be16(0x0005)
-
-#define QIB_VENDOR_IPG cpu_to_be16(0xFFA0)
-
-#define IB_DEFAULT_GID_PREFIX cpu_to_be64(0xfe80000000000000ULL)
-
-/* Values for set/get portinfo VLCap OperationalVLs */
-#define IB_VL_VL0 1
-#define IB_VL_VL0_1 2
-#define IB_VL_VL0_3 3
-#define IB_VL_VL0_7 4
-#define IB_VL_VL0_14 5
-
-static inline int qib_num_vls(int vls)
-{
- switch (vls) {
- default:
- case IB_VL_VL0:
- return 1;
- case IB_VL_VL0_1:
- return 2;
- case IB_VL_VL0_3:
- return 4;
- case IB_VL_VL0_7:
- return 8;
- case IB_VL_VL0_14:
- return 15;
- }
-}
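-
-/*
- * A minimal standalone sketch of the VLCap decoding above; the
- * encodings are the IBA PortInfo values (1..5), and VL15 is excluded
- * from the VL0_14 count because it is the dedicated management VL.
- */
-#include <stdio.h>
-
-static int num_vls_demo(int vls)
-{
- switch (vls) {
- case 2: return 2; /* IB_VL_VL0_1 */
- case 3: return 4; /* IB_VL_VL0_3 */
- case 4: return 8; /* IB_VL_VL0_7 */
- case 5: return 15; /* IB_VL_VL0_14 */
- default: return 1; /* IB_VL_VL0 and unknown encodings */
- }
-}
-
-int main(void)
-{
- int v;
-
- for (v = 1; v <= 5; v++)
- printf("VLCap %d -> %d VLs\n", v, num_vls_demo(v));
- return 0;
-}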
-
-struct qib_pio_header {
- __le32 pbc[2];
- struct ib_header hdr;
-} __packed;
-
-/*
- * qib-specific data structure that will be hidden from rvt after the queue pair
- * is made common.
- */
-struct qib_qp_priv {
- struct ib_header *s_hdr; /* next packet header to send */
- struct list_head iowait; /* link for wait PIO buf */
- atomic_t s_dma_busy;
- struct qib_verbs_txreq *s_tx;
- struct work_struct s_work;
- wait_queue_head_t wait_dma;
- struct rvt_qp *owner;
-};
-
-#define QIB_PSN_CREDIT 16
-
-struct qib_opcode_stats {
- u64 n_packets; /* number of packets */
- u64 n_bytes; /* total number of bytes */
-};
-
-struct qib_opcode_stats_perctx {
- struct qib_opcode_stats stats[128];
-};
-
-struct qib_pma_counters {
- u64 n_unicast_xmit; /* total unicast packets sent */
- u64 n_unicast_rcv; /* total unicast packets received */
- u64 n_multicast_xmit; /* total multicast packets sent */
- u64 n_multicast_rcv; /* total multicast packets received */
-};
-
-struct qib_ibport {
- struct rvt_ibport rvp;
- struct rvt_ah *smi_ah;
- __be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */
- struct qib_pma_counters __percpu *pmastats;
- u64 z_unicast_xmit; /* starting count for PMA */
- u64 z_unicast_rcv; /* starting count for PMA */
- u64 z_multicast_xmit; /* starting count for PMA */
- u64 z_multicast_rcv; /* starting count for PMA */
- u64 z_symbol_error_counter; /* starting count for PMA */
- u64 z_link_error_recovery_counter; /* starting count for PMA */
- u64 z_link_downed_counter; /* starting count for PMA */
- u64 z_port_rcv_errors; /* starting count for PMA */
- u64 z_port_rcv_remphys_errors; /* starting count for PMA */
- u64 z_port_xmit_discards; /* starting count for PMA */
- u64 z_port_xmit_data; /* starting count for PMA */
- u64 z_port_rcv_data; /* starting count for PMA */
- u64 z_port_xmit_packets; /* starting count for PMA */
- u64 z_port_rcv_packets; /* starting count for PMA */
- u32 z_local_link_integrity_errors; /* starting count for PMA */
- u32 z_excessive_buffer_overrun_errors; /* starting count for PMA */
- u32 z_vl15_dropped; /* starting count for PMA */
- u8 sl_to_vl[16];
-};
-
-struct qib_ibdev {
- struct rvt_dev_info rdi;
-
- struct list_head piowait; /* list for wait PIO buf */
- struct list_head dmawait; /* list for wait DMA */
- struct list_head txwait; /* list for wait qib_verbs_txreq */
- struct list_head memwait; /* list for wait kernel memory */
- struct list_head txreq_free;
- struct timer_list mem_timer;
- struct qib_pio_header *pio_hdrs;
- dma_addr_t pio_hdrs_phys;
-
- u32 n_piowait;
- u32 n_txwait;
-
-#ifdef CONFIG_DEBUG_FS
- /* per HCA debugfs */
- struct dentry *qib_ibdev_dbg;
-#endif
-};
-
-struct qib_verbs_counters {
- u64 symbol_error_counter;
- u64 link_error_recovery_counter;
- u64 link_downed_counter;
- u64 port_rcv_errors;
- u64 port_rcv_remphys_errors;
- u64 port_xmit_discards;
- u64 port_xmit_data;
- u64 port_rcv_data;
- u64 port_xmit_packets;
- u64 port_rcv_packets;
- u32 local_link_integrity_errors;
- u32 excessive_buffer_overrun_errors;
- u32 vl15_dropped;
-};
-
-static inline struct qib_ibdev *to_idev(struct ib_device *ibdev)
-{
- struct rvt_dev_info *rdi;
-
- rdi = container_of(ibdev, struct rvt_dev_info, ibdev);
- return container_of(rdi, struct qib_ibdev, rdi);
-}
-
-/*
- * Send if not busy or waiting for I/O, and either
- * an RC response is pending or we can process send work requests.
- */
-static inline int qib_send_ok(struct rvt_qp *qp)
-{
- return !(qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT_IO)) &&
- (qp->s_hdrwords || (qp->s_flags & RVT_S_RESP_PENDING) ||
- !(qp->s_flags & RVT_S_ANY_WAIT_SEND));
-}
-
-bool _qib_schedule_send(struct rvt_qp *qp);
-bool qib_schedule_send(struct rvt_qp *qp);
-
-static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
-{
- u16 p1 = pkey1 & 0x7FFF;
- u16 p2 = pkey2 & 0x7FFF;
-
- /*
- * Low 15 bits must be non-zero and match, and
- * one of the two must be a full member.
- */
- return p1 && p1 == p2 && ((__s16)pkey1 < 0 || (__s16)pkey2 < 0);
-}
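-
-/*
- * A minimal standalone sketch of the P_Key matching rule above: the
- * low 15 bits carry the partition number and bit 15 the membership
- * type, and a match requires equal partitions with at least one full
- * member. The sample keys are illustrative assumptions.
- */
-#include <stdio.h>
-
-static int pkey_ok_demo(unsigned short pkey1, unsigned short pkey2)
-{
- unsigned short p1 = pkey1 & 0x7FFF;
- unsigned short p2 = pkey2 & 0x7FFF;
-
- /* partitions must match and one side must be a full member */
- return p1 && p1 == p2 && ((pkey1 | pkey2) & 0x8000);
-}
-
-int main(void)
-{
- printf("%d\n", pkey_ok_demo(0xFFFF, 0x7FFF)); /* 1: full vs limited */
- printf("%d\n", pkey_ok_demo(0x7FFF, 0x7FFF)); /* 0: both limited */
- printf("%d\n", pkey_ok_demo(0xFFFF, 0xFFFE)); /* 0: different partitions */
- return 0;
-}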
-
-void qib_bad_pkey(struct qib_ibport *ibp, u32 key, u32 sl,
- u32 qp1, u32 qp2, __be16 lid1, __be16 lid2);
-void qib_cap_mask_chg(struct rvt_dev_info *rdi, u32 port_num);
-void qib_sys_guid_chg(struct qib_ibport *ibp);
-void qib_node_desc_chg(struct qib_ibport *ibp);
-int qib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
- const struct ib_wc *in_wc, const struct ib_grh *in_grh,
- const struct ib_mad *in, struct ib_mad *out,
- size_t *out_mad_size, u16 *out_mad_pkey_index);
-void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx);
-void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx);
-
-/*
- * Compare the lower 24 bits of the two values.
- * Returns an integer less than, equal to, or greater than zero.
- */
-static inline int qib_cmp24(u32 a, u32 b)
-{
- return (((int) a) - ((int) b)) << 8;
-}
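-
-/*
- * A minimal standalone sketch of the 24-bit comparison above: shifting
- * the difference left by 8 discards the high byte, so PSNs that wrap
- * around the 24-bit space still compare correctly. Unsigned arithmetic
- * is used here to avoid shifting a negative value.
- */
-#include <stdio.h>
-
-static int cmp24_demo(unsigned a, unsigned b)
-{
- return (int)((a - b) << 8);
-}
-
-int main(void)
-{
- /* 0x000001 is "after" 0xFFFFFF once the PSN wraps */
- printf("%d\n", cmp24_demo(0x000001, 0xFFFFFF) > 0); /* 1 */
- printf("%d\n", cmp24_demo(0x000005, 0x000007) < 0); /* 1 */
- printf("%d\n", cmp24_demo(0x000042, 0x000042) == 0); /* 1 */
- return 0;
-}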
-
-int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
- u64 *rwords, u64 *spkts, u64 *rpkts,
- u64 *xmit_wait);
-
-int qib_get_counters(struct qib_pportdata *ppd,
- struct qib_verbs_counters *cntrs);
-
-/*
- * Functions provided by qib driver for rdmavt to use
- */
-unsigned qib_free_all_qps(struct rvt_dev_info *rdi);
-void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
-void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
-void qib_notify_qp_reset(struct rvt_qp *qp);
-int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
- enum ib_qp_type type, u32 port);
-void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
-#ifdef CONFIG_DEBUG_FS
-
-void qib_qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter);
-
-#endif
-
-unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult);
-
-void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail);
-
-void qib_put_txreq(struct qib_verbs_txreq *tx);
-
-int qib_verbs_send(struct rvt_qp *qp, struct ib_header *hdr,
- u32 hdrwords, struct rvt_sge_state *ss, u32 len);
-
-void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
-
-void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
-
-int qib_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
-
-int qib_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
- bool *call_send);
-
-struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);
-
-void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr);
-
-int qib_post_ud_send(struct rvt_qp *qp, const struct ib_send_wr *wr);
-
-void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
- int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
-
-void qib_migrate_qp(struct rvt_qp *qp);
-
-int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
- int has_grh, struct rvt_qp *qp, u32 bth0);
-
-u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
- const struct ib_global_route *grh, u32 hwords, u32 nwords);
-
-void qib_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
- u32 bth0, u32 bth2);
-
-void _qib_do_send(struct work_struct *work);
-
-void qib_do_send(struct rvt_qp *qp);
-
-void qib_send_rc_ack(struct rvt_qp *qp);
-
-int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags);
-
-int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags);
-
-int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags);
-
-int qib_register_ib_device(struct qib_devdata *);
-
-void qib_unregister_ib_device(struct qib_devdata *);
-
-void qib_ib_rcv(struct qib_ctxtdata *, void *, void *, u32);
-
-void qib_ib_piobufavail(struct qib_devdata *);
-
-unsigned qib_get_npkeys(struct qib_devdata *);
-
-unsigned qib_get_pkey(struct qib_ibport *, unsigned);
-
-extern const enum ib_wc_opcode ib_qib_wc_opcode[];
-
-/*
- * Below are the HCA-independent IB PhysPortState values, returned
- * by the f_ibphys_portstate() routine.
- */
-#define IB_PHYSPORTSTATE_SLEEP 1
-#define IB_PHYSPORTSTATE_POLL 2
-#define IB_PHYSPORTSTATE_DISABLED 3
-#define IB_PHYSPORTSTATE_CFG_TRAIN 4
-#define IB_PHYSPORTSTATE_LINKUP 5
-#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
-#define IB_PHYSPORTSTATE_CFG_DEBOUNCE 8
-#define IB_PHYSPORTSTATE_CFG_IDLE 0xB
-#define IB_PHYSPORTSTATE_RECOVERY_RETRAIN 0xC
-#define IB_PHYSPORTSTATE_RECOVERY_WAITRMT 0xE
-#define IB_PHYSPORTSTATE_RECOVERY_IDLE 0xF
-#define IB_PHYSPORTSTATE_CFG_ENH 0x10
-#define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13
-
-extern const int ib_rvt_state_ops[];
-
-extern __be64 ib_qib_sys_image_guid; /* in network order */
-
-extern unsigned int ib_rvt_lkey_table_size;
-
-extern unsigned int ib_qib_max_cqes;
-
-extern unsigned int ib_qib_max_cqs;
-
-extern unsigned int ib_qib_max_qp_wrs;
-
-extern unsigned int ib_qib_max_qps;
-
-extern unsigned int ib_qib_max_sges;
-
-extern unsigned int ib_qib_max_mcast_grps;
-
-extern unsigned int ib_qib_max_mcast_qp_attached;
-
-extern unsigned int ib_qib_max_srqs;
-
-extern unsigned int ib_qib_max_srq_sges;
-
-extern unsigned int ib_qib_max_srq_wrs;
-
-extern const u32 ib_qib_rnr_table[];
-
-extern const struct rvt_operation_params qib_post_parms[];
-
-#endif /* QIB_VERBS_H */
diff --git a/drivers/infiniband/hw/qib/qib_wc_ppc64.c b/drivers/infiniband/hw/qib/qib_wc_ppc64.c
deleted file mode 100644
index 673cf4c22ebd..000000000000
--- a/drivers/infiniband/hw/qib/qib_wc_ppc64.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file is conditionally built on PowerPC only. Otherwise weak symbol
- * versions of the functions exported from here are used.
- */
-
-#include "qib.h"
-
-/**
- * qib_enable_wc - enable write combining for MMIO writes to the device
- * @dd: qlogic_ib device
- *
- * Nothing to do on PowerPC, so just return without error.
- */
-int qib_enable_wc(struct qib_devdata *dd)
-{
- return 0;
-}
-
-/**
- * qib_unordered_wc - indicate whether write combining is unordered
- *
- * Because our performance depends on our ability to do write
- * combining mmio writes in the most efficient way, we need to
- * know if we are on a processor that may reorder stores when
- * write combining.
- */
-int qib_unordered_wc(void)
-{
- return 1;
-}
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
deleted file mode 100644
index edd0ddbd4481..000000000000
--- a/drivers/infiniband/hw/qib/qib_wc_x86_64.c
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file is conditionally built on x86_64 only. Otherwise weak symbol
- * versions of the functions exported from here are used.
- */
-
-#include <linux/pci.h>
-#include <asm/mtrr.h>
-#include <asm/processor.h>
-
-#include "qib.h"
-
-/**
- * qib_enable_wc - enable write combining for MMIO writes to the device
- * @dd: qlogic_ib device
- *
- * This routine is x86_64-specific; it twiddles the CPU's MTRRs to enable
- * write combining.
- */
-int qib_enable_wc(struct qib_devdata *dd)
-{
- int ret = 0;
- u64 pioaddr, piolen;
- unsigned bits;
- const unsigned long addr = pci_resource_start(dd->pcidev, 0);
- const size_t len = pci_resource_len(dd->pcidev, 0);
-
- /*
- * Set the PIO buffers to be WCCOMB, so we get HT bursts to the
- * chip. Linux (possibly the hardware) requires the range to start on
- * a power-of-2 address matching its length (which must be a power of 2).
- * For rev1, that means the base address, for rev2, it will be just
- * the PIO buffers themselves.
- * For chips with two sets of buffers, the calculations are
- * somewhat more complicated; we need to sum, and the piobufbase
- * register has both offsets, 2K in low 32 bits, 4K in high 32 bits.
- * The buffers are still packed, so a single range covers both.
- */
- if (dd->piobcnt2k && dd->piobcnt4k) {
- /* 2 sizes for chip */
- unsigned long pio2kbase, pio4kbase;
-
- pio2kbase = dd->piobufbase & 0xffffffffUL;
- pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
- if (pio2kbase < pio4kbase) {
- /* all current chips */
- pioaddr = addr + pio2kbase;
- piolen = pio4kbase - pio2kbase +
- dd->piobcnt4k * dd->align4k;
- } else {
- pioaddr = addr + pio4kbase;
- piolen = pio2kbase - pio4kbase +
- dd->piobcnt2k * dd->palign;
- }
- } else { /* single buffer size (2K, currently) */
- pioaddr = addr + dd->piobufbase;
- piolen = dd->piobcnt2k * dd->palign +
- dd->piobcnt4k * dd->align4k;
- }
-
- for (bits = 0; !(piolen & (1ULL << bits)); bits++)
- ; /* do nothing */
-
- if (piolen != (1ULL << bits)) {
- piolen >>= bits;
- while (piolen >>= 1)
- bits++;
- piolen = 1ULL << (bits + 1);
- }
- if (pioaddr & (piolen - 1)) {
- u64 atmp = pioaddr & ~(piolen - 1);
-
- if (atmp < addr || (atmp + piolen) > (addr + len)) {
- qib_dev_err(dd,
- "No way to align address/size (%llx/%llx), no WC mtrr\n",
- (unsigned long long) atmp,
- (unsigned long long) piolen << 1);
- ret = -ENODEV;
- } else {
- pioaddr = atmp;
- piolen <<= 1;
- }
- }
-
- if (!ret) {
- dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen);
- if (dd->wc_cookie < 0)
- /* use error from routine */
- ret = dd->wc_cookie;
- }
-
- return ret;
-}
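-
-/*
- * A minimal standalone sketch of the power-of-2 rounding done above
- * for the MTRR/WC range: a non-power-of-2 length is rounded up to the
- * next power of two and the base is aligned down to that size. (The
- * driver additionally doubles the length when the base needs
- * realignment; that step is omitted here.) Sample values are
- * illustrative assumptions.
- */
-#include <stdio.h>
-
-int main(void)
-{
- unsigned long long piolen = 0x3000; /* 12 KiB, not a power of 2 */
- unsigned long long pioaddr = 0x5000;
- unsigned bits;
-
- for (bits = 0; !(piolen & (1ULL << bits)); bits++)
- ; /* count trailing zero bits */
-
- if (piolen != (1ULL << bits)) {
- piolen >>= bits;
- while (piolen >>= 1)
- bits++;
- piolen = 1ULL << (bits + 1); /* next power of 2 up */
- }
- pioaddr &= ~(piolen - 1); /* align base down to the rounded size */
-
- printf("len=%#llx addr=%#llx\n", piolen, pioaddr); /* 0x4000 0x4000 */
- return 0;
-}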
-
-/**
- * qib_disable_wc - disable write combining for MMIO writes to the device
- * @dd: qlogic_ib device
- */
-void qib_disable_wc(struct qib_devdata *dd)
-{
- arch_phys_wc_del(dd->wc_cookie);
-}
-
-/**
- * qib_unordered_wc - indicate whether write combining is unordered
- *
- * Because our performance depends on our ability to do write combining mmio
- * writes in the most efficient way, we need to know if we are on an Intel
- * or AMD x86_64 processor. AMD x86_64 processors flush WC buffers out in
- * the order completed, and so no special flushing is required to get
- * correct ordering. Intel processors, however, will flush write buffers
- * out in "random" orders, and so explicit ordering is needed at times.
- */
-int qib_unordered_wc(void)
-{
- return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
-}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 217af34e82b3..ae5df96589d9 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -592,6 +592,7 @@ int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct usnic_ib_mr *mr;
@@ -600,6 +601,9 @@ struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
virt_addr, length);
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
index 53f53f2d53be..e3031ac32488 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h
@@ -60,6 +60,7 @@ int usnic_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
int usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
int usnic_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
int usnic_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
index e80848bfb3bd..ec7a00c8285b 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c
@@ -104,12 +104,14 @@ struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc)
* @length: length of region
* @virt_addr: I/O virtual address
* @access_flags: access flags for memory region
+ * @dmah: dma handle
* @udata: user data
*
* @return: ib_mr pointer on success, otherwise returns an errno.
*/
struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct pvrdma_dev *dev = to_vdev(pd->device);
@@ -121,6 +123,9 @@ struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
int ret, npages;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (length == 0 || length > dev->dsr->caps.max_mr_size) {
dev_warn(&dev->pdev->dev, "invalid mem region length\n");
return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
index fd47b0b1df5c..603e5a9311eb 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -366,6 +366,7 @@ int pvrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
int pvrdma_dereg_mr(struct ib_mr *mr, struct ib_udata *udata);
struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,