Diffstat (limited to 'drivers/infiniband/hw/cxgb4/t4.h')
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h  483
1 files changed, 362 insertions, 121 deletions
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index ebcb03bd1b72..c3b0e2896475 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -33,26 +33,17 @@
#include "t4_hw.h"
#include "t4_regs.h"
+#include "t4_values.h"
#include "t4_msg.h"
+#include "t4_tcb.h"
#include "t4fw_ri_api.h"
-#define T4_MAX_NUM_QP (1<<16)
-#define T4_MAX_NUM_CQ (1<<15)
-#define T4_MAX_NUM_PD (1<<15)
-#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
-#define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES)
-#define T4_MAX_IQ_SIZE (65520 - 1)
-#define T4_MAX_RQ_SIZE (8192 - T4_EQ_STATUS_ENTRIES)
-#define T4_MAX_SQ_SIZE (T4_MAX_EQ_SIZE - 1)
-#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1)
-#define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1)
-#define T4_MAX_NUM_STAG (1<<15)
-#define T4_MAX_MR_SIZE (~0ULL - 1)
+#define T4_MAX_NUM_PD 65536
+#define T4_MAX_MR_SIZE (~0ULL)
#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
#define T4_STAG_UNSET 0xffffffff
#define T4_FW_MAJ 0
-#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
-#define A_PCIE_MA_SYNC 0x30b4
+#define PCIE_MA_SYNC_A 0x30b4
struct t4_status_page {
__be32 rsvd1; /* flit 0 - hw owns */
@@ -62,12 +53,16 @@ struct t4_status_page {
__be16 pidx;
u8 qp_err; /* flit 1 - sw owns */
u8 db_off;
- u8 pad;
+ u8 pad[2];
u16 host_wq_pidx;
u16 host_cidx;
u16 host_pidx;
+ u16 pad2;
+ u32 srqidx;
};
+#define T4_RQT_ENTRY_SHIFT 6
+#define T4_RQT_ENTRY_SIZE BIT(T4_RQT_ENTRY_SHIFT)
#define T4_EQ_ENTRY_SIZE 64
#define T4_SQ_NUM_SLOTS 5
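
The t4_status_page defined above is not a separate allocation: each work queue reserves one extra slot past the end of its ring and overlays the status page there, which is why helpers later in this file index queue[size].status (for example srq->queue[srq->size].status.host_pidx). A minimal sketch of that convention, assuming union t4_recv_wr exposes a status member as it does in the full header; the helper name is made up:

static inline struct t4_status_page *example_rq_status_page(union t4_recv_wr *queue,
							     u16 size)
{
	/* the slot at index "size" is reserved for the status page */
	return &queue[size].status;
}
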
@@ -84,12 +79,22 @@ struct t4_status_page {
sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
sizeof(struct fw_ri_immd)) & ~31UL)
-#define T4_MAX_FR_DEPTH (1024 / sizeof(u64))
+#define T4_MAX_FR_IMMD_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
+#define T4_MAX_FR_DSGL 1024
+#define T4_MAX_FR_DSGL_DEPTH (T4_MAX_FR_DSGL / sizeof(u64))
+
+static inline int t4_max_fr_depth(int use_dsgl)
+{
+ return use_dsgl ? T4_MAX_FR_DSGL_DEPTH : T4_MAX_FR_IMMD_DEPTH;
+}
#define T4_RQ_NUM_SLOTS 2
#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
#define T4_MAX_RECV_SGE 4
+#define T4_WRITE_CMPL_MAX_SGL 4
+#define T4_WRITE_CMPL_MAX_CQE 16
+
union t4_wr {
struct fw_ri_res_wr res;
struct fw_ri_wr ri;
@@ -98,7 +103,9 @@ union t4_wr {
struct fw_ri_rdma_read_wr read;
struct fw_ri_bind_mw_wr bind;
struct fw_ri_fr_nsmr_wr fr;
+ struct fw_ri_fr_nsmr_tpte_wr fr_tpte;
struct fw_ri_inv_lstag_wr inv;
+ struct fw_ri_rdma_write_cmpl_wr write_cmpl;
struct t4_status_page status;
__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
};
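
Every SQ work request, whichever union member is used, must fit in T4_SQ_NUM_SLOTS equeue slots of T4_EQ_ENTRY_SIZE bytes, so the flits[] overlay above is 5 * 64 / 8 = 40 64-bit words. A hedged sketch of a compile-time guard for that budget (not something the patch adds; example_check_wr_size is a made-up name):

static inline void example_check_wr_size(void)
{
	/* fails the build if a union member ever outgrows the 5-slot budget */
	BUILD_BUG_ON(sizeof(union t4_wr) > T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS);
}
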
@@ -173,7 +180,7 @@ struct t4_cqe {
__be32 msn;
} rcqe;
struct {
- u32 nada1;
+ __be32 stag;
u16 nada2;
u16 cidx;
} scqe;
@@ -181,85 +188,120 @@ struct t4_cqe {
__be32 wrid_hi;
__be32 wrid_low;
} gen;
+ struct {
+ __be32 stag;
+ __be32 msn;
+ __be32 reserved;
+ __be32 abs_rqe_idx;
+ } srcqe;
+ struct {
+ __be32 mo;
+ __be32 msn;
+ /*
+ * Use union for immediate data to be consistent with
+ * stack's 32 bit data and iWARP spec's 64 bit data.
+ */
+ union {
+ struct {
+ __be32 imm_data32;
+ u32 reserved;
+ } ib_imm_data;
+ __be64 imm_data64;
+ } iw_imm_data;
+ } imm_data_rcqe;
+
+ u64 drain_cookie;
+ __be64 flits[3];
} u;
- __be64 reserved;
+ __be64 reserved[3];
__be64 bits_type_ts;
};
/* macros for flit 0 of the cqe */
-#define S_CQE_QPID 12
-#define M_CQE_QPID 0xFFFFF
-#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
-#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)
-
-#define S_CQE_SWCQE 11
-#define M_CQE_SWCQE 0x1
-#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
-#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)
-
-#define S_CQE_STATUS 5
-#define M_CQE_STATUS 0x1F
-#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
-#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)
-
-#define S_CQE_TYPE 4
-#define M_CQE_TYPE 0x1
-#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
-#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)
-
-#define S_CQE_OPCODE 0
-#define M_CQE_OPCODE 0xF
-#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
-#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)
-
-#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header)))
-#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header)))
-#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header)))
+#define CQE_QPID_S 12
+#define CQE_QPID_M 0xFFFFF
+#define CQE_QPID_G(x) ((((x) >> CQE_QPID_S)) & CQE_QPID_M)
+#define CQE_QPID_V(x) ((x)<<CQE_QPID_S)
+
+#define CQE_SWCQE_S 11
+#define CQE_SWCQE_M 0x1
+#define CQE_SWCQE_G(x) ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M)
+#define CQE_SWCQE_V(x) ((x)<<CQE_SWCQE_S)
+
+#define CQE_DRAIN_S 10
+#define CQE_DRAIN_M 0x1
+#define CQE_DRAIN_G(x) ((((x) >> CQE_DRAIN_S)) & CQE_DRAIN_M)
+#define CQE_DRAIN_V(x) ((x)<<CQE_DRAIN_S)
+
+#define CQE_STATUS_S 5
+#define CQE_STATUS_M 0x1F
+#define CQE_STATUS_G(x) ((((x) >> CQE_STATUS_S)) & CQE_STATUS_M)
+#define CQE_STATUS_V(x) ((x)<<CQE_STATUS_S)
+
+#define CQE_TYPE_S 4
+#define CQE_TYPE_M 0x1
+#define CQE_TYPE_G(x) ((((x) >> CQE_TYPE_S)) & CQE_TYPE_M)
+#define CQE_TYPE_V(x) ((x)<<CQE_TYPE_S)
+
+#define CQE_OPCODE_S 0
+#define CQE_OPCODE_M 0xF
+#define CQE_OPCODE_G(x) ((((x) >> CQE_OPCODE_S)) & CQE_OPCODE_M)
+#define CQE_OPCODE_V(x) ((x)<<CQE_OPCODE_S)
+
+#define SW_CQE(x) (CQE_SWCQE_G(be32_to_cpu((x)->header)))
+#define DRAIN_CQE(x) (CQE_DRAIN_G(be32_to_cpu((x)->header)))
+#define CQE_QPID(x) (CQE_QPID_G(be32_to_cpu((x)->header)))
+#define CQE_TYPE(x) (CQE_TYPE_G(be32_to_cpu((x)->header)))
#define SQ_TYPE(x) (CQE_TYPE((x)))
#define RQ_TYPE(x) (!CQE_TYPE((x)))
-#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header)))
-#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header)))
+#define CQE_STATUS(x) (CQE_STATUS_G(be32_to_cpu((x)->header)))
+#define CQE_OPCODE(x) (CQE_OPCODE_G(be32_to_cpu((x)->header)))
#define CQE_SEND_OPCODE(x)( \
- (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
- (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
- (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
- (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
+ (CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
+ (CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
+ (CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
+ (CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
#define CQE_LEN(x) (be32_to_cpu((x)->len))
/* used for RQ completion processing */
#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
#define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn))
+#define CQE_ABS_RQE_IDX(x) (be32_to_cpu((x)->u.srcqe.abs_rqe_idx))
+#define CQE_IMM_DATA(x)( \
+ (x)->u.imm_data_rcqe.iw_imm_data.ib_imm_data.imm_data32)
/* used for SQ completion processing */
#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)
+#define CQE_WRID_FR_STAG(x) (be32_to_cpu((x)->u.scqe.stag))
/* generic accessor macros */
-#define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi)
-#define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low)
+#define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi))
+#define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low))
+#define CQE_DRAIN_COOKIE(x) ((x)->u.drain_cookie)
/* macros for flit 3 of the cqe */
-#define S_CQE_GENBIT 63
-#define M_CQE_GENBIT 0x1
-#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
-#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
+#define CQE_GENBIT_S 63
+#define CQE_GENBIT_M 0x1
+#define CQE_GENBIT_G(x) (((x) >> CQE_GENBIT_S) & CQE_GENBIT_M)
+#define CQE_GENBIT_V(x) ((x)<<CQE_GENBIT_S)
-#define S_CQE_OVFBIT 62
-#define M_CQE_OVFBIT 0x1
-#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)
+#define CQE_OVFBIT_S 62
+#define CQE_OVFBIT_M 0x1
+#define CQE_OVFBIT_G(x) ((((x) >> CQE_OVFBIT_S)) & CQE_OVFBIT_M)
-#define S_CQE_IQTYPE 60
-#define M_CQE_IQTYPE 0x3
-#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)
+#define CQE_IQTYPE_S 60
+#define CQE_IQTYPE_M 0x3
+#define CQE_IQTYPE_G(x) ((((x) >> CQE_IQTYPE_S)) & CQE_IQTYPE_M)
-#define M_CQE_TS 0x0fffffffffffffffULL
-#define G_CQE_TS(x) ((x) & M_CQE_TS)
+#define CQE_TS_M 0x0fffffffffffffffULL
+#define CQE_TS_G(x) ((x) & CQE_TS_M)
-#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
-#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
-#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_OVFBIT(x) ((unsigned)CQE_OVFBIT_G(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_GENBIT(x) ((unsigned)CQE_GENBIT_G(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_TS(x) (CQE_TS_G(be64_to_cpu((x)->bits_type_ts)))
struct t4_swsqe {
u64 wr_id;
@@ -269,6 +311,9 @@ struct t4_swsqe {
int complete;
int signaled;
u16 idx;
+ int flushed;
+ ktime_t host_time;
+ u64 sge_ts;
};
static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
@@ -291,19 +336,26 @@ struct t4_sq {
unsigned long phys_addr;
struct t4_swsqe *sw_sq;
struct t4_swsqe *oldest_read;
- u64 udb;
+ void __iomem *bar2_va;
+ u64 bar2_pa;
size_t memsize;
+ u32 bar2_qid;
u32 qid;
u16 in_use;
u16 size;
u16 cidx;
u16 pidx;
u16 wq_pidx;
+ u16 wq_pidx_inc;
u16 flags;
+ short flush_cidx;
};
struct t4_swrqe {
u64 wr_id;
+ ktime_t host_time;
+ u64 sge_ts;
+ int valid;
};
struct t4_rq {
@@ -311,8 +363,10 @@ struct t4_rq {
dma_addr_t dma_addr;
DEFINE_DMA_UNMAP_ADDR(mapping);
struct t4_swrqe *sw_rq;
- u64 udb;
+ void __iomem *bar2_va;
+ u64 bar2_pa;
size_t memsize;
+ u32 bar2_qid;
u32 qid;
u32 msn;
u32 rqt_hwaddr;
@@ -322,16 +376,107 @@ struct t4_rq {
u16 cidx;
u16 pidx;
u16 wq_pidx;
+ u16 wq_pidx_inc;
};
struct t4_wq {
struct t4_sq sq;
struct t4_rq rq;
void __iomem *db;
- void __iomem *gts;
struct c4iw_rdev *rdev;
+ int flushed;
+ u8 *qp_errp;
+ u32 *srqidxp;
};
+struct t4_srq_pending_wr {
+ u64 wr_id;
+ union t4_recv_wr wqe;
+ u8 len16;
+};
+
+struct t4_srq {
+ union t4_recv_wr *queue;
+ dma_addr_t dma_addr;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ struct t4_swrqe *sw_rq;
+ void __iomem *bar2_va;
+ u64 bar2_pa;
+ size_t memsize;
+ u32 bar2_qid;
+ u32 qid;
+ u32 msn;
+ u32 rqt_hwaddr;
+ u32 rqt_abs_idx;
+ u16 rqt_size;
+ u16 size;
+ u16 cidx;
+ u16 pidx;
+ u16 wq_pidx;
+ u16 wq_pidx_inc;
+ u16 in_use;
+ struct t4_srq_pending_wr *pending_wrs;
+ u16 pending_cidx;
+ u16 pending_pidx;
+ u16 pending_in_use;
+ u16 ooo_count;
+};
+
+static inline u32 t4_srq_avail(struct t4_srq *srq)
+{
+ return srq->size - 1 - srq->in_use;
+}
+
+static inline void t4_srq_produce(struct t4_srq *srq, u8 len16)
+{
+ srq->in_use++;
+ if (++srq->pidx == srq->size)
+ srq->pidx = 0;
+ srq->wq_pidx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
+ if (srq->wq_pidx >= srq->size * T4_RQ_NUM_SLOTS)
+ srq->wq_pidx %= srq->size * T4_RQ_NUM_SLOTS;
+ srq->queue[srq->size].status.host_pidx = srq->pidx;
+}
+
+static inline void t4_srq_produce_pending_wr(struct t4_srq *srq)
+{
+ srq->pending_in_use++;
+ srq->in_use++;
+ if (++srq->pending_pidx == srq->size)
+ srq->pending_pidx = 0;
+}
+
+static inline void t4_srq_consume_pending_wr(struct t4_srq *srq)
+{
+ srq->pending_in_use--;
+ srq->in_use--;
+ if (++srq->pending_cidx == srq->size)
+ srq->pending_cidx = 0;
+}
+
+static inline void t4_srq_produce_ooo(struct t4_srq *srq)
+{
+ srq->in_use--;
+ srq->ooo_count++;
+}
+
+static inline void t4_srq_consume_ooo(struct t4_srq *srq)
+{
+ srq->cidx++;
+ if (srq->cidx == srq->size)
+ srq->cidx = 0;
+ srq->queue[srq->size].status.host_cidx = srq->cidx;
+ srq->ooo_count--;
+}
+
+static inline void t4_srq_consume(struct t4_srq *srq)
+{
+ srq->in_use--;
+ if (++srq->cidx == srq->size)
+ srq->cidx = 0;
+ srq->queue[srq->size].status.host_cidx = srq->cidx;
+}
+
static inline int t4_rqes_posted(struct t4_wq *wq)
{
return wq->rq.in_use;
@@ -342,11 +487,6 @@ static inline int t4_rq_empty(struct t4_wq *wq)
return wq->rq.in_use == 0;
}
-static inline int t4_rq_full(struct t4_wq *wq)
-{
- return wq->rq.in_use == (wq->rq.size - 1);
-}
-
static inline u32 t4_rq_avail(struct t4_wq *wq)
{
return wq->rq.size - 1 - wq->rq.in_use;
@@ -365,7 +505,6 @@ static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
static inline void t4_rq_consume(struct t4_wq *wq)
{
wq->rq.in_use--;
- wq->rq.msn++;
if (++wq->rq.cidx == wq->rq.size)
wq->rq.cidx = 0;
}
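
For context, the producer/consumer helpers above are paired with the doorbell helpers further down in this header when posting receives. A rough sketch of that flow, with placeholder names and the WR construction elided (illustrative only, not code from the patch):

static int example_post_one_recv(struct t4_wq *wq, union t4_recv_wr *wqe,
				 u8 len16)
{
	if (!t4_rq_avail(wq))
		return -ENOMEM;		/* ring full: size - 1 - in_use == 0 */
	/* ... build the WR in the RQ slot at the current producer index ... */
	t4_rq_produce(wq, len16);	/* advance pidx/wq_pidx, bump in_use */
	t4_ring_rq_db(wq, 1, wqe);	/* notify hardware via BAR2 WC copy or kdoorbell */
	return 0;
}
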
@@ -390,11 +529,6 @@ static inline int t4_sq_empty(struct t4_wq *wq)
return wq->sq.in_use == 0;
}
-static inline int t4_sq_full(struct t4_wq *wq)
-{
- return wq->sq.in_use == (wq->sq.size - 1);
-}
-
static inline u32 t4_sq_avail(struct t4_wq *wq)
{
return wq->sq.size - 1 - wq->sq.in_use;
@@ -412,6 +546,8 @@ static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
static inline void t4_sq_consume(struct t4_wq *wq)
{
+ if (wq->sq.cidx == wq->sq.flush_cidx)
+ wq->sq.flush_cidx = -1;
wq->sq.in_use--;
if (++wq->sq.cidx == wq->sq.size)
wq->sq.cidx = 0;
@@ -427,26 +563,100 @@ static inline u16 t4_sq_wq_size(struct t4_wq *wq)
return wq->sq.size * T4_SQ_NUM_SLOTS;
}
-static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc)
+/* This function copies 64 byte coalesced work request to memory
+ * mapped BAR2 space. For coalesced WRs, the SGE fetches data
+ * from the FIFO instead of from Host.
+ */
+static inline void pio_copy(u64 __iomem *dst, u64 *src)
+{
+ int count = 8;
+
+ while (count) {
+ writeq(*src, dst);
+ src++;
+ dst++;
+ count--;
+ }
+}
+
+static inline void t4_ring_srq_db(struct t4_srq *srq, u16 inc, u8 len16,
+ union t4_recv_wr *wqe)
+{
+ /* Flush host queue memory writes. */
+ wmb();
+ if (inc == 1 && srq->bar2_qid == 0 && wqe) {
+ pr_debug("%s : WC srq->pidx = %d; len16=%d\n",
+ __func__, srq->pidx, len16);
+ pio_copy(srq->bar2_va + SGE_UDB_WCDOORBELL, (u64 *)wqe);
+ } else {
+ pr_debug("%s: DB srq->pidx = %d; len16=%d\n",
+ __func__, srq->pidx, len16);
+ writel(PIDX_T5_V(inc) | QID_V(srq->bar2_qid),
+ srq->bar2_va + SGE_UDB_KDOORBELL);
+ }
+ /* Flush user doorbell area writes. */
+ wmb();
+}
+
+static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe)
{
+
+ /* Flush host queue memory writes. */
wmb();
- writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
+ if (wq->sq.bar2_va) {
+ if (inc == 1 && wq->sq.bar2_qid == 0 && wqe) {
+ pr_debug("WC wq->sq.pidx = %d\n", wq->sq.pidx);
+ pio_copy((u64 __iomem *)
+ (wq->sq.bar2_va + SGE_UDB_WCDOORBELL),
+ (u64 *)wqe);
+ } else {
+ pr_debug("DB wq->sq.pidx = %d\n", wq->sq.pidx);
+ writel(PIDX_T5_V(inc) | QID_V(wq->sq.bar2_qid),
+ wq->sq.bar2_va + SGE_UDB_KDOORBELL);
+ }
+
+ /* Flush user doorbell area writes. */
+ wmb();
+ return;
+ }
+ writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db);
}
-static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc)
+static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc,
+ union t4_recv_wr *wqe)
{
+
+ /* Flush host queue memory writes. */
wmb();
- writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
+ if (wq->rq.bar2_va) {
+ if (inc == 1 && wq->rq.bar2_qid == 0 && wqe) {
+ pr_debug("WC wq->rq.pidx = %d\n", wq->rq.pidx);
+ pio_copy((u64 __iomem *)
+ (wq->rq.bar2_va + SGE_UDB_WCDOORBELL),
+ (void *)wqe);
+ } else {
+ pr_debug("DB wq->rq.pidx = %d\n", wq->rq.pidx);
+ writel(PIDX_T5_V(inc) | QID_V(wq->rq.bar2_qid),
+ wq->rq.bar2_va + SGE_UDB_KDOORBELL);
+ }
+
+ /* Flush user doorbell area writes. */
+ wmb();
+ return;
+ }
+ writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db);
}
static inline int t4_wq_in_error(struct t4_wq *wq)
{
- return wq->rq.queue[wq->rq.size].status.qp_err;
+ return *wq->qp_errp;
}
-static inline void t4_set_wq_in_error(struct t4_wq *wq)
+static inline void t4_set_wq_in_error(struct t4_wq *wq, u32 srqidx)
{
- wq->rq.queue[wq->rq.size].status.qp_err = 1;
+ if (srqidx)
+ *wq->srqidxp = srqidx;
+ *wq->qp_errp = 1;
}
static inline void t4_disable_wq_db(struct t4_wq *wq)
@@ -459,10 +669,9 @@ static inline void t4_enable_wq_db(struct t4_wq *wq)
wq->rq.queue[wq->rq.size].status.db_off = 0;
}
-static inline int t4_wq_db_enabled(struct t4_wq *wq)
-{
- return !wq->rq.queue[wq->rq.size].status.db_off;
-}
+enum t4_cq_flags {
+ CQ_ARMED = 1,
+};
struct t4_cq {
struct t4_cqe *queue;
@@ -470,11 +679,15 @@ struct t4_cq {
DEFINE_DMA_UNMAP_ADDR(mapping);
struct t4_cqe *sw_queue;
void __iomem *gts;
+ void __iomem *bar2_va;
+ u64 bar2_pa;
+ u32 bar2_qid;
struct c4iw_rdev *rdev;
- u64 ugts;
size_t memsize;
__be64 bits_type_ts;
u32 cqid;
+ u32 qid_mask;
+ int vector;
u16 size; /* including status page */
u16 cidx;
u16 sw_pidx;
@@ -483,21 +696,36 @@ struct t4_cq {
u16 cidx_inc;
u8 gen;
u8 error;
+ u8 *qp_errp;
+ unsigned long flags;
};
+static inline void write_gts(struct t4_cq *cq, u32 val)
+{
+ if (cq->bar2_va)
+ writel(val | INGRESSQID_V(cq->bar2_qid),
+ cq->bar2_va + SGE_UDB_GTS);
+ else
+ writel(val | INGRESSQID_V(cq->cqid), cq->gts);
+}
+
+static inline int t4_clear_cq_armed(struct t4_cq *cq)
+{
+ return test_and_clear_bit(CQ_ARMED, &cq->flags);
+}
+
static inline int t4_arm_cq(struct t4_cq *cq, int se)
{
u32 val;
- while (cq->cidx_inc > CIDXINC_MASK) {
- val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
- INGRESSQID(cq->cqid);
- writel(val, cq->gts);
- cq->cidx_inc -= CIDXINC_MASK;
+ set_bit(CQ_ARMED, &cq->flags);
+ while (cq->cidx_inc > CIDXINC_M) {
+ val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7);
+ write_gts(cq, val);
+ cq->cidx_inc -= CIDXINC_M;
}
- val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
- INGRESSQID(cq->cqid);
- writel(val, cq->gts);
+ val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6);
+ write_gts(cq, val);
cq->cidx_inc = 0;
return 0;
}
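
t4_arm_cq() now accumulates CIDXINC credits and posts the GTS update through write_gts(), using the BAR2 region when available. A hedged sketch of how a completion path might pair it with the poll/consume helpers defined further down in this header (placeholder names, locking omitted):

static void example_drain_and_arm(struct t4_cq *cq, int solicited_only)
{
	struct t4_cqe *cqe;

	while (!t4_next_cqe(cq, &cqe)) {
		/* ... translate *cqe into an ib_wc here ... */
		if (SW_CQE(cqe))
			t4_swcq_consume(cq);
		else
			t4_hwcq_consume(cq);	/* returns CIDXINC credits as it goes */
	}
	t4_arm_cq(cq, solicited_only);		/* TIMERREG 6: request an interrupt */
}
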
@@ -505,6 +733,13 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
static inline void t4_swcq_produce(struct t4_cq *cq)
{
cq->sw_in_use++;
+ if (cq->sw_in_use == cq->size) {
+ pr_warn("%s cxgb4 sw cq overflow cqid %u\n",
+ __func__, cq->cqid);
+ cq->error = 1;
+ cq->sw_in_use--;
+ return;
+ }
if (++cq->sw_pidx == cq->size)
cq->sw_pidx = 0;
}
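
The software CQ above holds completions that the driver synthesizes itself, for example when flushing a QP; the new check guards against producing more of them than the CQ can describe. Roughly how such a software CQE is built and queued, as an illustrative sketch rather than code from the patch:

static void example_insert_sw_cqe(struct t4_cq *cq, u32 qpid, u8 opcode,
				  u8 status)
{
	struct t4_cqe cqe;

	memset(&cqe, 0, sizeof(cqe));
	/* SWCQE marks it so SW_CQE()/poll processing takes the sw_queue path;
	 * CQE_TYPE_V(1) makes this an SQ-type completion in this example.
	 */
	cqe.header = cpu_to_be32(CQE_STATUS_V(status) |
				 CQE_OPCODE_V(opcode) |
				 CQE_TYPE_V(1) |
				 CQE_SWCQE_V(1) |
				 CQE_QPID_V(qpid));
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}
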
@@ -519,12 +754,11 @@ static inline void t4_swcq_consume(struct t4_cq *cq)
static inline void t4_hwcq_consume(struct t4_cq *cq)
{
cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
- if (++cq->cidx_inc == (cq->size >> 4)) {
+ if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_M) {
u32 val;
- val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
- INGRESSQID(cq->cqid);
- writel(val, cq->gts);
+ val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7);
+ write_gts(cq, val);
cq->cidx_inc = 0;
}
if (++cq->cidx == cq->size) {
@@ -538,6 +772,11 @@ static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
return (CQE_GENBIT(cqe) == cq->gen);
}
+static inline int t4_cq_notempty(struct t4_cq *cq)
+{
+ return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]);
+}
+
static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
int ret;
@@ -551,8 +790,11 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
ret = -EOVERFLOW;
cq->error = 1;
- printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
+ pr_err("cq overflow cqid %u\n", cq->cqid);
} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
+
+ /* Ensure CQE is flushed to memory */
+ rmb();
*cqe = &cq->queue[cq->cidx];
ret = 0;
} else
@@ -560,13 +802,6 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
return ret;
}
-static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
-{
- if (cq->sw_in_use)
- return &cq->sw_queue[cq->sw_cidx];
- return NULL;
-}
-
static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
int ret = 0;
@@ -580,13 +815,19 @@ static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
return ret;
}
-static inline int t4_cq_in_error(struct t4_cq *cq)
-{
- return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
-}
-
static inline void t4_set_cq_in_error(struct t4_cq *cq)
{
- ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
+ *cq->qp_errp = 1;
}
#endif
+
+struct t4_dev_status_page {
+ u8 db_off;
+ u8 write_cmpl_supported;
+ u16 pad2;
+ u32 pad3;
+ u64 qp_start;
+ u64 qp_size;
+ u64 cq_start;
+ u64 cq_size;
+};
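
Finally, a short sketch of how the flit-0 accessor macros earlier in this header unpack a polled CQE; the function is illustrative only, and the print exists just to show which macro extracts which field:

static void example_dump_cqe(struct t4_cqe *cqe)
{
	pr_debug("qpid %u %s opcode %u status %u idx/msn %u\n",
		 CQE_QPID(cqe),
		 SQ_TYPE(cqe) ? "SQ" : "RQ",
		 CQE_OPCODE(cqe),
		 CQE_STATUS(cqe),
		 SQ_TYPE(cqe) ? CQE_WRID_SQ_IDX(cqe) : CQE_WRID_MSN(cqe));
}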