Diffstat (limited to 'drivers/infiniband/hw/qib/qib_rc.c')
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c  94
1 file changed, 17 insertions(+), 77 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 6a189310e12d..ea0bc6bc09fa 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -38,7 +38,6 @@
/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x
-static void rc_timeout(unsigned long arg);
static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
u32 psn, u32 pmtu)
@@ -54,15 +53,6 @@ static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
return wqe->length - len;
}
-static void start_timer(struct rvt_qp *qp)
-{
- qp->s_flags |= RVT_S_TIMER;
- qp->s_timer.function = rc_timeout;
- /* 4.096 usec. * (1 << qp->timeout) */
- qp->s_timer.expires = jiffies + qp->timeout_jiffies;
- add_timer(&qp->s_timer);
-}
-
/**
* qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
* @dev: the device for this QP
@@ -837,7 +827,7 @@ done:
* Back up requester to resend the last un-ACKed request.
* The QP r_lock and s_lock should be held and interrupts disabled.
*/
-static void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
+void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
struct qib_ibport *ibp;
@@ -870,46 +860,6 @@ static void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
}
/*
- * This is called from s_timer for missing responses.
- */
-static void rc_timeout(unsigned long arg)
-{
- struct rvt_qp *qp = (struct rvt_qp *)arg;
- struct qib_ibport *ibp;
- unsigned long flags;
-
- spin_lock_irqsave(&qp->r_lock, flags);
- spin_lock(&qp->s_lock);
- if (qp->s_flags & RVT_S_TIMER) {
- ibp = to_iport(qp->ibqp.device, qp->port_num);
- ibp->rvp.n_rc_timeouts++;
- qp->s_flags &= ~RVT_S_TIMER;
- del_timer(&qp->s_timer);
- qib_restart_rc(qp, qp->s_last_psn + 1, 1);
- qib_schedule_send(qp);
- }
- spin_unlock(&qp->s_lock);
- spin_unlock_irqrestore(&qp->r_lock, flags);
-}
-
-/*
- * This is called from s_timer for RNR timeouts.
- */
-void qib_rc_rnr_retry(unsigned long arg)
-{
- struct rvt_qp *qp = (struct rvt_qp *)arg;
- unsigned long flags;
-
- spin_lock_irqsave(&qp->s_lock, flags);
- if (qp->s_flags & RVT_S_WAIT_RNR) {
- qp->s_flags &= ~RVT_S_WAIT_RNR;
- del_timer(&qp->s_timer);
- qib_schedule_send(qp);
- }
- spin_unlock_irqrestore(&qp->s_lock, flags);
-}
-
-/*
* Set qp->s_sending_psn to the next PSN after the given one.
* This would be psn+1 except when RDMA reads are present.
*/
@@ -972,7 +922,7 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
!(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
- start_timer(qp);
+ rvt_add_retry_timer(qp);
while (qp->s_last != qp->s_acked) {
u32 s_last;
@@ -1085,12 +1035,6 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
u32 ack_psn;
int diff;
- /* Remove QP from retry timer */
- if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
- qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
- del_timer(&qp->s_timer);
- }
-
/*
* Note that NAKs implicitly ACK outstanding SEND and RDMA write
* requests and implicitly NAK RDMA read and atomic requests issued
@@ -1186,16 +1130,20 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
* We are expecting more ACKs so
* reset the retransmit timer.
*/
- start_timer(qp);
+ rvt_mod_retry_timer(qp);
/*
* We can stop resending the earlier packets and
* continue with the next packet the receiver wants.
*/
if (qib_cmp24(qp->s_psn, psn) <= 0)
reset_psn(qp, psn + 1);
- } else if (qib_cmp24(qp->s_psn, psn) <= 0) {
- qp->s_state = OP(SEND_LAST);
- qp->s_psn = psn + 1;
+ } else {
+ /* No more acks - kill all timers */
+ rvt_stop_rc_timers(qp);
+ if (qib_cmp24(qp->s_psn, psn) <= 0) {
+ qp->s_state = OP(SEND_LAST);
+ qp->s_psn = psn + 1;
+ }
}
if (qp->s_flags & RVT_S_WAIT_ACK) {
qp->s_flags &= ~RVT_S_WAIT_ACK;
@@ -1205,8 +1153,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
qp->s_rnr_retry = qp->s_rnr_retry_cnt;
qp->s_retry = qp->s_retry_cnt;
update_last_psn(qp, psn);
- ret = 1;
- goto bail;
+ return 1;
case 1: /* RNR NAK */
ibp->rvp.n_rnr_naks++;
@@ -1229,13 +1176,9 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
reset_psn(qp, psn);
qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
- qp->s_flags |= RVT_S_WAIT_RNR;
- qp->s_timer.function = qib_rc_rnr_retry;
- qp->s_timer.expires = jiffies + usecs_to_jiffies(
- ib_qib_rnr_table[(aeth >> RVT_AETH_CREDIT_SHIFT) &
- RVT_AETH_CREDIT_MASK]);
- add_timer(&qp->s_timer);
- goto bail;
+ rvt_stop_rc_timers(qp);
+ rvt_add_rnr_timer(qp, aeth);
+ return 0;
case 3: /* NAK */
if (qp->s_acked == qp->s_tail)
@@ -1291,6 +1234,7 @@ reserved:
}
bail:
+ rvt_stop_rc_timers(qp);
return ret;
}
@@ -1304,10 +1248,7 @@ static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
struct rvt_swqe *wqe;
/* Remove QP from retry timer */
- if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
- qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
- del_timer(&qp->s_timer);
- }
+ rvt_stop_rc_timers(qp);
wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
@@ -1462,8 +1403,7 @@ read_middle:
* We got a response so update the timeout.
* 4.096 usec. * (1 << qp->timeout)
*/
- qp->s_flags |= RVT_S_TIMER;
- mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
+ rvt_mod_retry_timer(qp);
if (qp->s_flags & RVT_S_WAIT_ACK) {
qp->s_flags &= ~RVT_S_WAIT_ACK;
qib_schedule_send(qp);
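
Note on the pattern this patch applies (an illustrative sketch, not part of the commit): the removed start_timer()/rc_timeout()/qib_rc_rnr_retry() open-coding is replaced by the rdmavt helpers rvt_add_retry_timer(), rvt_mod_retry_timer(), rvt_add_rnr_timer() and rvt_stop_rc_timers(), so the RC state machine only expresses intent and the timer bookkeeping lives in one shared place. The self-contained userspace C model below illustrates that consolidation; struct qp_state, arm_retry_timer(), arm_rnr_timer() and stop_rc_timers() are names invented for this sketch and are not kernel or rdmavt APIs.

/*
 * Userspace model of the refactor: callers set logical state through
 * helpers instead of touching the timer fields directly, and one helper
 * (stop_rc_timers) clears everything, mirroring rvt_stop_rc_timers().
 */
#include <stdio.h>

#define S_TIMER     0x1   /* retransmit timer armed */
#define S_WAIT_RNR  0x2   /* RNR timer armed */

struct qp_state {
	unsigned int s_flags;
	unsigned long retry_deadline;   /* stand-in for qp->s_timer */
	unsigned long rnr_deadline;
};

/* Arm (or re-arm) the retransmit timer; callers no longer set expires. */
static void arm_retry_timer(struct qp_state *qp, unsigned long now,
			    unsigned long timeout)
{
	qp->s_flags |= S_TIMER;
	qp->retry_deadline = now + timeout;
}

/* Arm the RNR timer from a delay derived from the NAK (AETH in the patch). */
static void arm_rnr_timer(struct qp_state *qp, unsigned long now,
			  unsigned long rnr_delay)
{
	qp->s_flags |= S_WAIT_RNR;
	qp->rnr_deadline = now + rnr_delay;
}

/* Single place that clears both timers and their flags. */
static void stop_rc_timers(struct qp_state *qp)
{
	qp->s_flags &= ~(S_TIMER | S_WAIT_RNR);
	qp->retry_deadline = qp->rnr_deadline = 0;
}

int main(void)
{
	struct qp_state qp = { 0 };

	arm_retry_timer(&qp, 100, 4096);  /* request sent, expect an ACK */
	arm_rnr_timer(&qp, 150, 640);     /* peer returned an RNR NAK */
	stop_rc_timers(&qp);              /* final ACK: kill all timers */
	printf("flags after stop: %#x\n", qp.s_flags);
	return 0;
}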