Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_req.c')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_req.c | 121
1 file changed, 49 insertions(+), 72 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 65134a9aefe7..373b03f223be 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -5,7 +5,6 @@
*/
#include <linux/skbuff.h>
-#include <crypto/hash.h>
#include "rxe.h"
#include "rxe_loc.h"
@@ -98,18 +97,19 @@ static void req_retry(struct rxe_qp *qp)
void rnr_nak_timer(struct timer_list *t)
{
- struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);
+ struct rxe_qp *qp = timer_container_of(qp, t, rnr_nak_timer);
+ unsigned long flags;
rxe_dbg_qp(qp, "nak timer fired\n");
- spin_lock_bh(&qp->state_lock);
+ spin_lock_irqsave(&qp->state_lock, flags);
if (qp->valid) {
/* request a send queue retry */
qp->req.need_retry = 1;
qp->req.wait_for_rnr_timer = 0;
- rxe_sched_task(&qp->req.task);
+ rxe_sched_task(&qp->send_task);
}
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
}
static void req_check_sq_drain_done(struct rxe_qp *qp)
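The hunks above and below convert qp->state_lock from the _bh spinlock variants to the _irqsave/_irqrestore variants. A minimal sketch of the resulting pattern, assuming only the struct rxe_qp fields visible in this diff (this is an illustration, not code from the patch):

/* sketch only: not part of this patch */
static void example_state_access(struct rxe_qp *qp)
{
	unsigned long flags;

	/* spin_lock_irqsave() records the current interrupt state in
	 * `flags` and disables local interrupts, so the same lock can
	 * be taken from process, softirq and hard-irq context without
	 * deadlocking, unlike the _bh variant it replaces.
	 */
	spin_lock_irqsave(&qp->state_lock, flags);
	if (qp->valid)
		qp->req.need_retry = 1;	/* example: touch protected state */
	spin_unlock_irqrestore(&qp->state_lock, flags);
}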
@@ -118,8 +118,9 @@ static void req_check_sq_drain_done(struct rxe_qp *qp)
unsigned int index;
unsigned int cons;
struct rxe_send_wqe *wqe;
+ unsigned long flags;
- spin_lock_bh(&qp->state_lock);
+ spin_lock_irqsave(&qp->state_lock, flags);
if (qp_state(qp) == IB_QPS_SQD) {
q = qp->sq.queue;
index = qp->req.wqe_index;
@@ -140,7 +141,7 @@ static void req_check_sq_drain_done(struct rxe_qp *qp)
break;
qp->attr.sq_draining = 0;
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
if (qp->ibqp.event_handler) {
struct ib_event ev;
@@ -154,7 +155,7 @@ static void req_check_sq_drain_done(struct rxe_qp *qp)
return;
} while (0);
}
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
}
static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp)
@@ -173,6 +174,7 @@ static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp)
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
struct rxe_send_wqe *wqe;
+ unsigned long flags;
req_check_sq_drain_done(qp);
@@ -180,13 +182,13 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
if (wqe == NULL)
return NULL;
- spin_lock_bh(&qp->state_lock);
+ spin_lock_irqsave(&qp->state_lock, flags);
if (unlikely((qp_state(qp) == IB_QPS_SQD) &&
(wqe->state != wqe_state_processing))) {
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
return NULL;
}
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
return wqe;
@@ -421,7 +423,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
int paylen;
int solicited;
u32 qp_num;
- int ack_req;
+ int ack_req = 0;
/* length from start of bth to end of icrc */
paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
@@ -442,8 +444,9 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
qp->attr.dest_qp_num;
- ack_req = ((pkt->mask & RXE_END_MASK) ||
- (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
+ if (qp_type(qp) != IB_QPT_UD && qp_type(qp) != IB_QPT_UC)
+ ack_req = ((pkt->mask & RXE_END_MASK) ||
+ (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
if (ack_req)
qp->req.noack_pkts = 0;
@@ -542,6 +545,8 @@ static void update_wqe_state(struct rxe_qp *qp,
if (pkt->mask & RXE_END_MASK) {
if (qp_type(qp) == IB_QPT_RC)
wqe->state = wqe_state_pending;
+ else
+ wqe->state = wqe_state_done;
} else {
wqe->state = wqe_state_processing;
}
@@ -570,28 +575,6 @@ static void update_wqe_psn(struct rxe_qp *qp,
qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
}
-static void save_state(struct rxe_send_wqe *wqe,
- struct rxe_qp *qp,
- struct rxe_send_wqe *rollback_wqe,
- u32 *rollback_psn)
-{
- rollback_wqe->state = wqe->state;
- rollback_wqe->first_psn = wqe->first_psn;
- rollback_wqe->last_psn = wqe->last_psn;
- *rollback_psn = qp->req.psn;
-}
-
-static void rollback_state(struct rxe_send_wqe *wqe,
- struct rxe_qp *qp,
- struct rxe_send_wqe *rollback_wqe,
- u32 rollback_psn)
-{
- wqe->state = rollback_wqe->state;
- wqe->first_psn = rollback_wqe->first_psn;
- wqe->last_psn = rollback_wqe->last_psn;
- qp->req.psn = rollback_psn;
-}
-
static void update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
qp->req.opcode = pkt->opcode;
@@ -650,12 +633,6 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
wqe->status = IB_WC_SUCCESS;
qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
- /* There is no ack coming for local work requests
- * which can lead to a deadlock. So go ahead and complete
- * it now.
- */
- rxe_sched_task(&qp->comp.task);
-
return 0;
}
@@ -671,25 +648,26 @@ int rxe_requester(struct rxe_qp *qp)
int opcode;
int err;
int ret;
- struct rxe_send_wqe rollback_wqe;
- u32 rollback_psn;
struct rxe_queue *q = qp->sq.queue;
struct rxe_ah *ah;
struct rxe_av *av;
+ unsigned long flags;
- spin_lock_bh(&qp->state_lock);
+ spin_lock_irqsave(&qp->state_lock, flags);
if (unlikely(!qp->valid)) {
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
goto exit;
}
if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
wqe = __req_next_wqe(qp);
- spin_unlock_bh(&qp->state_lock);
- if (wqe)
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+ if (wqe) {
+ wqe->status = IB_WC_WR_FLUSH_ERR;
goto err;
- else
+ } else {
goto exit;
+ }
}
if (unlikely(qp_state(qp) == IB_QPS_RESET)) {
@@ -700,10 +678,10 @@ int rxe_requester(struct rxe_qp *qp)
qp->req.wait_psn = 0;
qp->req.need_retry = 0;
qp->req.wait_for_rnr_timer = 0;
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
goto exit;
}
- spin_unlock_bh(&qp->state_lock);
+ spin_unlock_irqrestore(&qp->state_lock, flags);
/* we come here if the retransmit timer has fired
* or if the rnr timer has fired. If the retransmit
@@ -780,7 +758,6 @@ int rxe_requester(struct rxe_qp *qp)
qp->req.wqe_index);
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
- rxe_sched_task(&qp->comp.task);
goto done;
}
payload = mtu;
@@ -825,35 +802,18 @@ int rxe_requester(struct rxe_qp *qp)
if (ah)
rxe_put(ah);
- /*
- * To prevent a race on wqe access between requester and completer,
- * wqe members state and psn need to be set before calling
- * rxe_xmit_packet().
- * Otherwise, completer might initiate an unjustified retry flow.
- */
- save_state(wqe, qp, &rollback_wqe, &rollback_psn);
- update_wqe_state(qp, wqe, &pkt);
- update_wqe_psn(qp, wqe, &pkt, payload);
-
err = rxe_xmit_packet(qp, &pkt, skb);
if (err) {
- qp->need_req_skb = 1;
-
- rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
-
- if (err == -EAGAIN) {
- rxe_sched_task(&qp->req.task);
- goto exit;
- }
-
wqe->status = IB_WC_LOC_QP_OP_ERR;
goto err;
}
+ update_wqe_state(qp, wqe, &pkt);
+ update_wqe_psn(qp, wqe, &pkt, payload);
update_state(qp, &pkt);
/* A non-zero return value will cause rxe_do_task to
- * exit its loop and end the tasklet. A zero return
+ * exit its loop and end the work item. A zero return
* will continue looping and return to rxe_requester
*/
done:
@@ -869,3 +829,20 @@ exit:
out:
return ret;
}
+
+int rxe_sender(struct rxe_qp *qp)
+{
+ int req_ret;
+ int comp_ret;
+
+ /* process the send queue */
+ req_ret = rxe_requester(qp);
+
+ /* process the response queue */
+ comp_ret = rxe_completer(qp);
+
+ /* exit the task loop if both requester and completer
+ * are ready
+ */
+ return (req_ret && comp_ret) ? -EAGAIN : 0;
+}
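For context on how the new combined entry point might be driven: a hedged sketch, assuming an rxe_init_task() helper and the qp->send_task field referenced in this diff (the actual hookup lives outside rxe_req.c and is not part of this patch):

/* Hypothetical sketch only: a single work item drives both the
 * requester and the completer through rxe_sender(), replacing the
 * separate qp->req.task and qp->comp.task scheduling removed above.
 * Names other than rxe_sender() and rxe_sched_task() are assumptions,
 * not taken from this diff.
 */
static void example_init_send_task(struct rxe_qp *qp)
{
	/* one task now serves the whole send side */
	rxe_init_task(&qp->send_task, qp, rxe_sender);
}

static void example_kick_send_side(struct rxe_qp *qp)
{
	/* events that previously scheduled req.task or comp.task
	 * (posting a WQE, receiving an ACK, the RNR timer firing) now
	 * schedule the same send_task; rxe_sender() is re-run until
	 * both rxe_requester() and rxe_completer() return non-zero.
	 */
	rxe_sched_task(&qp->send_task);
}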