Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_resp.c')
 drivers/infiniband/sw/rxe/rxe_resp.c | 66 ++++++++++++++++++++++++++++++++++++++++++------------------------
 1 file changed, 42 insertions(+), 24 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index da470a925efc..711f73e0bbb1 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -49,18 +49,8 @@ static char *resp_state_name[] = {
 /* rxe_recv calls here to add a request packet to the input queue */
 void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
 {
-	int must_sched;
-	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
-
 	skb_queue_tail(&qp->req_pkts, skb);
-
-	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
-			(skb_queue_len(&qp->req_pkts) > 1);
-
-	if (must_sched)
-		rxe_sched_task(&qp->resp.task);
-	else
-		rxe_run_task(&qp->resp.task);
+	rxe_sched_task(&qp->recv_task);
 }
 
 static inline enum resp_states get_req(struct rxe_qp *qp,
@@ -351,9 +341,22 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
 	/*
 	 * See IBA C9-92
 	 * For UD QPs we only check if the packet will fit in the
-	 * receive buffer later. For rmda operations additional
+	 * receive buffer later. For RDMA operations additional
 	 * length checks are performed in check_rkey.
 	 */
+	if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) {
+		unsigned int payload = payload_size(pkt);
+		unsigned int recv_buffer_len = 0;
+		int i;
+
+		for (i = 0; i < qp->resp.wqe->dma.num_sge; i++)
+			recv_buffer_len += qp->resp.wqe->dma.sge[i].length;
+		if (payload + sizeof(union rdma_network_hdr) > recv_buffer_len) {
+			rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n");
+			return RESPST_ERR_LENGTH;
+		}
+	}
+
 	if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) ||
 					     (qp_type(qp) == IB_QPT_UC))) {
 		unsigned int mtu = qp->mtu;
@@ -362,18 +365,18 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
 		if ((pkt->mask & RXE_START_MASK) &&
 		    (pkt->mask & RXE_END_MASK)) {
 			if (unlikely(payload > mtu)) {
-				rxe_dbg_qp(qp, "only packet too long");
+				rxe_dbg_qp(qp, "only packet too long\n");
 				return RESPST_ERR_LENGTH;
 			}
 		} else if ((pkt->mask & RXE_START_MASK) ||
 			   (pkt->mask & RXE_MIDDLE_MASK)) {
 			if (unlikely(payload != mtu)) {
-				rxe_dbg_qp(qp, "first or middle packet not mtu");
+				rxe_dbg_qp(qp, "first or middle packet not mtu\n");
 				return RESPST_ERR_LENGTH;
 			}
 		} else if (pkt->mask & RXE_END_MASK) {
 			if (unlikely((payload == 0) || (payload > mtu))) {
-				rxe_dbg_qp(qp, "last packet zero or too long");
+				rxe_dbg_qp(qp, "last packet zero or too long\n");
 				return RESPST_ERR_LENGTH;
 			}
 		}
@@ -382,7 +385,7 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
 	/* See IBA C9-94 */
 	if (pkt->mask & RXE_RETH_MASK) {
 		if (reth_len(pkt) > (1U << 31)) {
-			rxe_dbg_qp(qp, "dma length too long");
+			rxe_dbg_qp(qp, "dma length too long\n");
 			return RESPST_ERR_LENGTH;
 		}
 	}
@@ -699,10 +702,16 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
 	if (!res->replay) {
 		u64 iova = qp->resp.va + qp->resp.offset;
 
-		err = rxe_mr_do_atomic_op(mr, iova, pkt->opcode,
-					  atmeth_comp(pkt),
-					  atmeth_swap_add(pkt),
-					  &res->atomic.orig_val);
+		if (is_odp_mr(mr))
+			err = rxe_odp_atomic_op(mr, iova, pkt->opcode,
+						atmeth_comp(pkt),
+						atmeth_swap_add(pkt),
+						&res->atomic.orig_val);
+		else
+			err = rxe_mr_do_atomic_op(mr, iova, pkt->opcode,
+						  atmeth_comp(pkt),
+						  atmeth_swap_add(pkt),
+						  &res->atomic.orig_val);
 		if (err)
 			return err;
 
@@ -740,7 +749,16 @@ static enum resp_states atomic_write_reply(struct rxe_qp *qp,
 	value = *(u64 *)payload_addr(pkt);
 	iova = qp->resp.va + qp->resp.offset;
 
-	err = rxe_mr_do_atomic_write(mr, iova, value);
+	/* See IBA oA19-28 */
+	if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
+		rxe_dbg_mr(mr, "mr not in valid state\n");
+		return RESPST_ERR_RKEY_VIOLATION;
+	}
+
+	if (is_odp_mr(mr))
+		err = rxe_odp_do_atomic_write(mr, iova, value);
+	else
+		err = rxe_mr_do_atomic_write(mr, iova, value);
 	if (err)
 		return err;
 
@@ -1133,7 +1151,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
 		}
 	} else {
 		if (wc->status != IB_WC_WR_FLUSH_ERR)
-			rxe_err_qp(qp, "non-flush error status = %d",
+			rxe_err_qp(qp, "non-flush error status = %d\n",
 				   wc->status);
 	}
 
@@ -1442,7 +1460,7 @@ static int flush_recv_wqe(struct rxe_qp *qp, struct rxe_recv_wqe *wqe)
 	err = rxe_cq_post(qp->rcq, &cqe, 0);
 	if (err)
-		rxe_dbg_cq(qp->rcq, "post cq failed err = %d", err);
+		rxe_dbg_cq(qp->rcq, "post cq failed err = %d\n", err);
 
 	return err;
 }
@@ -1485,7 +1503,7 @@ static void flush_recv_queue(struct rxe_qp *qp, bool notify)
 	qp->resp.wqe = NULL;
 }
 
-int rxe_responder(struct rxe_qp *qp)
+int rxe_receiver(struct rxe_qp *qp)
 {
 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
 	enum resp_states state;
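
Note (illustrative, not part of the diff): the UD/GSI length check added to rxe_resp_check_length() can be read as the standalone sketch below. The struct and helper names are hypothetical stand-ins for the kernel's qp->resp.wqe->dma SGE list, and the 40-byte constant stands in for sizeof(union rdma_network_hdr), the GRH-sized header that rxe stores ahead of a UD payload.

#include <stdbool.h>

/* Hypothetical stand-in for one kernel SGE (qp->resp.wqe->dma.sge[i]) */
struct example_sge {
	unsigned int length;	/* bytes this entry can receive */
};

/* Stands in for sizeof(union rdma_network_hdr): GRH space before a UD payload */
#define EXAMPLE_NETWORK_HDR_BYTES 40u

/*
 * Mirrors the check added above (IBA C9-92): sum the posted receive
 * buffers and reject the packet when payload plus the network header
 * cannot fit.
 */
static bool ud_payload_fits(const struct example_sge *sge, int num_sge,
			    unsigned int payload)
{
	unsigned int recv_buffer_len = 0;
	int i;

	for (i = 0; i < num_sge; i++)
		recv_buffer_len += sge[i].length;

	return payload + EXAMPLE_NETWORK_HDR_BYTES <= recv_buffer_len;
}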
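
Note (illustrative, not part of the diff): both atomic hunks follow the same dispatch pattern, consolidated below as a sketch. Only the helper name example_atomic_write is hypothetical; the called functions, the state check, and the return codes are the ones visible in the hunks above. ODP-backed MRs take the rxe_odp_* path, which can fault pages in, while pinned MRs keep the existing direct path.

/* Hypothetical consolidated helper; mirrors the atomic_write_reply() hunk */
static int example_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
{
	/* See IBA oA19-28: the MR must be in the valid state */
	if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
		rxe_dbg_mr(mr, "mr not in valid state\n");
		return RESPST_ERR_RKEY_VIOLATION;
	}

	/* ODP-backed MRs may need pages faulted in; pinned MRs go direct */
	if (is_odp_mr(mr))
		return rxe_odp_do_atomic_write(mr, iova, value);

	return rxe_mr_do_atomic_write(mr, iova, value);
}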