| author | Chuck Lever <chuck.lever@oracle.com> | 2021-03-01 10:44:49 -0500 |
|---|---|---|
| committer | Chuck Lever <chuck.lever@oracle.com> | 2021-03-31 15:58:48 -0400 |
| commit | e3eded5e81c4df60006e94614ec645da089e35e7 | |
| tree | ee5cc39c1c0ab30bbe830436b25b1c039c80d969 | |
| parent | 5533c4f4b996b7fc36d16b5e0807ebbc08c93af4 | |
svcrdma: Clean up dto_q critical section in svc_rdma_recvfrom()
This, to me, seems less cluttered and less redundant. I was hoping
it could help reduce lock contention on the dto_q lock by reducing
the size of the critical section, but alas, the only improvement is
readability.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 14 |

1 file changed, 7 insertions(+), 7 deletions(-)
```diff
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 232860ea683b..6be23ce7a93d 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -794,22 +794,22 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 	rqstp->rq_xprt_ctxt = NULL;
 
+	ctxt = NULL;
 	spin_lock(&rdma_xprt->sc_rq_dto_lock);
 	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
-	if (!ctxt) {
+	if (ctxt)
+		list_del(&ctxt->rc_list);
+	else
 		/* No new incoming requests, terminate the loop */
 		clear_bit(XPT_DATA, &xprt->xpt_flags);
-		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
-		svc_xprt_received(xprt);
-		return 0;
-	}
-	list_del(&ctxt->rc_list);
 	spin_unlock(&rdma_xprt->sc_rq_dto_lock);
-	percpu_counter_inc(&svcrdma_stat_recv);
 
 	/* Unblock the transport for the next receive */
 	svc_xprt_received(xprt);
+	if (!ctxt)
+		return 0;
+	percpu_counter_inc(&svcrdma_stat_recv);
 	ib_dma_sync_single_for_cpu(rdma_xprt->sc_pd->device,
 				   ctxt->rc_recv_sge.addr, ctxt->rc_byte_len,
 				   DMA_FROM_DEVICE);
```
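The restructuring above is an instance of a general pattern: do only the queue manipulation (and the empty-queue flag update) inside the critical section, then let the empty and non-empty paths share a single unlock and the post-unlock work. Below is a minimal userspace sketch of that pattern, assuming pthreads; the names (`work_queue`, `consume_one`, `unblock_next_receive`, `queue_has_data`) are hypothetical stand-ins, not part of the svcrdma code.

```c
/*
 * Minimal userspace sketch (pthreads, hypothetical names) of the locking
 * pattern the patch applies: only the dequeue and the "queue is empty"
 * flag update happen under the lock; both exit paths then share the
 * unlock and the post-unlock work.
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct work_item {
	struct work_item *next;
	int payload;
};

static struct work_item *work_queue;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int queue_has_data = 1;		/* stand-in for the XPT_DATA bit */

static void unblock_next_receive(void)
{
	/* stand-in for svc_xprt_received(); runs outside the lock */
}

static int consume_one(void)
{
	struct work_item *item;

	/* The critical section covers only the queue manipulation,
	 * mirroring the patched svc_rdma_recvfrom(). */
	pthread_mutex_lock(&queue_lock);
	item = work_queue;
	if (item)
		work_queue = item->next;	/* like list_del() */
	else
		queue_has_data = 0;		/* like clear_bit(XPT_DATA, ...) */
	pthread_mutex_unlock(&queue_lock);

	/* Shared by both the empty and non-empty paths. */
	unblock_next_receive();
	if (!item)
		return 0;

	printf("processing %d\n", item->payload);
	return 1;
}

int main(void)
{
	struct work_item a = { .next = NULL, .payload = 42 };

	work_queue = &a;
	while (consume_one())
		;
	return 0;
}
```

The payoff in the real patch is structural rather than performance-related: spin_unlock() and svc_xprt_received() each collapse to a single call site, and, as the commit message concedes, the critical section is not measurably shorter, so the gain is readability.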
