author     Chuck Lever <chuck.lever@oracle.com>    2023-12-18 17:32:07 -0500
committer  Chuck Lever <chuck.lever@oracle.com>    2024-01-07 17:54:33 -0500
commit     d3dba534100d4e9eb7a5204be97cd6f9ada2066e (patch)
tree       8ff227f2cc04de812f1eb7f2692880f1d87547cd /include/linux/sunrpc/svc_rdma.h
parent     ecba85e951c178e3fe7cea04eebf1035e8168f93 (diff)
svcrdma: Implement multi-stage Read completion again
Having an nfsd thread waiting for an RDMA Read completion is problematic if the Read responder (i.e., the client) stops responding. We need to go back to handling RDMA Reads by getting the svc scheduler to call svc_rdma_recvfrom() a second time to finish building an RPC message after a Read completion.

This is the final patch, and it makes several changes that have to happen concurrently:

1. svc_rdma_process_read_list() no longer waits for a completion, but simply builds and posts the Read WRs.

2. svc_rdma_read_done() now queues a completed Read on sc_read_complete_q for later processing rather than calling complete().

3. The completed RPC message is no longer built in the svc_rdma_process_read_list() path. Finishing the message is now done in svc_rdma_recvfrom() when it notices work on the sc_read_complete_q. The "finish building this RPC message" code is removed from the svc_rdma_process_read_list() path.

This arrangement avoids the need for an nfsd thread to wait for an RDMA Read non-interruptibly without a timeout. It is basically the same code structure that Tom Tucker used for Read chunks, along with some clean-up and modernization.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
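Only the header changes are shown on this page (the diffstat is limited to include/linux/sunrpc/svc_rdma.h); the .c-side changes that implement the flow above are not visible here. As a rough illustration of the two-stage pattern the commit message describes, the sketch below shows the general shape of the completion side and the recvfrom side. The names sc_read_complete_q, rc_saved_arg, svc_rdma_read_done(), svc_rdma_recvfrom() and svc_rdma_process_read_list() come from this patch; the parameter lists, the sc_rq_dto_lock/rc_list usage, and the helper structure are assumptions for illustration, not the patch's actual code.

#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svc_rdma.h>

/*
 * Sketch only -- not taken from the patch body. Locking, list
 * handling, and function signatures here are assumptions used to
 * illustrate the deferred Read completion described above.
 */

/* Completion side: rather than complete()-ing a waiting nfsd thread,
 * park the recv context on sc_read_complete_q and ask the svc
 * scheduler to call svc_rdma_recvfrom() again.
 */
static void svc_rdma_read_done_sketch(struct svcxprt_rdma *rdma,
				      struct svc_rdma_recv_ctxt *ctxt)
{
	spin_lock(&rdma->sc_rq_dto_lock);
	list_add_tail(&ctxt->rc_list, &rdma->sc_read_complete_q);
	set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
	spin_unlock(&rdma->sc_rq_dto_lock);

	svc_xprt_enqueue(&rdma->sc_xprt);
}

/* recvfrom side: on the second pass, a completed Read is pulled off
 * sc_read_complete_q and the RPC message is finished from the xdr_buf
 * saved before the Read WRs were posted (the new rc_saved_arg field).
 */
static bool svc_rdma_finish_read_sketch(struct svcxprt_rdma *rdma,
					struct svc_rqst *rqstp)
{
	struct svc_rdma_recv_ctxt *ctxt;

	spin_lock(&rdma->sc_rq_dto_lock);
	ctxt = list_first_entry_or_null(&rdma->sc_read_complete_q,
					struct svc_rdma_recv_ctxt, rc_list);
	if (ctxt)
		list_del(&ctxt->rc_list);
	spin_unlock(&rdma->sc_rq_dto_lock);
	if (!ctxt)
		return false;

	rqstp->rq_arg = ctxt->rc_saved_arg;
	return true;
}

The point of this shape is that no nfsd thread ever sleeps waiting for a Read completion: the transport is simply re-enqueued, and the work resumes on the next call into svc_rdma_recvfrom().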
Diffstat (limited to 'include/linux/sunrpc/svc_rdma.h')
-rw-r--r--  include/linux/sunrpc/svc_rdma.h  |  6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index c98d29e51b9c..e7595ae62fe2 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -170,8 +170,6 @@ struct svc_rdma_chunk_ctxt {
 	struct list_head	cc_rwctxts;
 	ktime_t			cc_posttime;
 	int			cc_sqecount;
-	enum ib_wc_status	cc_status;
-	struct completion	cc_done;
 };
 
 struct svc_rdma_recv_ctxt {
@@ -191,6 +189,7 @@ struct svc_rdma_recv_ctxt {
 	unsigned int		rc_pageoff;
 	unsigned int		rc_curpage;
 	unsigned int		rc_readbytes;
+	struct xdr_buf		rc_saved_arg;
 	struct svc_rdma_chunk_ctxt	rc_cc;
 
 	struct svc_rdma_pcl	rc_call_pcl;
@@ -240,6 +239,9 @@ extern int svc_rdma_recvfrom(struct svc_rqst *);
 extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma);
 extern void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
			     struct svc_rdma_chunk_ctxt *cc);
+extern void svc_rdma_cc_release(struct svcxprt_rdma *rdma,
+				struct svc_rdma_chunk_ctxt *cc,
+				enum dma_data_direction dir);
 extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
				     const struct svc_rdma_chunk *chunk,
				     const struct xdr_buf *xdr);