path: root/include/linux/sunrpc/svc_rdma.h
author	Chuck Lever <chuck.lever@oracle.com>	2016-01-07 14:49:12 -0500
committer	Doug Ledford <dledford@redhat.com>	2016-01-19 15:30:48 -0500
commit	cc886c9ff1607eda04062bdcec963e2f8e6a3eb1 (patch)
tree	26cb6dea78b0a3fbdcab65159a180de1fbba7aec /include/linux/sunrpc/svc_rdma.h
parent	ced4ac0c4f7819d586b559a2ed7244e4f78ac3d0 (diff)
svcrdma: Improve allocation of struct svc_rdma_op_ctxt
When the maximum payload size of NFS READ and WRITE was increased by commit cc9a903d915c ("svcrdma: Change maximum server payload back to RPCSVC_MAXPAYLOAD"), the size of struct svc_rdma_op_ctxt increased to over 6KB (on x86_64). That makes allocating one of these from a kmem_cache more likely to fail in situations when system memory is exhausted.

Since I'm about to add a caller where this allocation must always work _and_ it cannot sleep, pre-allocate ctxts for each connection.

Another motivation for this change is that NFSv4.x servers are required by specification not to drop NFS requests. Pre-allocating memory resources reduces the likelihood of a drop.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Acked-by: Bruce Fields <bfields@fieldses.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Diffstat (limited to 'include/linux/sunrpc/svc_rdma.h')
-rw-r--r--	include/linux/sunrpc/svc_rdma.h	6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index f869807a0d0e..be2804b72cd8 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -69,6 +69,7 @@ extern atomic_t rdma_stat_sq_prod;
  * completes.
  */
 struct svc_rdma_op_ctxt {
+	struct list_head free;
 	struct svc_rdma_op_ctxt *read_hdr;
 	struct svc_rdma_fastreg_mr *frmr;
 	int hdr_count;
@@ -141,7 +142,10 @@ struct svcxprt_rdma {
 	struct ib_pd *sc_pd;
 
 	atomic_t sc_dma_used;
-	atomic_t sc_ctxt_used;
+	spinlock_t sc_ctxt_lock;
+	struct list_head sc_ctxts;
+	int sc_ctxt_used;
+
 	struct list_head sc_rq_dto_q;
 	spinlock_t sc_rq_dto_lock;
 	struct ib_qp *sc_qp;
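
The header diff above only adds the free-list plumbing; the allocator rework itself is in net/sunrpc/xprtrdma/svc_rdma_transport.c and is not part of this file. As a rough illustration of how a pre-allocated, per-connection ctxt pool like this is typically consumed, here is a minimal sketch. The function names (example_get_ctxt, example_put_ctxt) and bodies are illustrative assumptions based on the fields added in this diff, not the code from the patch:

/* Illustrative sketch only -- not the actual patch body.  Assumes the
 * fields added above (sc_ctxt_lock, sc_ctxts, sc_ctxt_used, and the
 * per-ctxt "free" list_head) and shows how a get/put pair could hand
 * out pre-allocated ctxts without sleeping or touching the allocator.
 */
static struct svc_rdma_op_ctxt *example_get_ctxt(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;

	spin_lock_bh(&xprt->sc_ctxt_lock);
	if (!list_empty(&xprt->sc_ctxts)) {
		ctxt = list_first_entry(&xprt->sc_ctxts,
					struct svc_rdma_op_ctxt, free);
		list_del_init(&ctxt->free);
		xprt->sc_ctxt_used++;
	}
	spin_unlock_bh(&xprt->sc_ctxt_lock);
	return ctxt;	/* NULL only if the pool was sized too small */
}

static void example_put_ctxt(struct svcxprt_rdma *xprt,
			     struct svc_rdma_op_ctxt *ctxt)
{
	spin_lock_bh(&xprt->sc_ctxt_lock);
	xprt->sc_ctxt_used--;
	list_add(&ctxt->free, &xprt->sc_ctxts);	/* return ctxt to the pool */
	spin_unlock_bh(&xprt->sc_ctxt_lock);
}

Handing ctxts out from a spinlock-protected free list means the fast path neither sleeps nor depends on kmem_cache_alloc() succeeding under memory pressure, which is what the "must always work and cannot sleep" requirement in the commit message calls for.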