author    Trond Myklebust <trond.myklebust@primarydata.com>    2017-04-28 10:52:42 -0400
committer Trond Myklebust <trond.myklebust@hammerspace.com>    2019-07-06 14:54:49 -0400
commit    21f0ffaff510b0530bfdf77da7133c0b99dee2fe (patch)
tree      0302c053444f3a371d7c188a33f9834fb7cbc31e /net/sunrpc
parent    44942b4e457beda00981f616402a1a791e8c616e (diff)
SUNRPC: Add basic load balancing to the transport switch
For now, just count the queue length. It is less accurate than counting
the number of bytes queued, but easier to implement.

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
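As a rough illustration of the balancing rule this patch adds (a standalone userspace sketch, not kernel code: plain integers stand in for the kernel's atomic_long_t counters, and DIV_ROUND_UP() is redefined locally), a candidate transport is accepted unless its queue is both longer than 2 requests and longer than the rounded-up average queue length across the switch:

#include <stdio.h>

/* Ceiling division, matching the kernel's DIV_ROUND_UP() macro. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/*
 * Simplified model of the decision made in
 * xprt_iter_next_entry_roundrobin(): accept a candidate transport
 * unless its queue is both longer than 2 requests and longer than
 * the average queue length across all active transports.
 */
static int xprt_is_acceptable(unsigned long xprt_queuelen,
			      unsigned long xps_queuelen,
			      unsigned long xps_nactive)
{
	unsigned long xps_avglen;

	if (xprt_queuelen <= 2)
		return 1;
	xps_avglen = DIV_ROUND_UP(xps_queuelen, xps_nactive);
	return xprt_queuelen <= xps_avglen;
}

int main(void)
{
	/* Three active transports with 5, 1 and 1 queued requests. */
	unsigned long queues[] = { 5, 1, 1 };
	unsigned long total = 5 + 1 + 1;
	int i;

	for (i = 0; i < 3; i++)
		printf("transport %d (queuelen %lu): %s\n", i, queues[i],
		       xprt_is_acceptable(queues[i], total, 3) ?
				"accept" : "skip");
	return 0;
}

Here the average is DIV_ROUND_UP(7, 3) == 3, so the transport with 5 queued requests is skipped while the two lightly loaded ones remain eligible.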
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/clnt.c           40
-rw-r--r--  net/sunrpc/xprtmultipath.c  20
2 files changed, 56 insertions(+), 4 deletions(-)
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index b03bfa055c08..976eab68bb5d 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -968,13 +968,47 @@ out:
 }
 EXPORT_SYMBOL_GPL(rpc_bind_new_program);
 
+static struct rpc_xprt *
+rpc_task_get_xprt(struct rpc_clnt *clnt)
+{
+	struct rpc_xprt_switch *xps;
+	struct rpc_xprt *xprt = xprt_iter_get_next(&clnt->cl_xpi);
+
+	if (!xprt)
+		return NULL;
+	rcu_read_lock();
+	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
+	atomic_long_inc(&xps->xps_queuelen);
+	rcu_read_unlock();
+	atomic_long_inc(&xprt->queuelen);
+
+	return xprt;
+}
+
+static void
+rpc_task_release_xprt(struct rpc_clnt *clnt, struct rpc_xprt *xprt)
+{
+	struct rpc_xprt_switch *xps;
+
+	atomic_long_dec(&xprt->queuelen);
+	rcu_read_lock();
+	xps = rcu_dereference(clnt->cl_xpi.xpi_xpswitch);
+	atomic_long_dec(&xps->xps_queuelen);
+	rcu_read_unlock();
+
+	xprt_put(xprt);
+}
+
 void rpc_task_release_transport(struct rpc_task *task)
 {
 	struct rpc_xprt *xprt = task->tk_xprt;
 
 	if (xprt) {
 		task->tk_xprt = NULL;
-		xprt_put(xprt);
+		if (task->tk_client)
+			rpc_task_release_xprt(task->tk_client, xprt);
+		else
+			xprt_put(xprt);
 	}
 }
 EXPORT_SYMBOL_GPL(rpc_task_release_transport);
@@ -983,6 +1017,7 @@ void rpc_task_release_client(struct rpc_task *task)
 {
 	struct rpc_clnt *clnt = task->tk_client;
 
+	rpc_task_release_transport(task);
 	if (clnt != NULL) {
 		/* Remove from client task list */
 		spin_lock(&clnt->cl_lock);
@@ -992,14 +1027,13 @@ void rpc_task_release_client(struct rpc_task *task)
 		rpc_release_client(clnt);
 	}
-	rpc_task_release_transport(task);
 }
 
 static
 void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
 {
 	if (!task->tk_xprt)
-		task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi);
+		task->tk_xprt = rpc_task_get_xprt(clnt);
 }
 
 static
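Two details in the clnt.c side of the patch are worth calling out. First, rpc_task_release_transport() now runs at the top of rpc_task_release_client(), while task->tk_client is still set, so rpc_task_release_xprt() can reach the transport switch through the client. Second, rpc_task_get_xprt() and rpc_task_release_xprt() move the per-transport counter (xprt->queuelen) and the per-switch aggregate (xps->xps_queuelen) in lockstep, which keeps the aggregate equal to the sum of the parts. A toy userspace model of that invariant (the toy_* types are hypothetical, and plain longs stand in for the kernel's atomics):

#include <assert.h>
#include <stdio.h>

struct toy_xprt   { long queuelen; };	  /* one per transport */
struct toy_switch { long xps_queuelen; }; /* aggregate for the switch */

/* Mirrors the pairing in rpc_task_get_xprt(): bump both counters. */
static void task_get_xprt(struct toy_switch *xps, struct toy_xprt *xprt)
{
	xps->xps_queuelen++;
	xprt->queuelen++;
}

/* Mirrors rpc_task_release_xprt(): drop both counters. */
static void task_release_xprt(struct toy_switch *xps, struct toy_xprt *xprt)
{
	xprt->queuelen--;
	xps->xps_queuelen--;
}

int main(void)
{
	struct toy_switch xps = { 0 };
	struct toy_xprt a = { 0 }, b = { 0 };

	task_get_xprt(&xps, &a);
	task_get_xprt(&xps, &b);
	task_get_xprt(&xps, &b);
	/* The aggregate always equals the sum of the parts. */
	assert(xps.xps_queuelen == a.queuelen + b.queuelen);
	task_release_xprt(&xps, &b);
	task_release_xprt(&xps, &b);
	task_release_xprt(&xps, &a);
	assert(xps.xps_queuelen == 0);
	printf("counters balanced\n");
	return 0;
}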
diff --git a/net/sunrpc/xprtmultipath.c b/net/sunrpc/xprtmultipath.c
index 8394124126f8..394e427533be 100644
--- a/net/sunrpc/xprtmultipath.c
+++ b/net/sunrpc/xprtmultipath.c
@@ -36,6 +36,7 @@ static void xprt_switch_add_xprt_locked(struct rpc_xprt_switch *xps,
 	if (xps->xps_nxprts == 0)
 		xps->xps_net = xprt->xprt_net;
 	xps->xps_nxprts++;
+	xps->xps_nactive++;
 }
 
 /**
@@ -62,6 +63,7 @@ static void xprt_switch_remove_xprt_locked(struct rpc_xprt_switch *xps,
 {
 	if (unlikely(xprt == NULL))
 		return;
+	xps->xps_nactive--;
 	xps->xps_nxprts--;
 	if (xps->xps_nxprts == 0)
 		xps->xps_net = NULL;
@@ -317,8 +319,24 @@ struct rpc_xprt *xprt_switch_find_next_entry_roundrobin(struct list_head *head,
 static
 struct rpc_xprt *xprt_iter_next_entry_roundrobin(struct rpc_xprt_iter *xpi)
 {
-	return xprt_iter_next_entry_multiple(xpi,
-			xprt_switch_find_next_entry_roundrobin);
+	struct rpc_xprt_switch *xps = rcu_dereference(xpi->xpi_xpswitch);
+	struct rpc_xprt *xprt;
+	unsigned long xprt_queuelen;
+	unsigned long xps_queuelen;
+	unsigned long xps_avglen;
+
+	do {
+		xprt = xprt_iter_next_entry_multiple(xpi,
+				xprt_switch_find_next_entry_roundrobin);
+		if (xprt == NULL)
+			break;
+		xprt_queuelen = atomic_long_read(&xprt->queuelen);
+		if (xprt_queuelen <= 2)
+			break;
+		xps_queuelen = atomic_long_read(&xps->xps_queuelen);
+		xps_avglen = DIV_ROUND_UP(xps_queuelen, xps->xps_nactive);
+	} while (xprt_queuelen > xps_avglen);
+	return xprt;
 }
 
 static
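One property of the new loop worth noting (an observation from the arithmetic, not something stated in the patch): as long as queue lengths are stable, at least one active transport must sit at or below the rounded-up average, so the round-robin cursor cannot circle forever; in the kernel, concurrently changing lengths are simply re-read on each pass. A hypothetical standalone walk over a ring of queue lengths shows the cursor settling quickly:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Deliberately unbalanced queues summing to 12 over 4 transports,
	 * so the rounded-up average is DIV_ROUND_UP(12, 4) == 3. */
	unsigned long queues[] = { 9, 1, 1, 1 };
	unsigned long nactive = 4;
	unsigned long avg = DIV_ROUND_UP(9 + 1 + 1 + 1, nactive);
	unsigned int i = 0, hops = 0;

	/* Analogue of the patch's queue-length checks: stop at the
	 * first transport whose queue is short (<= 2) or no longer
	 * than the average. */
	while (queues[i] > 2 && queues[i] > avg) {
		i = (i + 1) % nactive;
		hops++;
	}
	printf("settled on transport %u after %u hop(s), avg = %lu\n",
	       i, hops, avg);
	return 0;
}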