author	Tony Lu <tonylu@linux.alibaba.com>	2021-12-03 12:33:31 +0100
committer	Jakub Kicinski <kuba@kernel.org>	2021-12-06 17:01:28 -0800
commit	1c5526968e270e4efccfa1da21d211a4915cdeda (patch)
tree	b103539ecf652fefde85bc00383092a18bdea351 /net/smc/smc_core.c
parent	364d470d547097075070c19189a31dda97b3a962 (diff)
net/smc: Clear memory when release and reuse buffer
Currently, buffers are cleared when smc connections are created and when
buffers are reused. This slows down the establishment of new connections.
In most cases, applications want to establish connections as quickly as
possible.

This patch moves the memset() from the connection creation path to the
release and buffer-unuse path, trading release speed for establishment
speed.

Test environments:
- CPU Intel Xeon Platinum 8 core, mem 32 GiB, nic Mellanox CX4
- socket sndbuf / rcvbuf: 16384 / 131072 bytes
- w/o first round, 5 rounds, avg, 100 conns batch per round
- smc_buf_create() traced with a bpftrace kprobe, which introduces extra latency

Latency benchmarks for smc_buf_create():
  w/o patch : 19040.0 ns
  w/  patch :  1932.6 ns
  ratio     :  10.2% (-89.8%)

Latency benchmarks for socket create and connect:
  w/o patch : 143.3 us
  w/  patch : 102.2 us
  ratio     : 71.3% (-28.7%)

The latency of establishing connections is reduced by 28.7%.

Signed-off-by: Tony Lu <tonylu@linux.alibaba.com>
Reviewed-by: Wen Gu <guwen@linux.alibaba.com>
Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
Link: https://lore.kernel.org/r/20211203113331.2818873-1-kgraul@linux.ibm.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
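The pattern behind the patch can be sketched outside the kernel. The standalone
userspace C program below (hypothetical names such as buf_acquire()/buf_release();
it is not the SMC implementation) keeps a small pool of buffers and zeroes each
buffer when it is released rather than when it is handed out, so the acquire
path, the analogue of the connection-setup path, never calls memset():

/* Minimal userspace sketch of "clear on release, not on reuse".
 * Names are hypothetical; this only mirrors the idea of the patch.
 */
#include <stdlib.h>
#include <string.h>

#define POOL_SIZE 4
#define BUF_LEN   16384

struct buf_desc {
	void *cpu_addr;
	int   used;
};

static struct buf_desc pool[POOL_SIZE];

/* Hot path: hand out an unused, already-cleared buffer. No memset() here. */
static struct buf_desc *buf_acquire(void)
{
	for (int i = 0; i < POOL_SIZE; i++) {
		if (!pool[i].used) {
			pool[i].used = 1;
			return &pool[i];
		}
	}
	return NULL;
}

/* Teardown path: absorb the clearing cost when the buffer is given back. */
static void buf_release(struct buf_desc *desc)
{
	memset(desc->cpu_addr, 0, BUF_LEN);
	desc->used = 0;
}

int main(void)
{
	for (int i = 0; i < POOL_SIZE; i++)
		pool[i].cpu_addr = calloc(1, BUF_LEN);

	struct buf_desc *b = buf_acquire();
	if (b) {
		memcpy(b->cpu_addr, "payload", 8);
		buf_release(b);		/* zeroed for the next user */
	}

	for (int i = 0; i < POOL_SIZE; i++)
		free(pool[i].cpu_addr);
	return 0;
}

The trade-off is the one measured above: the clearing work moves out of the
latency-sensitive setup path and into the release path, where its cost is
easier to tolerate.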
Diffstat (limited to 'net/smc/smc_core.c')
-rw-r--r--	net/smc/smc_core.c	13
1 file changed, 9 insertions, 4 deletions
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 387d28b2f8dd..85be94cabb01 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -1101,18 +1101,24 @@ static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc,
 		smc_buf_free(lgr, true, rmb_desc);
 	} else {
 		rmb_desc->used = 0;
+		memset(rmb_desc->cpu_addr, 0, rmb_desc->len);
 	}
 }
 
 static void smc_buf_unuse(struct smc_connection *conn,
 			  struct smc_link_group *lgr)
 {
-	if (conn->sndbuf_desc)
+	if (conn->sndbuf_desc) {
 		conn->sndbuf_desc->used = 0;
-	if (conn->rmb_desc && lgr->is_smcd)
+		memset(conn->sndbuf_desc->cpu_addr, 0, conn->sndbuf_desc->len);
+	}
+	if (conn->rmb_desc && lgr->is_smcd) {
 		conn->rmb_desc->used = 0;
-	else if (conn->rmb_desc)
+		memset(conn->rmb_desc->cpu_addr, 0, conn->rmb_desc->len +
+		       sizeof(struct smcd_cdc_msg));
+	} else if (conn->rmb_desc) {
 		smcr_buf_unuse(conn->rmb_desc, lgr);
+	}
 }
 
 /* remove a finished connection from its link group */
@@ -2148,7 +2154,6 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
 		if (buf_desc) {
 			SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
 			SMC_STAT_BUF_REUSE(smc, is_smcd, is_rmb);
-			memset(buf_desc->cpu_addr, 0, bufsize);
 			break; /* found reusable slot */
 		}
 