author		Guvenc Gulce <guvenc@linux.ibm.com>	2020-12-01 20:20:38 +0100
committer	Jakub Kicinski <kuba@kernel.org>	2020-12-01 17:56:12 -0800
commit		07d51580ff6594dc7261821ee40b37392040db2a (patch)
tree		03669f4a7812a73d452ec5bc936887a34b78c18d /net/smc
parent		8b2f0f44f06bd5f00d7d5e6c20a4dc3ec28e0ecd (diff)
net/smc: Add connection counters for links
Add connection counters to the structure of the link. Increase/decrease
the counters as needed in the corresponding routines.

Signed-off-by: Guvenc Gulce <guvenc@linux.ibm.com>
Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
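For readers skimming the patch, the counting discipline it adds can be sketched standalone. The following is a minimal userspace analogue, not kernel code: C11 stdatomic stands in for the kernel's atomic_t/atomic_set/atomic_inc/atomic_dec, and the struct and helper names are simplified placeholders that merely mirror the diff below.

/* Minimal userspace sketch of the per-link connection counting added by
 * this patch.  C11 stdatomic replaces the kernel's atomic_t here; the
 * types are illustrative placeholders, not the real SMC structures. */
#include <stdatomic.h>
#include <stdio.h>

struct link_sketch {
	atomic_int conn_cnt;		/* connections on this link */
};

struct conn_sketch {
	struct link_sketch *lnk;	/* link this connection uses */
};

/* mirrors smc_switch_link_and_count(): move a connection to another
 * link while keeping both per-link counters consistent */
static void switch_link_and_count(struct conn_sketch *conn,
				  struct link_sketch *to_lnk)
{
	atomic_fetch_sub(&conn->lnk->conn_cnt, 1);
	conn->lnk = to_lnk;
	atomic_fetch_add(&conn->lnk->conn_cnt, 1);
}

int main(void)
{
	struct link_sketch a = { .conn_cnt = 0 }, b = { .conn_cnt = 0 };
	struct conn_sketch conn = { .lnk = &a };

	/* assign: like the atomic_inc() in smcr_lgr_conn_assign_link() */
	atomic_fetch_add(&conn.lnk->conn_cnt, 1);
	/* failover/switch: like the calls in smc_switch_conns() */
	switch_link_and_count(&conn, &b);
	/* unregister: like the atomic_dec() in __smc_lgr_unregister_conn() */
	atomic_fetch_sub(&conn.lnk->conn_cnt, 1);

	printf("link a: %d connections, link b: %d connections\n",
	       atomic_load(&a.conn_cnt), atomic_load(&b.conn_cnt));
	return 0;
}

The point of the helper in the patch is that a link switch always pairs the decrement on the old link with the increment on the new one, taken under the connection's send_lock in smc_switch_conns(), so the per-link counts cannot drift.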
Diffstat (limited to 'net/smc')
-rw-r--r--	net/smc/smc_core.c	16
-rw-r--r--	net/smc/smc_core.h	1
2 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index af96f813c075..5bc8ebcd03f3 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -139,6 +139,7 @@ static int smcr_lgr_conn_assign_link(struct smc_connection *conn, bool first)
 	}
 	if (!conn->lnk)
 		return SMC_CLC_DECL_NOACTLINK;
+	atomic_inc(&conn->lnk->conn_cnt);
 	return 0;
 }
 
@@ -180,6 +181,8 @@ static void __smc_lgr_unregister_conn(struct smc_connection *conn)
 	struct smc_link_group *lgr = conn->lgr;
 
 	rb_erase(&conn->alert_node, &lgr->conns_all);
+	if (conn->lnk)
+		atomic_dec(&conn->lnk->conn_cnt);
 	lgr->conns_num--;
 	conn->alert_token_local = 0;
 	sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
@@ -314,6 +317,7 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
 	lnk->smcibdev = ini->ib_dev;
 	lnk->ibport = ini->ib_port;
 	lnk->path_mtu = ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
+	atomic_set(&lnk->conn_cnt, 0);
 	smc_llc_link_set_uid(lnk);
 	INIT_WORK(&lnk->link_down_wrk, smc_link_down_work);
 	if (!ini->ib_dev->initialized) {
@@ -526,6 +530,14 @@ static int smc_switch_cursor(struct smc_sock *smc, struct smc_cdc_tx_pend *pend,
 	return rc;
 }
 
+static void smc_switch_link_and_count(struct smc_connection *conn,
+				      struct smc_link *to_lnk)
+{
+	atomic_dec(&conn->lnk->conn_cnt);
+	conn->lnk = to_lnk;
+	atomic_inc(&conn->lnk->conn_cnt);
+}
+
 struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
 				  struct smc_link *from_lnk, bool is_dev_err)
 {
@@ -574,7 +586,7 @@ again:
 		    smc->sk.sk_state == SMC_PEERABORTWAIT ||
 		    smc->sk.sk_state == SMC_PROCESSABORT) {
 			spin_lock_bh(&conn->send_lock);
-			conn->lnk = to_lnk;
+			smc_switch_link_and_count(conn, to_lnk);
 			spin_unlock_bh(&conn->send_lock);
 			continue;
 		}
@@ -588,7 +600,7 @@ again:
 		}
 		/* avoid race with smcr_tx_sndbuf_nonempty() */
 		spin_lock_bh(&conn->send_lock);
-		conn->lnk = to_lnk;
+		smc_switch_link_and_count(conn, to_lnk);
 		rc = smc_switch_cursor(smc, pend, wr_buf);
 		spin_unlock_bh(&conn->send_lock);
 		sock_put(&smc->sk);
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 9aee54a6bcba..eefb6770b268 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -129,6 +129,7 @@ struct smc_link {
 	struct delayed_work	llc_testlink_wrk; /* testlink worker */
 	struct completion	llc_testlink_resp; /* wait for rx of testlink */
 	int			llc_testlink_time; /* testlink interval */
+	atomic_t		conn_cnt; /* connections on this link */
 };
 
 /* For now we just allow one parallel link per link group. The SMC protocol
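
Not part of this commit, but one plausible consumer of the new counter would be link selection: pick the usable link that currently carries the fewest connections. The sketch below is again a hedged userspace analogue with C11 stdatomic in place of atomic_t/atomic_read(); the "usable" flag stands in for the kernel's link-state checks (e.g. smc_link_usable()), the value 3 matches the kernel's SMC_LINKS_PER_LGR_MAX but only sizes the toy array, and every name here is illustrative.

/* Hypothetical consumer of conn_cnt (not part of this patch): choose the
 * usable link with the fewest active connections. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define LINKS_PER_LGR_MAX	3	/* matches SMC_LINKS_PER_LGR_MAX, used only for the demo array */

struct link_sketch {
	int usable;			/* stands in for a link-state check */
	atomic_int conn_cnt;		/* connections on this link */
};

/* return the usable link carrying the fewest connections, or NULL */
static struct link_sketch *least_loaded_link(struct link_sketch *lnk,
					     size_t nr_links)
{
	struct link_sketch *best = NULL;
	size_t i;

	for (i = 0; i < nr_links; i++) {
		if (!lnk[i].usable)
			continue;
		if (!best ||
		    atomic_load(&lnk[i].conn_cnt) < atomic_load(&best->conn_cnt))
			best = &lnk[i];
	}
	return best;
}

int main(void)
{
	struct link_sketch links[LINKS_PER_LGR_MAX] = {
		{ .usable = 1, .conn_cnt = 4 },
		{ .usable = 1, .conn_cnt = 1 },
		{ .usable = 0, .conn_cnt = 0 },
	};
	struct link_sketch *best = least_loaded_link(links, LINKS_PER_LGR_MAX);

	if (best)
		printf("least loaded link index: %td\n", best - links);
	return 0;
}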