author    Karsten Graul <kgraul@linux.ibm.com>    2019-04-11 11:17:30 +0200
committer David S. Miller <davem@davemloft.net>   2019-04-11 11:04:08 -0700
commit    fd57770dd198f5b2ddd5b9e6bf282cf98d63adb9 (patch)
tree      37e08bc918b40df3a79e4fc28ba16f8a2821d2c4 /net/smc/smc_close.c
parent    988dc4a9a3b66be75b30405a5494faf0dc7cffb6 (diff)
net/smc: wait for pending work before clcsock release_sock
When the clcsock has already been released via sock_release() and a pending smc_listen_work then accesses the clcsock, that access fails. Solve this by canceling and waiting for the work to complete first. Because the work holds the sock_lock, callers must make sure the lock is not held when the new helper smc_clcsock_release() is invoked. In addition, before smc_listen_work starts working, check whether the parent listen socket is still valid and stop the work early otherwise.

Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: Ursula Braun <ubraun@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
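The ordering described above can be modeled outside the kernel. The sketch below is a plain userspace pthread program, not kernel code: clc_release(), listen_work() and the globals are invented stand-ins for smc_clcsock_release(), smc_listen_work and the smc_sock fields, with pthread_join() and a pthread mutex standing in for cancel_work_sync() and clcsock_release_lock. It only illustrates the "wait for the work, then detach and free under the lock, and let the work re-check the pointer" pattern; build with cc -pthread.

/* Userspace model of the ordering, NOT kernel code: a worker thread stands
 * in for smc_listen_work, a pthread mutex for smc->clcsock_release_lock and
 * a heap object for the clcsock.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct clc { int fd; };			/* stand-in for struct socket */

static struct clc *clcsock;		/* stand-in for smc->clcsock */
static pthread_mutex_t release_lock = PTHREAD_MUTEX_INITIALIZER;

static void *listen_work(void *arg)
{
	/* the work touches the socket only under the release lock and
	 * re-checks that it is still there, analogous to the work stopping
	 * early when its parent state is no longer valid
	 */
	(void)arg;
	pthread_mutex_lock(&release_lock);
	if (clcsock)
		printf("listen_work: using clcsock fd=%d\n", clcsock->fd);
	else
		printf("listen_work: clcsock already gone, stop early\n");
	pthread_mutex_unlock(&release_lock);
	return NULL;
}

/* stand-in for smc_clcsock_release(): wait for the pending work, then
 * detach and free the socket under the release lock
 */
static void clc_release(pthread_t *worker)
{
	struct clc *tcp;

	pthread_join(*worker, NULL);	/* models cancel_work_sync() */
	pthread_mutex_lock(&release_lock);
	tcp = clcsock;
	clcsock = NULL;
	pthread_mutex_unlock(&release_lock);
	free(tcp);			/* models sock_release() */
}

int main(void)
{
	pthread_t worker;

	clcsock = malloc(sizeof(*clcsock));
	if (!clcsock)
		return 1;
	clcsock->fd = 42;
	if (pthread_create(&worker, NULL, listen_work, NULL))
		return 1;
	clc_release(&worker);		/* safe: waits for the work first */
	printf("clcsock released\n");
	return 0;
}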
Diffstat (limited to 'net/smc/smc_close.c')
-rw-r--r--    net/smc/smc_close.c    25
1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 2ad37e998509..fc06720b53c1 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -21,6 +21,22 @@
 
 #define SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME	(5 * HZ)
 
+/* release the clcsock that is assigned to the smc_sock */
+void smc_clcsock_release(struct smc_sock *smc)
+{
+	struct socket *tcp;
+
+	if (smc->listen_smc && current_work() != &smc->smc_listen_work)
+		cancel_work_sync(&smc->smc_listen_work);
+	mutex_lock(&smc->clcsock_release_lock);
+	if (smc->clcsock) {
+		tcp = smc->clcsock;
+		smc->clcsock = NULL;
+		sock_release(tcp);
+	}
+	mutex_unlock(&smc->clcsock_release_lock);
+}
+
 static void smc_close_cleanup_listen(struct sock *parent)
 {
 	struct sock *sk;
@@ -321,6 +337,7 @@ static void smc_close_passive_work(struct work_struct *work)
 						   close_work);
 	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
 	struct smc_cdc_conn_state_flags *rxflags;
+	bool release_clcsock = false;
 	struct sock *sk = &smc->sk;
 	int old_state;
 
@@ -400,13 +417,13 @@ wakeup:
 		if ((sk->sk_state == SMC_CLOSED) &&
 		    (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
 			smc_conn_free(conn);
-			if (smc->clcsock) {
-				sock_release(smc->clcsock);
-				smc->clcsock = NULL;
-			}
+			if (smc->clcsock)
+				release_clcsock = true;
 		}
 	}
 	release_sock(sk);
+	if (release_clcsock)
+		smc_clcsock_release(smc);
 	sock_put(sk); /* sock_hold done by schedulers of close_work */
 }
 
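The second hunk is the caller-side half of the locking rule from the changelog: smc_close_passive_work() runs under the sock lock, while smc_clcsock_release() may block waiting for work that itself takes that lock, so the function only records the decision in release_clcsock and calls the helper after release_sock(sk). A minimal standalone illustration of that decide-under-the-lock, release-after-dropping-it shape, again with invented userspace names rather than kernel API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;
static bool have_clcsock = true;

/* stand-in for smc_clcsock_release(); in the kernel this may sleep in
 * cancel_work_sync(), so it must not be called with the sock lock held
 */
static void clcsock_release(void)
{
	printf("releasing clcsock outside sk_lock\n");
}

/* stand-in for the tail of smc_close_passive_work() */
static void close_passive_work(void)
{
	bool release_clcsock = false;

	pthread_mutex_lock(&sk_lock);
	if (have_clcsock) {		/* only record the decision here */
		have_clcsock = false;
		release_clcsock = true;
	}
	pthread_mutex_unlock(&sk_lock);	/* drop the lock first ...       */

	if (release_clcsock)
		clcsock_release();	/* ... then do the blocking part */
}

int main(void)
{
	close_passive_work();
	return 0;
}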