author    Karsten Graul <kgraul@linux.ibm.com>    2018-07-25 16:35:33 +0200
committer David S. Miller <davem@davemloft.net>   2018-07-25 22:25:53 -0700
commit    0d18a0cb4b1585d9e5a3b300d5df9ed866561ffb (patch)
tree      1c854a0d55d1e3636aa7f75674712c233eabb36c /net/smc/smc_core.c
parent    603cc1498455cf57f5ca4483b600efb37ea2c56c (diff)
net/smc: improve delete link processing
Send an orderly DELETE LINK request before a link group is terminated, and add support for client-triggered DELETE LINK processing. Send a disorderly DELETE LINK before the module is unloaded.

Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: Ursula Braun <ubraun@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
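[Editor's note] The core distinction in this patch is between an orderly DELETE LINK, which is only attempted on an active link and expects a response from the peer, and a disorderly one sent fire-and-forget at module unload. The standalone C sketch below models that decision. It mirrors names from the patch (SMC_LNK_ACTIVE, smc_link_send_delete), but it is an illustrative userspace model, not kernel code, and send_delete_link() is a hypothetical stand-in for smc_llc_send_delete_link().

/* Standalone model of the orderly vs. disorderly DELETE LINK choice.
 * Illustrative only; not kernel code. */
#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

enum lnk_state { SMC_LNK_INACTIVE, SMC_LNK_ACTIVE, SMC_LNK_DELETING };

struct smc_link { enum lnk_state state; };

/* hypothetical stand-in for smc_llc_send_delete_link(); 0 on success */
static int send_delete_link(struct smc_link *lnk, bool orderly)
{
	printf("DELETE LINK sent (%s)\n", orderly ? "orderly" : "disorderly");
	return 0;
}

/* mirrors smc_link_send_delete() from the patch: only an active link
 * may start the orderly sequence; mark it deleting on success */
static int link_send_delete(struct smc_link *lnk)
{
	if (lnk->state == SMC_LNK_ACTIVE && !send_delete_link(lnk, true)) {
		lnk->state = SMC_LNK_DELETING;
		return 0;
	}
	return -ENOTCONN;
}

int main(void)
{
	struct smc_link lnk = { .state = SMC_LNK_ACTIVE };

	if (!link_send_delete(&lnk))           /* orderly: response expected */
		puts("waiting for DELETE LINK response");
	else
		send_delete_link(&lnk, false);  /* disorderly fallback */
	return 0;
}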
Diffstat (limited to 'net/smc/smc_core.c')
-rw-r--r--  net/smc/smc_core.c | 47
1 file changed, 42 insertions(+), 5 deletions(-)
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 90c10ae9ae09..a46418f45ecd 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -30,6 +30,7 @@
#define SMC_LGR_NUM_INCR 256
#define SMC_LGR_FREE_DELAY_SERV (600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT (SMC_LGR_FREE_DELAY_SERV + 10 * HZ)
+#define SMC_LGR_FREE_DELAY_FAST (8 * HZ)
static struct smc_lgr_list smc_lgr_list = { /* established link groups */
.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
@@ -51,6 +52,11 @@ static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
SMC_LGR_FREE_DELAY_CLNT : SMC_LGR_FREE_DELAY_SERV);
}
+void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr)
+{
+ mod_delayed_work(system_wq, &lgr->free_work, SMC_LGR_FREE_DELAY_FAST);
+}
+
/* Register connection's alert token in our lookup structure.
* To use rbtrees we have to implement our own insert core.
* Requires @conns_lock
@@ -133,6 +139,20 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn)
smc_lgr_schedule_free_work(lgr);
}
+/* Send delete link, either as client to request the initiation
+ * of the DELETE LINK sequence from server; or as server to
+ * initiate the delete processing. See smc_llc_rx_delete_link().
+ */
+static int smc_link_send_delete(struct smc_link *lnk)
+{
+ if (lnk->state == SMC_LNK_ACTIVE &&
+ !smc_llc_send_delete_link(lnk, SMC_LLC_REQ, true)) {
+ smc_llc_link_deleting(lnk);
+ return 0;
+ }
+ return -ENOTCONN;
+}
+
static void smc_lgr_free_work(struct work_struct *work)
{
struct smc_link_group *lgr = container_of(to_delayed_work(work),
@@ -153,10 +173,21 @@ static void smc_lgr_free_work(struct work_struct *work)
list_del_init(&lgr->list); /* remove from smc_lgr_list */
free:
spin_unlock_bh(&smc_lgr_list.lock);
+
+ if (!lgr->is_smcd && !lgr->terminating) {
+ /* try to send del link msg, on error free lgr immediately */
+ if (!smc_link_send_delete(&lgr->lnk[SMC_SINGLE_LINK])) {
+ /* reschedule in case we never receive a response */
+ smc_lgr_schedule_free_work(lgr);
+ return;
+ }
+ }
+
if (!delayed_work_pending(&lgr->free_work)) {
- if (!lgr->is_smcd &&
- lgr->lnk[SMC_SINGLE_LINK].state != SMC_LNK_INACTIVE)
- smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);
+ struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
+
+ if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
+ smc_llc_link_inactive(lnk);
smc_lgr_free(lgr);
}
}
@@ -984,8 +1015,14 @@ void smc_core_exit(void)
spin_unlock_bh(&smc_lgr_list.lock);
list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) {
list_del_init(&lgr->list);
- if (!lgr->is_smcd)
- smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);
+ if (!lgr->is_smcd) {
+ struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
+
+ if (lnk->state == SMC_LNK_ACTIVE)
+ smc_llc_send_delete_link(lnk, SMC_LLC_REQ,
+ false);
+ smc_llc_link_inactive(lnk);
+ }
cancel_delayed_work_sync(&lgr->free_work);
smc_lgr_free(lgr); /* free link group */
}