Diffstat (limited to 'net/smc/smc_core.c')
-rw-r--r--  net/smc/smc_core.c  |  103
1 file changed, 71 insertions(+), 32 deletions(-)
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 2424c7100aaf..f44f6803f7ff 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -32,6 +32,17 @@
static u32 smc_lgr_num; /* unique link group number */
+static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
+{
+ /* client link group creation always follows the server link group
+ * creation. For client use a somewhat higher removal delay time,
+ * otherwise there is a risk of out-of-sync link groups.
+ */
+ mod_delayed_work(system_wq, &lgr->free_work,
+ lgr->role == SMC_CLNT ? SMC_LGR_FREE_DELAY_CLNT :
+ SMC_LGR_FREE_DELAY_SERV);
+}
+
/* Register connection's alert token in our lookup structure.
* To use rbtrees we have to implement our own insert core.
* Requires @conns_lock
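For context on the helper added above (not part of the patch itself): mod_delayed_work() queues the delayed work if it is idle, or simply re-arms its timer if it is already pending, so every teardown path can call smc_lgr_schedule_free_work() unconditionally. A minimal sketch of the same delayed-free pattern, using hypothetical names (my_obj, my_obj_free_work) that are illustrative only:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_obj {
	struct delayed_work free_work;
};

static void my_obj_free_work(struct work_struct *work)
{
	struct my_obj *obj = container_of(to_delayed_work(work),
					  struct my_obj, free_work);

	/* mirrors the smc_lgr_free_work() change further down: if another
	 * path re-armed the timer meanwhile, let that later run do the free
	 */
	if (!delayed_work_pending(&obj->free_work))
		kfree(obj);
}

static void my_obj_schedule_free(struct my_obj *obj, unsigned long delay)
{
	/* queue the work, or push out the timer if it is already pending */
	mod_delayed_work(system_wq, &obj->free_work, delay);
}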
@@ -111,13 +122,7 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn)
write_unlock_bh(&lgr->conns_lock);
if (!reduced || lgr->conns_num)
return;
- /* client link group creation always follows the server link group
- * creation. For client use a somewhat higher removal delay time,
- * otherwise there is a risk of out-of-sync link groups.
- */
- mod_delayed_work(system_wq, &lgr->free_work,
- lgr->role == SMC_CLNT ? SMC_LGR_FREE_DELAY_CLNT :
- SMC_LGR_FREE_DELAY_SERV);
+ smc_lgr_schedule_free_work(lgr);
}
static void smc_lgr_free_work(struct work_struct *work)
@@ -140,11 +145,12 @@ static void smc_lgr_free_work(struct work_struct *work)
list_del_init(&lgr->list); /* remove from smc_lgr_list */
free:
spin_unlock_bh(&smc_lgr_list.lock);
- smc_lgr_free(lgr);
+ if (!delayed_work_pending(&lgr->free_work))
+ smc_lgr_free(lgr);
}
/* create a new SMC link group */
-static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr,
+static int smc_lgr_create(struct smc_sock *smc,
struct smc_ib_device *smcibdev, u8 ibport,
char *peer_systemid, unsigned short vlan_id)
{
@@ -161,7 +167,6 @@ static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr,
}
lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
lgr->sync_err = false;
- lgr->daddr = peer_in_addr;
memcpy(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN);
lgr->vlan_id = vlan_id;
rwlock_init(&lgr->sndbufs_lock);
@@ -177,6 +182,8 @@ static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr,
lnk = &lgr->lnk[SMC_SINGLE_LINK];
/* initialize link */
+ lnk->state = SMC_LNK_ACTIVATING;
+ lnk->link_id = SMC_SINGLE_LINK;
lnk->smcibdev = smcibdev;
lnk->ibport = ibport;
lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu;
@@ -198,6 +205,8 @@ static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr,
goto destroy_qp;
init_completion(&lnk->llc_confirm);
init_completion(&lnk->llc_confirm_resp);
+ init_completion(&lnk->llc_add);
+ init_completion(&lnk->llc_add_resp);
smc->conn.lgr = lgr;
rwlock_init(&lgr->conns_lock);
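The two completions added above follow the pattern already used for llc_confirm and llc_confirm_resp: the LLC receive path completes them and the handshake code waits on them with a timeout. A generic sketch of that pattern, with illustrative names that are not taken from the patch:

#include <linux/completion.h>
#include <linux/errno.h>

struct example_link {
	struct completion llc_add;	/* peer's ADD LINK arrived */
};

/* receive side: wake whoever is waiting for the expected LLC message */
static void example_rx_add_link(struct example_link *lnk)
{
	complete(&lnk->llc_add);
}

/* handshake side: wait a bounded time for the peer's ADD LINK */
static int example_wait_add_link(struct example_link *lnk, unsigned long timeout)
{
	if (!wait_for_completion_timeout(&lnk->llc_add, timeout))
		return -ETIMEDOUT;
	return 0;
}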
@@ -306,6 +315,15 @@ void smc_lgr_free(struct smc_link_group *lgr)
kfree(lgr);
}
+void smc_lgr_forget(struct smc_link_group *lgr)
+{
+ spin_lock_bh(&smc_lgr_list.lock);
+ /* do not use this link group for new connections */
+ if (!list_empty(&lgr->list))
+ list_del_init(&lgr->list);
+ spin_unlock_bh(&smc_lgr_list.lock);
+}
+
/* terminate linkgroup abnormally */
void smc_lgr_terminate(struct smc_link_group *lgr)
{
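smc_lgr_forget() above only unhooks the link group from smc_lgr_list so it is no longer handed out to new connections; it neither terminates existing connections nor frees the group. A hedged sketch of a possible error-path caller (the function name and surrounding logic are assumptions for illustration, not code from this patch):

/* hypothetical: a CLC handshake failed right after a first-contact link
 * group was created, so stop reusing that group and let the normal
 * teardown path reap it via the delayed free_work.
 */
static void smc_example_abort_first_contact(struct smc_sock *smc)
{
	struct smc_link_group *lgr = smc->conn.lgr;

	if (lgr)
		smc_lgr_forget(lgr);	/* drop from the global lgr list */
	smc_conn_free(&smc->conn);	/* unregister; free_work is scheduled
					 * once the group has no connections
					 */
}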
@@ -313,15 +331,7 @@ void smc_lgr_terminate(struct smc_link_group *lgr)
struct smc_sock *smc;
struct rb_node *node;
- spin_lock_bh(&smc_lgr_list.lock);
- if (list_empty(&lgr->list)) {
- /* termination already triggered */
- spin_unlock_bh(&smc_lgr_list.lock);
- return;
- }
- /* do not use this link group for new connections */
- list_del_init(&lgr->list);
- spin_unlock_bh(&smc_lgr_list.lock);
+ smc_lgr_forget(lgr);
write_lock_bh(&lgr->conns_lock);
node = rb_first(&lgr->conns_all);
@@ -339,6 +349,7 @@ void smc_lgr_terminate(struct smc_link_group *lgr)
}
write_unlock_bh(&lgr->conns_lock);
wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait);
+ smc_lgr_schedule_free_work(lgr);
}
/* Determine vlan of internal TCP socket.
@@ -400,7 +411,7 @@ static int smc_link_determine_gid(struct smc_link_group *lgr)
}
/* create a new SMC connection (and a new link group if necessary) */
-int smc_conn_create(struct smc_sock *smc, __be32 peer_in_addr,
+int smc_conn_create(struct smc_sock *smc,
struct smc_ib_device *smcibdev, u8 ibport,
struct smc_clc_msg_local *lcl, int srv_first_contact)
{
@@ -457,7 +468,7 @@ int smc_conn_create(struct smc_sock *smc, __be32 peer_in_addr,
create:
if (local_contact == SMC_FIRST_CONTACT) {
- rc = smc_lgr_create(smc, peer_in_addr, smcibdev, ibport,
+ rc = smc_lgr_create(smc, smcibdev, ibport,
lcl->id_for_peer, vlan_id);
if (rc)
goto out;
@@ -465,7 +476,7 @@ create:
rc = smc_link_determine_gid(conn->lgr);
}
conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
- conn->local_tx_ctrl.len = sizeof(struct smc_cdc_msg);
+ conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
#ifndef KERNEL_HAS_ATOMIC64
spin_lock_init(&conn->acurs_lock);
#endif
@@ -698,27 +709,55 @@ static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
return -ENOSPC;
}
-/* save rkey and dma_addr received from peer during clc handshake */
-int smc_rmb_rtoken_handling(struct smc_connection *conn,
- struct smc_clc_msg_accept_confirm *clc)
+/* add a new rtoken from peer */
+int smc_rtoken_add(struct smc_link_group *lgr, __be64 nw_vaddr, __be32 nw_rkey)
{
- u64 dma_addr = be64_to_cpu(clc->rmb_dma_addr);
- struct smc_link_group *lgr = conn->lgr;
- u32 rkey = ntohl(clc->rmb_rkey);
+ u64 dma_addr = be64_to_cpu(nw_vaddr);
+ u32 rkey = ntohl(nw_rkey);
int i;
for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) &&
(lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr == dma_addr) &&
test_bit(i, lgr->rtokens_used_mask)) {
- conn->rtoken_idx = i;
+ /* already in list */
+ return i;
+ }
+ }
+ i = smc_rmb_reserve_rtoken_idx(lgr);
+ if (i < 0)
+ return i;
+ lgr->rtokens[i][SMC_SINGLE_LINK].rkey = rkey;
+ lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = dma_addr;
+ return i;
+}
+
+/* delete an rtoken */
+int smc_rtoken_delete(struct smc_link_group *lgr, __be32 nw_rkey)
+{
+ u32 rkey = ntohl(nw_rkey);
+ int i;
+
+ for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
+ if (lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey &&
+ test_bit(i, lgr->rtokens_used_mask)) {
+ lgr->rtokens[i][SMC_SINGLE_LINK].rkey = 0;
+ lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = 0;
+
+ clear_bit(i, lgr->rtokens_used_mask);
return 0;
}
}
- conn->rtoken_idx = smc_rmb_reserve_rtoken_idx(lgr);
+ return -ENOENT;
+}
+
+/* save rkey and dma_addr received from peer during clc handshake */
+int smc_rmb_rtoken_handling(struct smc_connection *conn,
+ struct smc_clc_msg_accept_confirm *clc)
+{
+ conn->rtoken_idx = smc_rtoken_add(conn->lgr, clc->rmb_dma_addr,
+ clc->rmb_rkey);
if (conn->rtoken_idx < 0)
return conn->rtoken_idx;
- lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey = rkey;
- lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr = dma_addr;
return 0;
}