Diffstat (limited to 'net/smc')
-rw-r--r--  net/smc/af_smc.c    | 21
-rw-r--r--  net/smc/smc.h       |  5
-rw-r--r--  net/smc/smc_cdc.c   |  3
-rw-r--r--  net/smc/smc_close.c |  4
-rw-r--r--  net/smc/smc_core.c  |  3
-rw-r--r--  net/smc/smc_core.h  | 10
-rw-r--r--  net/smc/smc_ib.c    |  2
-rw-r--r--  net/smc/smc_ism.c   |  2
-rw-r--r--  net/smc/smc_llc.c   | 13
-rw-r--r--  net/smc/smc_rx.c    |  4
-rw-r--r--  net/smc/smc_tx.c    |  4
-rw-r--r--  net/smc/smc_wr.c    | 35
-rw-r--r--  net/smc/smc_wr.h    |  5
13 files changed, 79 insertions, 32 deletions
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index ff6dd86bdc9f..538e9c6ec8c9 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -2000,8 +2000,10 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc,
return rc;
/* create send buffer and rmb */
- if (smc_buf_create(new_smc, false))
+ if (smc_buf_create(new_smc, false)) {
+ smc_conn_abort(new_smc, ini->first_contact_local);
return SMC_CLC_DECL_MEM;
+ }
return 0;
}
@@ -2217,8 +2219,11 @@ static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
smcr_version = ini->smcr_version;
ini->smcr_version = SMC_V2;
rc = smc_listen_rdma_init(new_smc, ini);
- if (!rc)
+ if (!rc) {
rc = smc_listen_rdma_reg(new_smc, ini->first_contact_local);
+ if (rc)
+ smc_conn_abort(new_smc, ini->first_contact_local);
+ }
if (!rc)
return;
ini->smcr_version = smcr_version;
@@ -3270,6 +3275,17 @@ static int __smc_create(struct net *net, struct socket *sock, int protocol,
sk_common_release(sk);
goto out;
}
+
+ /* smc_clcsock_release() does not wait for smc->clcsock->sk's
+ * destruction; its sk_state might not be TCP_CLOSE after
+ * smc->sk is close()d, and TCP timers can fire later,
+ * which need the net ref.
+ */
+ sk = smc->clcsock->sk;
+ __netns_tracker_free(net, &sk->ns_tracker, false);
+ sk->sk_net_refcnt = 1;
+ get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
+ sock_inuse_add(net, 1);
} else {
smc->clcsock = clcsock;
}
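
The hunk above upgrades the kernel-created clcsock to a full, tracked netns reference. For background, this is the generic pattern for a kernel socket whose timers may outlive its owner; the helper name below is hypothetical, only the calls inside it are taken from the hunk:

#include <net/net_namespace.h>
#include <net/sock.h>

/* Hypothetical helper illustrating the pattern above: kernel sockets from
 * sock_create_kern() start with sk_net_refcnt == 0 and only a passive netns
 * tracker, so a socket that can outlive its owner is switched over to a
 * refcounted, tracked reference like a user socket would hold.
 */
static void demo_upgrade_netns_ref(struct net *net, struct sock *sk)
{
	/* drop the non-refcounted tracker entry taken at sk_alloc() time */
	__netns_tracker_free(net, &sk->ns_tracker, false);

	/* pin the netns for as long as the socket (and its timers) live */
	sk->sk_net_refcnt = 1;
	get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
	sock_inuse_add(net, 1);
}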
@@ -3501,6 +3517,7 @@ out_pnet:
out_nl:
smc_nl_exit();
out_ism:
+ smc_clc_exit();
smc_ism_exit();
out_pernet_subsys_stat:
unregister_pernet_subsys(&smc_net_stat_ops);
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 5ed765ea0c73..2eeea4cdc718 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -283,10 +283,7 @@ struct smc_sock { /* smc sock container */
* */
};
-static inline struct smc_sock *smc_sk(const struct sock *sk)
-{
- return (struct smc_sock *)sk;
-}
+#define smc_sk(ptr) container_of_const(ptr, struct smc_sock, sk)
static inline void smc_init_saved_callbacks(struct smc_sock *smc)
{
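
The macro replacement above lets smc_sk() preserve the const-ness of its argument instead of silently casting it away. A small, hypothetical illustration of the difference (the two demo callers are not part of the patch):

#include <net/sock.h>
#include "smc.h"	/* struct smc_sock, smc_sk() */

static void demo_writer(struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);	/* non-const in, non-const out */

	smc->use_fallback = false;
}

static int demo_reader(const struct sock *sk)
{
	const struct smc_sock *smc = smc_sk(sk);	/* const in, const out */

	return smc->fallback_rsn;
}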
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index 53f63bfbaf5f..89105e95b452 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -114,6 +114,9 @@ int smc_cdc_msg_send(struct smc_connection *conn,
union smc_host_cursor cfed;
int rc;
+ if (unlikely(!READ_ONCE(conn->sndbuf_desc)))
+ return -ENOBUFS;
+
smc_cdc_add_pending_send(conn, pend);
conn->tx_cdc_seq++;
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 31db7438857c..dbdf03e8aa5b 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -67,8 +67,8 @@ static void smc_close_stream_wait(struct smc_sock *smc, long timeout)
rc = sk_wait_event(sk, &timeout,
!smc_tx_prepared_sends(&smc->conn) ||
- sk->sk_err == ECONNABORTED ||
- sk->sk_err == ECONNRESET ||
+ READ_ONCE(sk->sk_err) == ECONNABORTED ||
+ READ_ONCE(sk->sk_err) == ECONNRESET ||
smc->conn.killed,
&wait);
if (rc)
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index d52060b2680c..3f465faf2b68 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -127,6 +127,7 @@ static int smcr_lgr_conn_assign_link(struct smc_connection *conn, bool first)
int i, j;
/* do link balancing */
+ conn->lnk = NULL; /* reset conn->lnk first */
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
struct smc_link *lnk = &conn->lgr->lnk[i];
@@ -1464,7 +1465,7 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
if (lgr->terminating)
return; /* lgr already terminating */
/* cancel free_work sync, will terminate when lgr->freeing is set */
- cancel_delayed_work_sync(&lgr->free_work);
+ cancel_delayed_work(&lgr->free_work);
lgr->terminating = 1;
/* kill remaining link group connections */
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 08b457c2d294..1645fba0d2d3 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -106,7 +106,10 @@ struct smc_link {
unsigned long *wr_tx_mask; /* bit mask of used indexes */
u32 wr_tx_cnt; /* number of WR send buffers */
wait_queue_head_t wr_tx_wait; /* wait for free WR send buf */
- atomic_t wr_tx_refcnt; /* tx refs to link */
+ struct {
+ struct percpu_ref wr_tx_refs;
+ } ____cacheline_aligned_in_smp;
+ struct completion tx_ref_comp;
struct smc_wr_buf *wr_rx_bufs; /* WR recv payload buffers */
struct ib_recv_wr *wr_rx_ibs; /* WR recv meta data */
@@ -122,7 +125,10 @@ struct smc_link {
struct ib_reg_wr wr_reg; /* WR register memory region */
wait_queue_head_t wr_reg_wait; /* wait for wr_reg result */
- atomic_t wr_reg_refcnt; /* reg refs to link */
+ struct {
+ struct percpu_ref wr_reg_refs;
+ } ____cacheline_aligned_in_smp;
+ struct completion reg_ref_comp;
enum smc_wr_reg_state wr_reg_state; /* state of wr_reg request */
u8 gid[SMC_GID_SIZE];/* gid matching used vlan id*/
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 854772dd52fd..9b66d6aeeb1a 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -843,7 +843,7 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
goto out;
/* the calculated number of cq entries fits to mlx5 cq allocation */
cqe_size_order = cache_line_size() == 128 ? 7 : 6;
- smc_order = MAX_ORDER - cqe_size_order - 1;
+ smc_order = MAX_ORDER - cqe_size_order;
if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE)
cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index 3b0b7710c6b0..fbee2493091f 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -429,7 +429,7 @@ static void smcd_register_dev(struct ism_dev *ism)
u8 *system_eid = NULL;
system_eid = smcd->ops->get_system_eid();
- if (system_eid[24] != '0' || system_eid[28] != '0') {
+ if (smcd->ops->supports_v2()) {
smc_ism_v2_capable = true;
memcpy(smc_ism_v2_system_eid, system_eid,
SMC_MAX_EID_LEN);
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index a0840b8c935b..90f0b60b196a 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -578,7 +578,10 @@ static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr,
{
struct smc_buf_desc *buf_next;
- if (!buf_pos || list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) {
+ if (!buf_pos)
+ return _smc_llc_get_next_rmb(lgr, buf_lst);
+
+ if (list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) {
(*buf_lst)++;
return _smc_llc_get_next_rmb(lgr, buf_lst);
}
@@ -614,6 +617,8 @@ static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext,
goto out;
buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
for (i = 0; i < ext->num_rkeys; i++) {
+ while (buf_pos && !(buf_pos)->used)
+ buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
if (!buf_pos)
break;
rmb = buf_pos;
@@ -623,8 +628,6 @@ static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext,
cpu_to_be64((uintptr_t)rmb->cpu_addr) :
cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl));
buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
- while (buf_pos && !(buf_pos)->used)
- buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
}
len += i * sizeof(ext->rt[0]);
out:
@@ -848,6 +851,8 @@ static int smc_llc_add_link_cont(struct smc_link *link,
addc_llc->num_rkeys = *num_rkeys_todo;
n = *num_rkeys_todo;
for (i = 0; i < min_t(u8, n, SMC_LLC_RKEYS_PER_CONT_MSG); i++) {
+ while (*buf_pos && !(*buf_pos)->used)
+ *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
if (!*buf_pos) {
addc_llc->num_rkeys = addc_llc->num_rkeys -
*num_rkeys_todo;
@@ -864,8 +869,6 @@ static int smc_llc_add_link_cont(struct smc_link *link,
(*num_rkeys_todo)--;
*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
- while (*buf_pos && !(*buf_pos)->used)
- *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
}
addc_llc->hd.common.llc_type = SMC_LLC_ADD_LINK_CONT;
addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
index 4380d32f5a5f..9a2f3638d161 100644
--- a/net/smc/smc_rx.c
+++ b/net/smc/smc_rx.c
@@ -267,9 +267,9 @@ int smc_rx_wait(struct smc_sock *smc, long *timeo,
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
add_wait_queue(sk_sleep(sk), &wait);
rc = sk_wait_event(sk, timeo,
- sk->sk_err ||
+ READ_ONCE(sk->sk_err) ||
cflags->peer_conn_abort ||
- sk->sk_shutdown & RCV_SHUTDOWN ||
+ READ_ONCE(sk->sk_shutdown) & RCV_SHUTDOWN ||
conn->killed ||
fcrit(conn),
&wait);
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index f4b6a71ac488..45128443f1f1 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -113,8 +113,8 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
break; /* at least 1 byte of free & no urgent data */
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk_wait_event(sk, &timeo,
- sk->sk_err ||
- (sk->sk_shutdown & SEND_SHUTDOWN) ||
+ READ_ONCE(sk->sk_err) ||
+ (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) ||
smc_cdc_rxed_any_close(conn) ||
(atomic_read(&conn->sndbuf_space) &&
!conn->urg_tx_pend),
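
The sk_err/sk_shutdown accesses converted above (and in smc_close.c and smc_rx.c) are evaluated without the socket lock, so they are annotated with READ_ONCE(); the expectation is that writers use WRITE_ONCE() on the same fields. A rough sketch of that pairing, with made-up demo_* names:

#include <net/sock.h>

/* lockless reader: the annotations keep the compiler from tearing or
 * re-loading the fields while the wait condition is evaluated
 */
static bool demo_tx_wait_cond(const struct sock *sk)
{
	return READ_ONCE(sk->sk_err) ||
	       (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN);
}

/* writer side: pairs with the lockless readers above */
static void demo_report_error(struct sock *sk, int err)
{
	WRITE_ONCE(sk->sk_err, err);
	WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | SEND_SHUTDOWN);
	sk_error_report(sk);
}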
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index b0678a417e09..0021065a600a 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -377,12 +377,11 @@ int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
if (rc)
return rc;
- atomic_inc(&link->wr_reg_refcnt);
+ percpu_ref_get(&link->wr_reg_refs);
rc = wait_event_interruptible_timeout(link->wr_reg_wait,
(link->wr_reg_state != POSTED),
SMC_WR_REG_MR_WAIT_TIME);
- if (atomic_dec_and_test(&link->wr_reg_refcnt))
- wake_up_all(&link->wr_reg_wait);
+ percpu_ref_put(&link->wr_reg_refs);
if (!rc) {
/* timeout - terminate link */
smcr_link_down_cond_sched(link);
@@ -647,8 +646,10 @@ void smc_wr_free_link(struct smc_link *lnk)
smc_wr_wakeup_tx_wait(lnk);
smc_wr_tx_wait_no_pending_sends(lnk);
- wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt)));
- wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt)));
+ percpu_ref_kill(&lnk->wr_reg_refs);
+ wait_for_completion(&lnk->reg_ref_comp);
+ percpu_ref_kill(&lnk->wr_tx_refs);
+ wait_for_completion(&lnk->tx_ref_comp);
if (lnk->wr_rx_dma_addr) {
ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
@@ -847,6 +848,20 @@ void smc_wr_add_dev(struct smc_ib_device *smcibdev)
tasklet_setup(&smcibdev->send_tasklet, smc_wr_tx_tasklet_fn);
}
+static void smcr_wr_tx_refs_free(struct percpu_ref *ref)
+{
+ struct smc_link *lnk = container_of(ref, struct smc_link, wr_tx_refs);
+
+ complete(&lnk->tx_ref_comp);
+}
+
+static void smcr_wr_reg_refs_free(struct percpu_ref *ref)
+{
+ struct smc_link *lnk = container_of(ref, struct smc_link, wr_reg_refs);
+
+ complete(&lnk->reg_ref_comp);
+}
+
int smc_wr_create_link(struct smc_link *lnk)
{
struct ib_device *ibdev = lnk->smcibdev->ibdev;
@@ -890,9 +905,15 @@ int smc_wr_create_link(struct smc_link *lnk)
smc_wr_init_sge(lnk);
bitmap_zero(lnk->wr_tx_mask, SMC_WR_BUF_CNT);
init_waitqueue_head(&lnk->wr_tx_wait);
- atomic_set(&lnk->wr_tx_refcnt, 0);
+ rc = percpu_ref_init(&lnk->wr_tx_refs, smcr_wr_tx_refs_free, 0, GFP_KERNEL);
+ if (rc)
+ goto dma_unmap;
+ init_completion(&lnk->tx_ref_comp);
init_waitqueue_head(&lnk->wr_reg_wait);
- atomic_set(&lnk->wr_reg_refcnt, 0);
+ rc = percpu_ref_init(&lnk->wr_reg_refs, smcr_wr_reg_refs_free, 0, GFP_KERNEL);
+ if (rc)
+ goto dma_unmap;
+ init_completion(&lnk->reg_ref_comp);
init_waitqueue_head(&lnk->wr_rx_empty_wait);
return rc;
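
The atomic_t-to-percpu_ref conversion above (and the hold/put helpers in smc_wr.h below) follows the usual kernel lifecycle: init the ref with a release callback, get/put on the hot path, then kill it and wait for the release callback's completion at teardown. A minimal sketch of that pattern, with illustrative demo_* names:

#include <linux/percpu-refcount.h>
#include <linux/completion.h>
#include <linux/container_of.h>

struct demo_link {
	struct percpu_ref	refs;
	struct completion	refs_gone;
};

/* release callback: runs once the last reference is dropped after kill */
static void demo_refs_release(struct percpu_ref *ref)
{
	struct demo_link *lnk = container_of(ref, struct demo_link, refs);

	complete(&lnk->refs_gone);
}

static int demo_link_init(struct demo_link *lnk)
{
	int rc;

	rc = percpu_ref_init(&lnk->refs, demo_refs_release, 0, GFP_KERNEL);
	if (rc)
		return rc;
	init_completion(&lnk->refs_gone);
	return 0;
}

/* hot path: percpu_ref_get(&lnk->refs); ... percpu_ref_put(&lnk->refs); */

static void demo_link_free(struct demo_link *lnk)
{
	percpu_ref_kill(&lnk->refs);		/* no new gets; drop the initial ref */
	wait_for_completion(&lnk->refs_gone);	/* wait for outstanding users */
	percpu_ref_exit(&lnk->refs);		/* free the per-cpu counters */
}

Compared with the old atomic_t counter plus wake_up_all(), the per-cpu fast path avoids bouncing a shared cache line on every send, which is also why the patch places the refs in their own ____cacheline_aligned_in_smp group.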
diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
index 45e9b894d3f8..f3008dda222a 100644
--- a/net/smc/smc_wr.h
+++ b/net/smc/smc_wr.h
@@ -63,14 +63,13 @@ static inline bool smc_wr_tx_link_hold(struct smc_link *link)
{
if (!smc_link_sendable(link))
return false;
- atomic_inc(&link->wr_tx_refcnt);
+ percpu_ref_get(&link->wr_tx_refs);
return true;
}
static inline void smc_wr_tx_link_put(struct smc_link *link)
{
- if (atomic_dec_and_test(&link->wr_tx_refcnt))
- wake_up_all(&link->wr_tx_wait);
+ percpu_ref_put(&link->wr_tx_refs);
}
static inline void smc_wr_drain_cq(struct smc_link *lnk)