author     David S. Miller <davem@davemloft.net>    2021-06-07 13:01:52 -0700
committer  David S. Miller <davem@davemloft.net>    2021-06-07 13:01:52 -0700
commit     126285651b7f95282a0afe3a1b0221419b31d989 (patch)
tree       e5d547255814a5ed55b6b74be3155464598b39f2 /net/mptcp
parent     9977d6f56bacc9784654be4d0f4d27b368f57f5b (diff)
parent     3822d0670c9d4342794d73e0d0e615322b40438e (diff)
Merge ra.kernel.org:/pub/scm/linux/kernel/git/netdev/net
Bug fixes overlapping feature additions and refactoring, mostly.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/mptcp')
-rw-r--r--  net/mptcp/protocol.c | 16
-rw-r--r--  net/mptcp/subflow.c  | 79
2 files changed, 55 insertions(+), 40 deletions(-)
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 3897d35fd9df..993095089990 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -956,6 +956,10 @@ static void __mptcp_update_wmem(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
+#ifdef CONFIG_LOCKDEP
+ WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock));
+#endif
+
if (!msk->wmem_reserved)
return;
@@ -1094,10 +1098,20 @@ out:
static void __mptcp_clean_una_wakeup(struct sock *sk)
{
+#ifdef CONFIG_LOCKDEP
+ WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock));
+#endif
__mptcp_clean_una(sk);
mptcp_write_space(sk);
}
+static void mptcp_clean_una_wakeup(struct sock *sk)
+{
+ mptcp_data_lock(sk);
+ __mptcp_clean_una_wakeup(sk);
+ mptcp_data_unlock(sk);
+}
+
static void mptcp_enter_memory_pressure(struct sock *sk)
{
struct mptcp_subflow_context *subflow;
@@ -2321,7 +2335,7 @@ static void __mptcp_retrans(struct sock *sk)
struct sock *ssk;
int ret;
- __mptcp_clean_una_wakeup(sk);
+ mptcp_clean_una_wakeup(sk);
dfrag = mptcp_rtx_head(sk);
if (!dfrag) {
if (mptcp_data_fin_enabled(msk)) {
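The protocol.c hunks above follow the kernel convention where a double-underscore function asserts that its caller already holds the relevant lock (here via lockdep_is_held() on sk_lock.slock), while a plain-named wrapper, the new mptcp_clean_una_wakeup(), takes and releases the lock around it. A minimal userspace sketch of that convention, for illustration only: struct conn, conn_update_wmem() and the pthread-based lock-held check are hypothetical stand-ins, not kernel API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
	pthread_mutex_t lock;
	int wmem_reserved;
};

static void assert_lock_held(pthread_mutex_t *m)
{
	/* Userspace stand-in for WARN_ON_ONCE(!lockdep_is_held(...)):
	 * trylock on a held, non-recursive mutex returns EBUSY, so a
	 * successful trylock means the caller forgot to take the lock.
	 */
	if (pthread_mutex_trylock(m) == 0) {
		pthread_mutex_unlock(m);
		fprintf(stderr, "caller does not hold the lock\n");
		abort();
	}
}

/* Double-underscore helper: caller must already hold conn->lock, as with
 * __mptcp_update_wmem() and __mptcp_clean_una_wakeup() in the patch.
 */
static void __conn_update_wmem(struct conn *c)
{
	assert_lock_held(&c->lock);

	if (!c->wmem_reserved)
		return;
	c->wmem_reserved = 0;
}

/* Plain-named wrapper takes the lock itself, mirroring the new
 * mptcp_clean_una_wakeup() wrapping __mptcp_clean_una_wakeup().
 */
static void conn_update_wmem(struct conn *c)
{
	pthread_mutex_lock(&c->lock);
	__conn_update_wmem(c);
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct conn c = { PTHREAD_MUTEX_INITIALIZER, 1 };

	conn_update_wmem(&c);	/* safe: wrapper acquires the lock */
	printf("wmem_reserved = %d\n", c.wmem_reserved);
	return 0;
}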
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 2a58503e55bd..33956337c46b 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -630,21 +630,20 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
/* if the sk is MP_CAPABLE, we try to fetch the client key */
if (subflow_req->mp_capable) {
- if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
- /* here we can receive and accept an in-window,
- * out-of-order pkt, which will not carry the MP_CAPABLE
- * opt even on mptcp enabled paths
- */
- goto create_msk;
- }
-
+ /* we can receive and accept an in-window, out-of-order pkt,
+ * which may not carry the MP_CAPABLE opt even on mptcp enabled
+ * paths: always try to extract the peer key, and fall back
+ * for packets missing it.
+ * Even OoO DSS packets coming legitimately after dropped or
+ * reordered MPC will cause fallback, but we don't have other
+ * options.
+ */
mptcp_get_options(skb, &mp_opt);
if (!mp_opt.mp_capable) {
fallback = true;
goto create_child;
}
-create_msk:
new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
if (!new_msk)
fallback = true;
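The rewritten comment above explains the new strategy: instead of skipping option parsing whenever the sequence number is not the expected third-ACK position, always call mptcp_get_options() and let a missing MP_CAPABLE drive the fallback path. A hedged sketch of just that decision, where struct parsed_opts and accept_mp_capable() are hypothetical stand-ins for mp_opt and the surrounding create_child logic:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified stand-in for the parsed option block (mp_opt
 * in the patch): MP_CAPABLE presence plus the client key it carries.
 */
struct parsed_opts {
	bool mp_capable;
	uint64_t client_key;	/* valid only if mp_capable is set */
};

/* Model of the patched flow in subflow_syn_recv_sock(): parse
 * unconditionally; a packet without MP_CAPABLE (e.g. an in-window,
 * out-of-order one) falls back to plain TCP instead of being
 * special-cased by sequence number.
 */
static bool accept_mp_capable(const struct parsed_opts *opts, uint64_t *key)
{
	if (!opts->mp_capable)
		return false;	/* fallback = true; goto create_child; */
	*key = opts->client_key;
	return true;		/* proceed to the mptcp_sk_clone() step */
}

int main(void)
{
	struct parsed_opts in_order = { .mp_capable = true, .client_key = 42 };
	struct parsed_opts ooo_dss = { .mp_capable = false };
	uint64_t key;

	printf("in-order MPC ack: %s\n",
	       accept_mp_capable(&in_order, &key) ? "MPTCP" : "TCP fallback");
	printf("OoO DSS packet:   %s\n",
	       accept_mp_capable(&ooo_dss, &key) ? "MPTCP" : "TCP fallback");
	return 0;
}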
@@ -1012,21 +1011,11 @@ static bool subflow_check_data_avail(struct sock *ssk)
status = get_mapping_status(ssk, msk);
trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
- if (status == MAPPING_INVALID) {
- ssk->sk_err = EBADMSG;
- goto fatal;
- }
- if (status == MAPPING_DUMMY) {
- __mptcp_do_fallback(msk);
- skb = skb_peek(&ssk->sk_receive_queue);
- subflow->map_valid = 1;
- subflow->map_seq = READ_ONCE(msk->ack_seq);
- subflow->map_data_len = skb->len;
- subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
- subflow->ssn_offset;
- subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
- return true;
- }
+ if (unlikely(status == MAPPING_INVALID))
+ goto fallback;
+
+ if (unlikely(status == MAPPING_DUMMY))
+ goto fallback;
if (status != MAPPING_OK)
goto no_data;
@@ -1039,10 +1028,8 @@ static bool subflow_check_data_avail(struct sock *ssk)
* MP_CAPABLE-based mapping
*/
if (unlikely(!READ_ONCE(msk->can_ack))) {
- if (!subflow->mpc_map) {
- ssk->sk_err = EBADMSG;
- goto fatal;
- }
+ if (!subflow->mpc_map)
+ goto fallback;
WRITE_ONCE(msk->remote_key, subflow->remote_key);
WRITE_ONCE(msk->ack_seq, subflow->map_seq);
WRITE_ONCE(msk->can_ack, true);
@@ -1070,17 +1057,31 @@ static bool subflow_check_data_avail(struct sock *ssk)
no_data:
subflow_sched_work_if_closed(msk, ssk);
return false;
-fatal:
- /* fatal protocol error, close the socket */
- /* This barrier is coupled with smp_rmb() in tcp_poll() */
- smp_wmb();
- ssk->sk_error_report(ssk);
- tcp_set_state(ssk, TCP_CLOSE);
- subflow->reset_transient = 0;
- subflow->reset_reason = MPTCP_RST_EMPTCP;
- tcp_send_active_reset(ssk, GFP_ATOMIC);
- subflow->data_avail = 0;
- return false;
+
+fallback:
+ /* RFC 8684 section 3.7. */
+ if (subflow->mp_join || subflow->fully_established) {
+ /* fatal protocol error, close the socket.
+ * subflow_error_report() will introduce the appropriate barriers
+ */
+ ssk->sk_err = EBADMSG;
+ ssk->sk_error_report(ssk);
+ tcp_set_state(ssk, TCP_CLOSE);
+ subflow->reset_transient = 0;
+ subflow->reset_reason = MPTCP_RST_EMPTCP;
+ tcp_send_active_reset(ssk, GFP_ATOMIC);
+ subflow->data_avail = 0;
+ return false;
+ }
+
+ __mptcp_do_fallback(msk);
+ skb = skb_peek(&ssk->sk_receive_queue);
+ subflow->map_valid = 1;
+ subflow->map_seq = READ_ONCE(msk->ack_seq);
+ subflow->map_data_len = skb->len;
+ subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
+ subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
+ return true;
}
bool mptcp_subflow_data_available(struct sock *sk)
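The new fallback: label encodes the RFC 8684 section 3.7 rule: a mapping error on an MP_JOIN subflow, or once the connection is fully established, is fatal and resets the subflow with MPTCP_RST_EMPTCP, while on a not-yet-established initial subflow the connection degrades to plain TCP via __mptcp_do_fallback(). A small sketch of that verdict alone; on_bad_mapping() and enum verdict are hypothetical names, not kernel identifiers.

#include <stdbool.h>
#include <stdio.h>

enum verdict { RESET_SUBFLOW, FALLBACK_TO_TCP };

/* Hypothetical reduction of the fallback: label in
 * subflow_check_data_avail(): only the initial subflow, before it is
 * fully established, may fall back; everything else gets a reset.
 */
static enum verdict on_bad_mapping(bool mp_join, bool fully_established)
{
	if (mp_join || fully_established)
		return RESET_SUBFLOW;	/* EBADMSG + TCP RST in the patch */
	return FALLBACK_TO_TCP;		/* __mptcp_do_fallback() path */
}

int main(void)
{
	printf("initial subflow, not established: %s\n",
	       on_bad_mapping(false, false) == FALLBACK_TO_TCP ?
	       "fallback" : "reset");
	printf("MP_JOIN subflow:                  %s\n",
	       on_bad_mapping(true, false) == RESET_SUBFLOW ?
	       "reset" : "fallback");
	printf("fully established:                %s\n",
	       on_bad_mapping(false, true) == RESET_SUBFLOW ?
	       "reset" : "fallback");
	return 0;
}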