Diffstat (limited to 'net/mptcp/protocol.h')
-rw-r--r--  net/mptcp/protocol.h  |  86
1 file changed, 69 insertions(+), 17 deletions(-)
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index b15d7fab5c4b..9c0d17876b22 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -124,7 +124,6 @@
#define MPTCP_FLUSH_JOIN_LIST 5
#define MPTCP_SYNC_STATE 6
#define MPTCP_SYNC_SNDBUF 7
-#define MPTCP_DEQUEUE 8
struct mptcp_skb_cb {
u64 map_seq;
@@ -235,7 +234,7 @@ struct mptcp_pm_data {
u8 add_addr_accepted;
u8 local_addr_used;
u8 pm_type;
- u8 subflows;
+ u8 extra_subflows;
u8 status;
);
@@ -341,8 +340,8 @@ struct mptcp_sock {
struct mptcp_pm_data pm;
struct mptcp_sched_ops *sched;
struct {
- u32 space; /* bytes copied in last measurement window */
- u32 copied; /* bytes copied in this measurement window */
+ int space; /* bytes copied in last measurement window */
+ int copied; /* bytes copied in this measurement window */
u64 time; /* start time of measurement window */
u64 rtt_us; /* last maximum rtt of subflows */
} rcvq_space;
@@ -357,6 +356,10 @@ struct mptcp_sock {
* allow_infinite_fallback and
* allow_join
*/
+
+ struct list_head backlog_list; /* protected by the data lock */
+ u32 backlog_len;
+ u32 backlog_unaccounted;
};
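The enqueue side of the new backlog is not part of this header; as a minimal sketch (the helper name and exact accounting are assumptions, the real path lives in net/mptcp/protocol.c), a subflow could park an skb roughly like this:

/* Illustrative only: append an skb to the msk backlog under the data
 * lock that protects backlog_list, and account its truesize in
 * backlog_len so __mptcp_space() below can see it.
 */
static void example_backlog_enqueue(struct mptcp_sock *msk, struct sk_buff *skb)
{
	struct sock *sk = (struct sock *)msk;

	mptcp_data_lock(sk);
	list_add_tail(&skb->list, &msk->backlog_list);
	WRITE_ONCE(msk->backlog_len, msk->backlog_len + skb->truesize);
	mptcp_data_unlock(sk);
}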
#define mptcp_data_lock(sk) spin_lock_bh(&(sk)->sk_lock.slock)
@@ -407,6 +410,7 @@ static inline int mptcp_space_from_win(const struct sock *sk, int win)
static inline int __mptcp_space(const struct sock *sk)
{
return mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
+ READ_ONCE(mptcp_sk(sk)->backlog_len) -
sk_rmem_alloc_get(sk));
}
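The net effect is that memory sitting on the backlog now shrinks the receive window just like charged rmem does. For example, with sk_rcvbuf at 256 KiB, 64 KiB already in sk_rmem_alloc and 32 KiB of backlogged skbs, mptcp_win_from_space() is now fed 160 KiB instead of 192 KiB.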
@@ -414,7 +418,7 @@ static inline struct mptcp_data_frag *mptcp_send_head(const struct sock *sk)
{
const struct mptcp_sock *msk = mptcp_sk(sk);
- return READ_ONCE(msk->first_pending);
+ return msk->first_pending;
}
static inline struct mptcp_data_frag *mptcp_send_next(struct sock *sk)
@@ -509,6 +513,7 @@ struct mptcp_subflow_context {
u64 remote_key;
u64 idsn;
u64 map_seq;
+ u64 rcv_wnd_sent;
u32 snd_isn;
u32 token;
u32 rel_write_seq;
@@ -535,16 +540,18 @@ struct mptcp_subflow_context {
send_infinite_map : 1,
remote_key_valid : 1, /* received the peer key from */
disposable : 1, /* ctx can be free at ulp release time */
+ closing : 1, /* must not pass rx data to msk anymore */
stale : 1, /* unable to snd/rcv data, do not use for xmit */
valid_csum_seen : 1, /* at least one csum validated */
is_mptfo : 1, /* subflow is doing TFO */
close_event_done : 1, /* has done the post-closed part */
mpc_drop : 1, /* the MPC option has been dropped in a rtx */
- __unused : 9;
+ __unused : 8;
bool data_avail;
bool scheduled;
bool pm_listener; /* a listener managed by the kernel PM? */
bool fully_established; /* path validated */
+ u32 lent_mem_frag;
u32 remote_nonce;
u64 thmac;
u32 local_nonce;
@@ -644,6 +651,42 @@ mptcp_send_active_reset_reason(struct sock *sk)
tcp_send_active_reset(sk, GFP_ATOMIC, reason);
}
+/* Make the fwd mem carried by the given skb available to the msk.
+ * To be paired with a previous mptcp_subflow_lend_fwdmem() call, before
+ * freeing the skb or setting the skb ownership.
+ */
+static inline void mptcp_borrow_fwdmem(struct sock *sk, struct sk_buff *skb)
+{
+ struct sock *ssk = skb->sk;
+
+ /* The subflow just lent the skb fwd memory; if the subflow meanwhile
+ * closed, mptcp_close_ssk() already released the ssk rcv memory.
+ */
+ DEBUG_NET_WARN_ON_ONCE(skb->destructor);
+ sk_forward_alloc_add(sk, skb->truesize);
+ if (!ssk)
+ return;
+
+ atomic_sub(skb->truesize, &ssk->sk_rmem_alloc);
+ skb->sk = NULL;
+}
+
+static inline void
+__mptcp_subflow_lend_fwdmem(struct mptcp_subflow_context *subflow, int size)
+{
+ /* keep only the sub-page remainder of the cumulative lent fwd memory */
+ int frag = (subflow->lent_mem_frag + size) & (PAGE_SIZE - 1);
+
+ subflow->lent_mem_frag = frag;
+}
+
+static inline void
+mptcp_subflow_lend_fwdmem(struct mptcp_subflow_context *subflow,
+ struct sk_buff *skb)
+{
+ __mptcp_subflow_lend_fwdmem(subflow, skb->truesize);
+ skb->destructor = NULL;
+}
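The two helpers above are meant to be used as a pair across the ssk/msk boundary. A hedged sketch of the intended sequence (the function below is illustrative, not part of the patch), assuming the msk socket lock is held:

/* Illustrative pairing, names are assumptions: the subflow lends the skb
 * fwd memory and clears the destructor, then the msk borrows it back
 * when taking ownership of the skb.
 */
static void example_move_skb(struct sock *sk,
			     struct mptcp_subflow_context *subflow,
			     struct sk_buff *skb)
{
	mptcp_subflow_lend_fwdmem(subflow, skb);	/* ssk side: lend + clear destructor */
	mptcp_borrow_fwdmem(sk, skb);			/* msk side: charge msk fwd mem, uncharge ssk rmem */
	skb_set_owner_r(skb, sk);			/* skb is now accounted to the msk */
}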
+
static inline u64
mptcp_subflow_get_map_offset(const struct mptcp_subflow_context *subflow)
{
@@ -706,6 +749,9 @@ mptcp_subflow_delegated_next(struct mptcp_delegated_action *delegated)
return ret;
}
+void __mptcp_inherit_memcg(struct sock *sk, struct sock *ssk, gfp_t gfp);
+void __mptcp_inherit_cgrp_data(struct sock *sk, struct sock *ssk);
+
int mptcp_is_enabled(const struct net *net);
unsigned int mptcp_get_add_addr_timeout(const struct net *net);
int mptcp_is_checksum_enabled(const struct net *net);
@@ -788,9 +834,7 @@ static inline bool mptcp_epollin_ready(const struct sock *sk)
* as it can always coalesce them
*/
return (data_avail >= sk->sk_rcvlowat) ||
- (mem_cgroup_sockets_enabled && sk->sk_memcg &&
- mem_cgroup_under_socket_pressure(sk->sk_memcg)) ||
- READ_ONCE(tcp_memory_pressure);
+ tcp_under_memory_pressure(sk);
}
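This is a pure simplification: tcp_under_memory_pressure() already expands to the same mem_cgroup_under_socket_pressure() check on sk->sk_memcg plus the READ_ONCE(tcp_memory_pressure) test that were open-coded here.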
int mptcp_set_rcvlowat(struct sock *sk, int val);
@@ -848,7 +892,7 @@ static inline void mptcp_stop_tout_timer(struct sock *sk)
if (!inet_csk(sk)->icsk_mtup.probe_timestamp)
return;
- sk_stop_timer(sk, &sk->sk_timer);
+ sk_stop_timer(sk, &inet_csk(sk)->mptcp_tout_timer);
inet_csk(sk)->icsk_mtup.probe_timestamp = 0;
}
@@ -978,8 +1022,6 @@ static inline void mptcp_propagate_sndbuf(struct sock *sk, struct sock *ssk)
local_bh_enable();
}
-void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags);
-
#define MPTCP_TOKEN_MAX_RETRIES 4
void __init mptcp_token_init(void);
@@ -1182,15 +1224,17 @@ void __init mptcp_pm_userspace_register(void);
void __init mptcp_pm_nl_init(void);
void mptcp_pm_worker(struct mptcp_sock *msk);
void __mptcp_pm_kernel_worker(struct mptcp_sock *msk);
-unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk);
-unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk);
-unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk);
-unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk);
+u8 mptcp_pm_get_endp_signal_max(const struct mptcp_sock *msk);
+u8 mptcp_pm_get_endp_subflow_max(const struct mptcp_sock *msk);
+u8 mptcp_pm_get_endp_laminar_max(const struct mptcp_sock *msk);
+u8 mptcp_pm_get_endp_fullmesh_max(const struct mptcp_sock *msk);
+u8 mptcp_pm_get_limit_add_addr_accepted(const struct mptcp_sock *msk);
+u8 mptcp_pm_get_limit_extra_subflows(const struct mptcp_sock *msk);
/* called under PM lock */
static inline void __mptcp_pm_close_subflow(struct mptcp_sock *msk)
{
- if (--msk->pm.subflows < mptcp_pm_get_subflows_max(msk))
+ if (--msk->pm.extra_subflows < mptcp_pm_get_limit_extra_subflows(msk))
WRITE_ONCE(msk->pm.accept_subflow, true);
}
@@ -1201,6 +1245,14 @@ static inline void mptcp_pm_close_subflow(struct mptcp_sock *msk)
spin_unlock_bh(&msk->pm.lock);
}
+static inline bool mptcp_pm_add_addr_c_flag_case(struct mptcp_sock *msk)
+{
+ return READ_ONCE(msk->pm.remote_deny_join_id0) &&
+ msk->pm.local_addr_used == 0 &&
+ mptcp_pm_get_limit_add_addr_accepted(msk) == 0 &&
+ msk->pm.extra_subflows < mptcp_pm_get_limit_extra_subflows(msk);
+}
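The predicate groups the conditions under which the in-kernel PM should still act on an incoming ADD_ADDR even though the add_addr_accepted limit is 0: the peer denied joins to the initial address (C-flag), no local endpoint has been used yet, and there is still room for extra subflows. A hypothetical caller sketch (the wrapper name is an assumption; the pm fields are otherwise read under the PM lock, as for __mptcp_pm_close_subflow() above):

/* Illustrative only: evaluate the C-flag special case under the PM lock,
 * mirroring how mptcp_pm_close_subflow() wraps its __-prefixed helper.
 */
static bool example_add_addr_c_flag_case(struct mptcp_sock *msk)
{
	bool ret;

	spin_lock_bh(&msk->pm.lock);
	ret = mptcp_pm_add_addr_c_flag_case(msk);
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}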
+
void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk);
static inline struct mptcp_ext *mptcp_get_ext(const struct sk_buff *skb)