author	Jakub Kicinski <kuba@kernel.org>	2021-06-29 15:45:27 -0700
committer	Jakub Kicinski <kuba@kernel.org>	2021-06-29 15:45:27 -0700
commit	b6df00789e2831fff7a2c65aa7164b2a4dcbe599 (patch)
tree	a94cbeeca3f0ae2fffed008cb287c02dbee4dceb /net
parent	3f8ad50a9e43b6a59070e6c9c5eec79626f81095 (diff)
parent	a118ff661889ecee3ca90f8125bad8fb5bbc07d5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Trivial conflict in net/netfilter/nf_tables_api.c.

Duplicate fix in tools/testing/selftests/net/devlink_port_split.py - take the net-next version.

skmsg, and L4 bpf - keep the bpf code but remove the flags and err params.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'net')
-rw-r--r--	net/can/bcm.c	| 7
-rw-r--r--	net/can/gw.c	| 3
-rw-r--r--	net/can/isotp.c	| 7
-rw-r--r--	net/can/j1939/main.c	| 4
-rw-r--r--	net/can/j1939/socket.c	| 5
-rw-r--r--	net/core/dev.c	| 4
-rw-r--r--	net/core/skmsg.c	| 81
-rw-r--r--	net/core/sock_map.c	| 2
-rw-r--r--	net/ipv4/esp4.c	| 2
-rw-r--r--	net/ipv4/fib_frontend.c	| 2
-rw-r--r--	net/ipv4/route.c	| 3
-rw-r--r--	net/ipv4/tcp_bpf.c	| 24
-rw-r--r--	net/ipv4/udp.c	| 2
-rw-r--r--	net/ipv4/udp_bpf.c	| 47
-rw-r--r--	net/ipv6/esp6.c	| 2
-rw-r--r--	net/ipv6/exthdrs.c	| 31
-rw-r--r--	net/ipv6/ip6_tunnel.c	| 4
-rw-r--r--	net/mptcp/options.c	| 29
-rw-r--r--	net/mptcp/protocol.c	| 5
-rw-r--r--	net/mptcp/protocol.h	| 13
-rw-r--r--	net/mptcp/subflow.c	| 47
-rw-r--r--	net/netfilter/nf_tables_api.c	| 65
-rw-r--r--	net/netfilter/nf_tables_offload.c	| 34
-rw-r--r--	net/netfilter/nft_exthdr.c	| 3
-rw-r--r--	net/netfilter/nft_osf.c	| 5
-rw-r--r--	net/netfilter/nft_tproxy.c	| 9
-rw-r--r--	net/sched/cls_tcindex.c	| 2
-rw-r--r--	net/sched/sch_qfq.c	| 8
-rw-r--r--	net/sctp/bind_addr.c	| 19
-rw-r--r--	net/sctp/input.c	| 11
-rw-r--r--	net/sctp/ipv6.c	| 7
-rw-r--r--	net/sctp/protocol.c	| 7
-rw-r--r--	net/sctp/sm_make_chunk.c	| 42
-rw-r--r--	net/tls/tls_sw.c	| 2
-rw-r--r--	net/vmw_vsock/af_vsock.c	| 2
-rw-r--r--	net/xdp/xsk_queue.h	| 11
-rw-r--r--	net/xfrm/xfrm_device.c	| 1
-rw-r--r--	net/xfrm/xfrm_output.c	| 7
-rw-r--r--	net/xfrm/xfrm_policy.c	| 21
-rw-r--r--	net/xfrm/xfrm_state.c	| 14
-rw-r--r--	net/xfrm/xfrm_user.c	| 28
41 files changed, 381 insertions, 241 deletions
diff --git a/net/can/bcm.c b/net/can/bcm.c
index e15a7dbe5f6c..508f67de0b80 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -785,6 +785,7 @@ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
bcm_rx_handler, op);
list_del(&op->list);
+ synchronize_rcu();
bcm_remove_op(op);
return 1; /* done */
}
@@ -1533,9 +1534,13 @@ static int bcm_release(struct socket *sock)
REGMASK(op->can_id),
bcm_rx_handler, op);
- bcm_remove_op(op);
}
+ synchronize_rcu();
+
+ list_for_each_entry_safe(op, next, &bo->rx_ops, list)
+ bcm_remove_op(op);
+
#if IS_ENABLED(CONFIG_PROC_FS)
/* remove procfs entry */
if (net->can.bcmproc_dir && bo->bcm_proc_read)
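
Both this bcm.c change and the gw.c change below apply the standard RCU teardown ordering: unpublish the object, wait one grace period, then free it, so the rx handler that can_rx_unregister() detaches can never run against freed memory. A minimal kernel-style sketch of the ordering (the op type and helpers are hypothetical):

/* Sketch: unpublish, wait for readers, then free. */
static void example_teardown(struct example_op *op)
{
	list_del_rcu(&op->list);   /* 1. no new RCU readers can find op */
	synchronize_rcu();         /* 2. all pre-existing readers have exited */
	kfree(op);                 /* 3. only now is the free safe */
}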
diff --git a/net/can/gw.c b/net/can/gw.c
index ba4124805602..d8861e862f15 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -596,6 +596,7 @@ static int cgw_notifier(struct notifier_block *nb,
if (gwj->src.dev == dev || gwj->dst.dev == dev) {
hlist_del(&gwj->list);
cgw_unregister_filter(net, gwj);
+ synchronize_rcu();
kmem_cache_free(cgw_cache, gwj);
}
}
@@ -1154,6 +1155,7 @@ static void cgw_remove_all_jobs(struct net *net)
hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
hlist_del(&gwj->list);
cgw_unregister_filter(net, gwj);
+ synchronize_rcu();
kmem_cache_free(cgw_cache, gwj);
}
}
@@ -1222,6 +1224,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh,
hlist_del(&gwj->list);
cgw_unregister_filter(net, gwj);
+ synchronize_rcu();
kmem_cache_free(cgw_cache, gwj);
err = 0;
break;
diff --git a/net/can/isotp.c b/net/can/isotp.c
index 9fd274cf166b..caaa532ece94 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -1030,9 +1030,6 @@ static int isotp_release(struct socket *sock)
lock_sock(sk);
- hrtimer_cancel(&so->txtimer);
- hrtimer_cancel(&so->rxtimer);
-
/* remove current filters & unregister */
if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST))) {
if (so->ifindex) {
@@ -1044,10 +1041,14 @@ static int isotp_release(struct socket *sock)
SINGLE_MASK(so->rxid),
isotp_rcv, sk);
dev_put(dev);
+ synchronize_rcu();
}
}
}
+ hrtimer_cancel(&so->txtimer);
+ hrtimer_cancel(&so->rxtimer);
+
so->ifindex = 0;
so->bound = 0;
diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
index da3a7a7bcff2..08c8606cfd9c 100644
--- a/net/can/j1939/main.c
+++ b/net/can/j1939/main.c
@@ -193,6 +193,10 @@ static void j1939_can_rx_unregister(struct j1939_priv *priv)
can_rx_unregister(dev_net(ndev), ndev, J1939_CAN_ID, J1939_CAN_MASK,
j1939_can_recv, priv);
+ /* The last reference of priv is dropped by the RCU deferred
+ * j1939_sk_sock_destruct() of the last socket, so we can
+ * safely drop this reference here.
+ */
j1939_priv_put(priv);
}
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index bf18a32dc6ae..54f6d521492f 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -398,6 +398,9 @@ static int j1939_sk_init(struct sock *sk)
atomic_set(&jsk->skb_pending, 0);
spin_lock_init(&jsk->sk_session_queue_lock);
INIT_LIST_HEAD(&jsk->sk_session_queue);
+
+ /* j1939_sk_sock_destruct() depends on SOCK_RCU_FREE flag */
+ sock_set_flag(sk, SOCK_RCU_FREE);
sk->sk_destruct = j1939_sk_sock_destruct;
sk->sk_protocol = CAN_J1939;
@@ -673,7 +676,7 @@ static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
switch (optname) {
case SO_J1939_FILTER:
- if (!sockptr_is_null(optval)) {
+ if (!sockptr_is_null(optval) && optlen != 0) {
struct j1939_filter *f;
int c;
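
Setting SOCK_RCU_FREE in j1939_sk_init() makes sk_destruct run only after an RCU grace period, which is what makes lockless receive-side lookups safe. A fragment sketching such a reader (the lookup and delivery helpers are hypothetical):

/* Fragment: receive path relying on SOCK_RCU_FREE. */
rcu_read_lock();
jsk = rcu_dereference(priv->listener);	/* hypothetical RCU-published pointer */
if (jsk)
	deliver(&jsk->sk, skb);		/* sk freeing is deferred past this section */
rcu_read_unlock();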
diff --git a/net/core/dev.c b/net/core/dev.c
index d609366da95c..316b4032317e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5304,9 +5304,9 @@ another_round:
if (static_branch_unlikely(&generic_xdp_needed_key)) {
int ret2;
- preempt_disable();
+ migrate_disable();
ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
- preempt_enable();
+ migrate_enable();
if (ret2 != XDP_PASS) {
ret = NET_RX_DROP;
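
The dev.c hunk replaces preempt_disable() with migrate_disable() around the generic XDP run: BPF programs may take locks that sleep on PREEMPT_RT, which is illegal with preemption disabled, while per-CPU BPF state only requires that the task stay on one CPU. Annotated, the pattern is:

/* Stay on one CPU for the BPF run (per-CPU maps and counters stay
 * consistent) without disabling preemption, which would break
 * sleeping locks on PREEMPT_RT.
 */
migrate_disable();
ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
migrate_enable();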
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index f0b9decdf279..9b6160a191f8 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -399,28 +399,6 @@ out:
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
-int sk_msg_wait_data(struct sock *sk, struct sk_psock *psock, long timeo)
-{
- DEFINE_WAIT_FUNC(wait, woken_wake_function);
- int ret = 0;
-
- if (sk->sk_shutdown & RCV_SHUTDOWN)
- return 1;
-
- if (!timeo)
- return ret;
-
- add_wait_queue(sk_sleep(sk), &wait);
- sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
- ret = sk_wait_event(sk, &timeo,
- !list_empty(&psock->ingress_msg) ||
- !skb_queue_empty(&sk->sk_receive_queue), &wait);
- sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
- remove_wait_queue(sk_sleep(sk), &wait);
- return ret;
-}
-EXPORT_SYMBOL_GPL(sk_msg_wait_data);
-
/* Receive sk_msg from psock->ingress_msg to @msg. */
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
int len, int flags)
@@ -600,6 +578,12 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
return sk_psock_skb_ingress(psock, skb);
}
+static void sock_drop(struct sock *sk, struct sk_buff *skb)
+{
+ sk_drops_add(sk, skb);
+ kfree_skb(skb);
+}
+
static void sk_psock_backlog(struct work_struct *work)
{
struct sk_psock *psock = container_of(work, struct sk_psock, work);
@@ -639,7 +623,7 @@ start:
/* Hard errors break pipe and stop xmit. */
sk_psock_report_error(psock, ret ? -ret : EPIPE);
sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
- kfree_skb(skb);
+ sock_drop(psock->sk, skb);
goto end;
}
off += ret;
@@ -730,7 +714,7 @@ static void __sk_psock_zap_ingress(struct sk_psock *psock)
while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
skb_bpf_redirect_clear(skb);
- kfree_skb(skb);
+ sock_drop(psock->sk, skb);
}
__sk_psock_purge_ingress_msg(psock);
}
@@ -846,7 +830,7 @@ out:
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
-static void sk_psock_skb_redirect(struct sk_buff *skb)
+static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
{
struct sk_psock *psock_other;
struct sock *sk_other;
@@ -856,8 +840,8 @@ static void sk_psock_skb_redirect(struct sk_buff *skb)
* return code, but then didn't set a redirect interface.
*/
if (unlikely(!sk_other)) {
- kfree_skb(skb);
- return;
+ sock_drop(from->sk, skb);
+ return -EIO;
}
psock_other = sk_psock(sk_other);
/* This error indicates the socket is being torn down or had another
@@ -865,26 +849,30 @@ static void sk_psock_skb_redirect(struct sk_buff *skb)
* a socket that is in this state so we drop the skb.
*/
if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
- kfree_skb(skb);
- return;
+ skb_bpf_redirect_clear(skb);
+ sock_drop(from->sk, skb);
+ return -EIO;
}
spin_lock_bh(&psock_other->ingress_lock);
if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
spin_unlock_bh(&psock_other->ingress_lock);
- kfree_skb(skb);
- return;
+ skb_bpf_redirect_clear(skb);
+ sock_drop(from->sk, skb);
+ return -EIO;
}
skb_queue_tail(&psock_other->ingress_skb, skb);
schedule_work(&psock_other->work);
spin_unlock_bh(&psock_other->ingress_lock);
+ return 0;
}
-static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int verdict)
+static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
+ struct sk_psock *from, int verdict)
{
switch (verdict) {
case __SK_REDIRECT:
- sk_psock_skb_redirect(skb);
+ sk_psock_skb_redirect(from, skb);
break;
case __SK_PASS:
case __SK_DROP:
@@ -908,20 +896,21 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
skb->sk = NULL;
}
- sk_psock_tls_verdict_apply(skb, psock->sk, ret);
+ sk_psock_tls_verdict_apply(skb, psock, ret);
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
-static void sk_psock_verdict_apply(struct sk_psock *psock,
- struct sk_buff *skb, int verdict)
+static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
+ int verdict)
{
struct sock *sk_other;
- int err = -EIO;
+ int err = 0;
switch (verdict) {
case __SK_PASS:
+ err = -EIO;
sk_other = psock->sk;
if (sock_flag(sk_other, SOCK_DEAD) ||
!sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
@@ -944,18 +933,25 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
skb_queue_tail(&psock->ingress_skb, skb);
schedule_work(&psock->work);
+ err = 0;
}
spin_unlock_bh(&psock->ingress_lock);
+ if (err < 0) {
+ skb_bpf_redirect_clear(skb);
+ goto out_free;
+ }
}
break;
case __SK_REDIRECT:
- sk_psock_skb_redirect(skb);
+ err = sk_psock_skb_redirect(psock, skb);
break;
case __SK_DROP:
default:
out_free:
- kfree_skb(skb);
+ sock_drop(psock->sk, skb);
}
+
+ return err;
}
static void sk_psock_write_space(struct sock *sk)
@@ -987,7 +983,7 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
sk = strp->sk;
psock = sk_psock(sk);
if (unlikely(!psock)) {
- kfree_skb(skb);
+ sock_drop(sk, skb);
goto out;
}
prog = READ_ONCE(psock->progs.stream_verdict);
@@ -1108,7 +1104,7 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
psock = sk_psock(sk);
if (unlikely(!psock)) {
len = 0;
- kfree_skb(skb);
+ sock_drop(sk, skb);
goto out;
}
prog = READ_ONCE(psock->progs.stream_verdict);
@@ -1122,7 +1118,8 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
skb->sk = NULL;
}
- sk_psock_verdict_apply(psock, skb, ret);
+ if (sk_psock_verdict_apply(psock, skb, ret) < 0)
+ len = 0;
out:
rcu_read_unlock();
return len;
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 6f1b82b8ad49..60decd6420ca 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -48,7 +48,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
bpf_map_init_from_attr(&stab->map, attr);
raw_spin_lock_init(&stab->lock);
- stab->sks = bpf_map_area_alloc(stab->map.max_entries *
+ stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
sizeof(struct sock *),
stab->map.numa_node);
if (!stab->sks) {
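
The (u64) cast matters on 32-bit hosts, where size_t is 32 bits: with a large max_entries, the multiplication wraps before it ever reaches bpf_map_area_alloc(), yielding a tiny allocation for a huge map. A small userspace demonstration of the wrap and the fix:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t max_entries = 0x40000001u;	/* > UINT32_MAX / 4 */
	uint32_t ptr_size = 4;			/* sizeof(struct sock *) on 32-bit */

	/* 32-bit multiply wraps: asks the allocator for 4 bytes */
	uint64_t wrapped = (uint64_t)(max_entries * ptr_size);
	/* widening one operand first, as the fix does, avoids the wrap */
	uint64_t correct = (uint64_t)max_entries * ptr_size;

	printf("wrapped=%llu correct=%llu\n",
	       (unsigned long long)wrapped, (unsigned long long)correct);
	return 0;
}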
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index f5362b9d75eb..f414ad246fdf 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -673,7 +673,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
u32 padto;
- padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
+ padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
if (skb->len < padto)
esp.tfclen = padto - skb->len;
}
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index af8814a11378..a933bd6345b1 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -371,6 +371,8 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
fl4.flowi4_proto = 0;
fl4.fl4_sport = 0;
fl4.fl4_dport = 0;
+ } else {
+ swap(fl4.fl4_sport, fl4.fl4_dport);
}
if (fib_lookup(net, &fl4, &res, 0))
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 66aacb939d3e..99c06944501a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1306,7 +1306,7 @@ INDIRECT_CALLABLE_SCOPE unsigned int ipv4_mtu(const struct dst_entry *dst)
mtu = dst_metric_raw(dst, RTAX_MTU);
if (mtu)
- return mtu;
+ goto out;
mtu = READ_ONCE(dst->dev->mtu);
@@ -1315,6 +1315,7 @@ INDIRECT_CALLABLE_SCOPE unsigned int ipv4_mtu(const struct dst_entry *dst)
mtu = 576;
}
+out:
mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index a80de92ea3b6..f26916a62f25 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -163,6 +163,28 @@ static bool tcp_bpf_stream_read(const struct sock *sk)
return !empty;
}
+static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
+ long timeo)
+{
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int ret = 0;
+
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ return 1;
+
+ if (!timeo)
+ return ret;
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ ret = sk_wait_event(sk, &timeo,
+ !list_empty(&psock->ingress_msg) ||
+ !skb_queue_empty(&sk->sk_receive_queue), &wait);
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ remove_wait_queue(sk_sleep(sk), &wait);
+ return ret;
+}
+
static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len)
{
@@ -188,7 +210,7 @@ msg_bytes_ready:
int data;
timeo = sock_rcvtimeo(sk, nonblock);
- data = sk_msg_wait_data(sk, psock, timeo);
+ data = tcp_msg_wait_data(sk, psock, timeo);
if (data) {
if (!sk_psock_queue_empty(psock))
goto msg_bytes_ready;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f86ccbf7c135..62682807b4b2 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1798,11 +1798,13 @@ int udp_read_sock(struct sock *sk, read_descriptor_t *desc,
if (used <= 0) {
if (!copied)
copied = used;
+ kfree_skb(skb);
break;
} else if (used <= skb->len) {
copied += used;
}
+ kfree_skb(skb);
if (!desc->count)
break;
}
diff --git a/net/ipv4/udp_bpf.c b/net/ipv4/udp_bpf.c
index b07e4b6dda25..45b8782aec0c 100644
--- a/net/ipv4/udp_bpf.c
+++ b/net/ipv4/udp_bpf.c
@@ -21,6 +21,45 @@ static int sk_udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
return udp_prot.recvmsg(sk, msg, len, noblock, flags, addr_len);
}
+static bool udp_sk_has_data(struct sock *sk)
+{
+ return !skb_queue_empty(&udp_sk(sk)->reader_queue) ||
+ !skb_queue_empty(&sk->sk_receive_queue);
+}
+
+static bool psock_has_data(struct sk_psock *psock)
+{
+ return !skb_queue_empty(&psock->ingress_skb) ||
+ !sk_psock_queue_empty(psock);
+}
+
+#define udp_msg_has_data(__sk, __psock) \
+ ({ udp_sk_has_data(__sk) || psock_has_data(__psock); })
+
+static int udp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
+ long timeo)
+{
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int ret = 0;
+
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ return 1;
+
+ if (!timeo)
+ return ret;
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ ret = udp_msg_has_data(sk, psock);
+ if (!ret) {
+ wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
+ ret = udp_msg_has_data(sk, psock);
+ }
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+ remove_wait_queue(sk_sleep(sk), &wait);
+ return ret;
+}
+
static int udp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len)
{
@@ -34,8 +73,7 @@ static int udp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (unlikely(!psock))
return sk_udp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
- lock_sock(sk);
- if (sk_psock_queue_empty(psock)) {
+ if (!psock_has_data(psock)) {
ret = sk_udp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
goto out;
}
@@ -47,9 +85,9 @@ msg_bytes_ready:
int data;
timeo = sock_rcvtimeo(sk, nonblock);
- data = sk_msg_wait_data(sk, psock, timeo);
+ data = udp_msg_wait_data(sk, psock, timeo);
if (data) {
- if (!sk_psock_queue_empty(psock))
+ if (psock_has_data(psock))
goto msg_bytes_ready;
ret = sk_udp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
goto out;
@@ -58,7 +96,6 @@ msg_bytes_ready:
}
ret = copied;
out:
- release_sock(sk);
sk_psock_put(sk, psock);
return ret;
}
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 37c4b1726c5e..ed2f061b8768 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -708,7 +708,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
u32 padto;
- padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
+ padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
if (skb->len < padto)
esp.tfclen = padto - skb->len;
}
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 56e479d158b7..26882e165c9e 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -135,18 +135,23 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs,
len -= 2;
while (len > 0) {
- int optlen = nh[off + 1] + 2;
- int i;
+ int optlen, i;
- switch (nh[off]) {
- case IPV6_TLV_PAD1:
- optlen = 1;
+ if (nh[off] == IPV6_TLV_PAD1) {
padlen++;
if (padlen > 7)
goto bad;
- break;
+ off++;
+ len--;
+ continue;
+ }
+ if (len < 2)
+ goto bad;
+ optlen = nh[off + 1] + 2;
+ if (optlen > len)
+ goto bad;
- case IPV6_TLV_PADN:
+ if (nh[off] == IPV6_TLV_PADN) {
/* RFC 2460 states that the purpose of PadN is
* to align the containing header to multiples
* of 8. 7 is therefore the highest valid value.
@@ -163,12 +168,7 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs,
if (nh[off + i] != 0)
goto bad;
}
- break;
-
- default: /* Other TLV code so scan list */
- if (optlen > len)
- goto bad;
-
+ } else {
tlv_count++;
if (tlv_count > max_count)
goto bad;
@@ -188,7 +188,6 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs,
return false;
padlen = 0;
- break;
}
off += optlen;
len -= optlen;
@@ -306,7 +305,7 @@ fail_and_free:
#endif
if (ip6_parse_tlv(tlvprocdestopt_lst, skb,
- init_net.ipv6.sysctl.max_dst_opts_cnt)) {
+ net->ipv6.sysctl.max_dst_opts_cnt)) {
skb->transport_header += extlen;
opt = IP6CB(skb);
#if IS_ENABLED(CONFIG_IPV6_MIP6)
@@ -1037,7 +1036,7 @@ fail_and_free:
opt->flags |= IP6SKB_HOPBYHOP;
if (ip6_parse_tlv(tlvprochopopt_lst, skb,
- init_net.ipv6.sysctl.max_hbh_opts_cnt)) {
+ net->ipv6.sysctl.max_hbh_opts_cnt)) {
skb->transport_header += extlen;
opt = IP6CB(skb);
opt->nhoff = sizeof(struct ipv6hdr);
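
The rewritten ip6_parse_tlv() loop enforces the two invariants every TLV walk needs: Pad1 is a single byte with no length field, and any other option must have both its two header bytes and its declared length inside the remaining buffer (the old code read nh[off + 1] before checking that len >= 2). A standalone sketch of the same bounds discipline:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define TLV_PAD1 0

/* Bounds-checked TLV walk, mirroring the invariants of the fix above. */
static bool parse_tlv(const uint8_t *nh, size_t len)
{
	size_t off = 0;

	while (len > 0) {
		if (nh[off] == TLV_PAD1) {	/* one byte, no length field */
			off++;
			len--;
			continue;
		}
		if (len < 2)			/* need type + length bytes */
			return false;
		size_t optlen = nh[off + 1] + 2;
		if (optlen > len)		/* declared length must fit */
			return false;
		/* ... dispatch on nh[off], payload is nh[off+2..off+optlen) ... */
		off += optlen;
		len -= optlen;
	}
	return true;
}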
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 0b8a38687ce4..322698d9fcf4 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1240,8 +1240,6 @@ route_lookup:
if (max_headroom > dev->needed_headroom)
dev->needed_headroom = max_headroom;
- skb_set_inner_ipproto(skb, proto);
-
err = ip6_tnl_encap(skb, t, &proto, fl6);
if (err)
return err;
@@ -1378,6 +1376,8 @@ ipxip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev,
if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
return -1;
+ skb_set_inner_ipproto(skb, protocol);
+
err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
protocol);
if (err != 0) {
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index a05270996613..b5850afea343 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -942,19 +942,20 @@ reset:
return false;
}
-static u64 expand_ack(u64 old_ack, u64 cur_ack, bool use_64bit)
+u64 __mptcp_expand_seq(u64 old_seq, u64 cur_seq)
{
- u32 old_ack32, cur_ack32;
-
- if (use_64bit)
- return cur_ack;
-
- old_ack32 = (u32)old_ack;
- cur_ack32 = (u32)cur_ack;
- cur_ack = (old_ack & GENMASK_ULL(63, 32)) + cur_ack32;
- if (unlikely(before(cur_ack32, old_ack32)))
- return cur_ack + (1LL << 32);
- return cur_ack;
+ u32 old_seq32, cur_seq32;
+
+ old_seq32 = (u32)old_seq;
+ cur_seq32 = (u32)cur_seq;
+ cur_seq = (old_seq & GENMASK_ULL(63, 32)) + cur_seq32;
+ if (unlikely(cur_seq32 < old_seq32 && before(old_seq32, cur_seq32)))
+ return cur_seq + (1LL << 32);
+
+ /* reverse wrap could happen, too */
+ if (unlikely(cur_seq32 > old_seq32 && after(old_seq32, cur_seq32)))
+ return cur_seq - (1LL << 32);
+ return cur_seq;
}
static void ack_update_msk(struct mptcp_sock *msk,
@@ -972,7 +973,7 @@ static void ack_update_msk(struct mptcp_sock *msk,
* more dangerous than missing an ack
*/
old_snd_una = msk->snd_una;
- new_snd_una = expand_ack(old_snd_una, mp_opt->data_ack, mp_opt->ack64);
+ new_snd_una = mptcp_expand_seq(old_snd_una, mp_opt->data_ack, mp_opt->ack64);
/* ACK for data not even sent yet? Ignore. */
if (after64(new_snd_una, snd_nxt))
@@ -1009,7 +1010,7 @@ bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool us
return false;
WRITE_ONCE(msk->rcv_data_fin_seq,
- expand_ack(READ_ONCE(msk->ack_seq), data_fin_seq, use_64bit));
+ mptcp_expand_seq(READ_ONCE(msk->ack_seq), data_fin_seq, use_64bit));
WRITE_ONCE(msk->rcv_data_fin, 1);
return true;
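
__mptcp_expand_seq() rebuilds a 64-bit sequence number from a 32-bit one: splice the 32-bit value under the upper half of the last known 64-bit value, then shift up by one 2^32 window if the 32-bit counter wrapped forward, or shift back if a pre-wrap value arrives after the wrap. A standalone re-derivation with two worked cases:

#include <stdint.h>
#include <stdio.h>

/* before32(a, b): a precedes b in 32-bit serial-number arithmetic */
static int before32(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

static uint64_t expand_seq(uint64_t old_seq, uint64_t cur_seq)
{
	uint32_t old32 = (uint32_t)old_seq;
	uint32_t cur32 = (uint32_t)cur_seq;

	cur_seq = (old_seq & 0xffffffff00000000ULL) + cur32;
	if (cur32 < old32 && before32(old32, cur32))
		return cur_seq + (1ULL << 32);	/* forward wrap */
	if (cur32 > old32 && before32(cur32, old32))
		return cur_seq - (1ULL << 32);	/* reverse wrap */
	return cur_seq;
}

int main(void)
{
	/* forward: old 0x1fffffff0, peer sends 0x10 -> 0x200000010 */
	printf("%llx\n", (unsigned long long)expand_seq(0x1fffffff0ULL, 0x10));
	/* reverse: old 0x200000010, stale 0xfffffff0 -> 0x1fffffff0 */
	printf("%llx\n",
	       (unsigned long long)expand_seq(0x200000010ULL, 0xfffffff0ULL));
	return 0;
}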
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 7bb82424e551..7a5afa8c6866 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2896,6 +2896,11 @@ static void mptcp_release_cb(struct sock *sk)
spin_lock_bh(&sk->sk_lock.slock);
}
+ /* be sure to set the current sk state before taking actions
+ * depending on sk_state
+ */
+ if (test_and_clear_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags))
+ __mptcp_set_connected(sk);
if (test_and_clear_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags))
__mptcp_clean_una_wakeup(sk);
if (test_and_clear_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags))
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index d8ad3270dfab..426ed80fe72f 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -112,6 +112,7 @@
#define MPTCP_ERROR_REPORT 8
#define MPTCP_RETRANSMIT 9
#define MPTCP_WORK_SYNC_SETSOCKOPT 10
+#define MPTCP_CONNECTED 11
static inline bool before64(__u64 seq1, __u64 seq2)
{
@@ -600,6 +601,7 @@ void mptcp_get_options(const struct sock *sk,
struct mptcp_options_received *mp_opt);
void mptcp_finish_connect(struct sock *sk);
+void __mptcp_set_connected(struct sock *sk);
static inline bool mptcp_is_fully_established(struct sock *sk)
{
return inet_sk_state_load(sk) == TCP_ESTABLISHED &&
@@ -614,6 +616,14 @@ int mptcp_setsockopt(struct sock *sk, int level, int optname,
int mptcp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *option);
+u64 __mptcp_expand_seq(u64 old_seq, u64 cur_seq);
+static inline u64 mptcp_expand_seq(u64 old_seq, u64 cur_seq, bool use_64bit)
+{
+ if (use_64bit)
+ return cur_seq;
+
+ return __mptcp_expand_seq(old_seq, cur_seq);
+}
void __mptcp_check_push(struct sock *sk, struct sock *ssk);
void __mptcp_data_acked(struct sock *sk);
void __mptcp_error_report(struct sock *sk);
@@ -775,9 +785,6 @@ unsigned int mptcp_pm_get_add_addr_accept_max(struct mptcp_sock *msk);
unsigned int mptcp_pm_get_subflows_max(struct mptcp_sock *msk);
unsigned int mptcp_pm_get_local_addr_max(struct mptcp_sock *msk);
-int mptcp_setsockopt(struct sock *sk, int level, int optname,
- sockptr_t optval, unsigned int optlen);
-
void mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk);
void mptcp_sockopt_sync_all(struct mptcp_sock *msk);
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 706a26a1b0fe..66d0b1893d26 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -373,6 +373,24 @@ static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct soc
return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
}
+void __mptcp_set_connected(struct sock *sk)
+{
+ if (sk->sk_state == TCP_SYN_SENT) {
+ inet_sk_state_store(sk, TCP_ESTABLISHED);
+ sk->sk_state_change(sk);
+ }
+}
+
+static void mptcp_set_connected(struct sock *sk)
+{
+ mptcp_data_lock(sk);
+ if (!sock_owned_by_user(sk))
+ __mptcp_set_connected(sk);
+ else
+ set_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags);
+ mptcp_data_unlock(sk);
+}
+
static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
@@ -381,10 +399,6 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);
- if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
- inet_sk_state_store(parent, TCP_ESTABLISHED);
- parent->sk_state_change(parent);
- }
/* be sure no special action on any packet other than syn-ack */
if (subflow->conn_finished)
@@ -417,6 +431,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->remote_key);
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
mptcp_finish_connect(sk);
+ mptcp_set_connected(parent);
} else if (subflow->request_join) {
u8 hmac[SHA256_DIGEST_SIZE];
@@ -457,6 +472,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
} else if (mptcp_check_fallback(sk)) {
fallback:
mptcp_rcv_space_init(mptcp_sk(parent), sk);
+ mptcp_set_connected(parent);
}
return;
@@ -564,6 +580,7 @@ static void mptcp_sock_destruct(struct sock *sk)
static void mptcp_force_close(struct sock *sk)
{
+ /* the msk is not yet exposed to user-space */
inet_sk_state_store(sk, TCP_CLOSE);
sk_common_release(sk);
}
@@ -781,15 +798,6 @@ enum mapping_status {
MAPPING_DUMMY
};
-static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
-{
- if ((u32)seq == (u32)old_seq)
- return old_seq;
-
- /* Assume map covers data not mapped yet. */
- return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
-}
-
static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
{
pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
@@ -995,13 +1003,7 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
data_len--;
}
- if (!mpext->dsn64) {
- map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
- mpext->data_seq);
- pr_debug("expanded seq=%llu", subflow->map_seq);
- } else {
- map_seq = mpext->data_seq;
- }
+ map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64);
WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);
if (subflow->map_valid) {
@@ -1592,10 +1594,7 @@ static void subflow_state_change(struct sock *sk)
mptcp_rcv_space_init(mptcp_sk(parent), sk);
pr_fallback(mptcp_sk(parent));
subflow->conn_finished = 1;
- if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
- inet_sk_state_store(parent, TCP_ESTABLISHED);
- parent->sk_state_change(parent);
- }
+ mptcp_set_connected(parent);
}
/* as recvmsg() does not acquire the subflow socket for ssk selection
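
mptcp_set_connected() above is the usual owned-socket deferral idiom: softirq context cannot wait for the socket lock, so it either applies the state change immediately when the lock is free, or records a flag that mptcp_release_cb() replays once user context releases the lock. Its skeleton (event and flag names are hypothetical):

/* Owned-socket deferral skeleton. */
static void signal_event(struct sock *sk)
{
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		__handle_event(sk);			/* lock not held: act now */
	else
		set_bit(MY_EVENT, &my_sk(sk)->flags);	/* replayed in release_cb */
	bh_unlock_sock(sk);
}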
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index d6214242fe7f..390d4466567f 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -571,7 +571,7 @@ static struct nft_table *nft_table_lookup(const struct net *net,
table->family == family &&
nft_active_genmask(table, genmask)) {
if (nft_table_has_owner(table) &&
- table->nlpid != nlpid)
+ nlpid && table->nlpid != nlpid)
return ERR_PTR(-EPERM);
return table;
@@ -583,7 +583,7 @@ static struct nft_table *nft_table_lookup(const struct net *net,
static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
const struct nlattr *nla,
- u8 genmask)
+ u8 genmask, u32 nlpid)
{
struct nftables_pernet *nft_net;
struct nft_table *table;
@@ -591,8 +591,13 @@ static struct nft_table *nft_table_lookup_byhandle(const struct net *net,
nft_net = nft_pernet(net);
list_for_each_entry(table, &nft_net->tables, list) {
if (be64_to_cpu(nla_get_be64(nla)) == table->handle &&
- nft_active_genmask(table, genmask))
+ nft_active_genmask(table, genmask)) {
+ if (nft_table_has_owner(table) &&
+ nlpid && table->nlpid != nlpid)
+ return ERR_PTR(-EPERM);
+
return table;
+ }
}
return ERR_PTR(-ENOENT);
@@ -1276,7 +1281,8 @@ static int nf_tables_deltable(struct sk_buff *skb, const struct nfnl_info *info,
if (nla[NFTA_TABLE_HANDLE]) {
attr = nla[NFTA_TABLE_HANDLE];
- table = nft_table_lookup_byhandle(net, attr, genmask);
+ table = nft_table_lookup_byhandle(net, attr, genmask,
+ NETLINK_CB(skb).portid);
} else {
attr = nla[NFTA_TABLE_NAME];
table = nft_table_lookup(net, attr, family, genmask,
@@ -3237,8 +3243,8 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
struct nft_rule *rule, *old_rule = NULL;
struct nft_expr_info *expr_info = NULL;
u8 family = info->nfmsg->nfgen_family;
+ struct nft_flow_rule *flow = NULL;
struct net *net = info->net;
- struct nft_flow_rule *flow;
struct nft_userdata *udata;
struct nft_table *table;
struct nft_chain *chain;
@@ -3333,13 +3339,13 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
nla_for_each_nested(tmp, nla[NFTA_RULE_EXPRESSIONS], rem) {
err = -EINVAL;
if (nla_type(tmp) != NFTA_LIST_ELEM)
- goto err1;
+ goto err_release_expr;
if (n == NFT_RULE_MAXEXPRS)
- goto err1;
+ goto err_release_expr;
err = nf_tables_expr_parse(&ctx, tmp, &expr_info[n]);
if (err < 0) {
NL_SET_BAD_ATTR(extack, tmp);
- goto err1;
+ goto err_release_expr;
}
size += expr_info[n].ops->size;
n++;
@@ -3348,7 +3354,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
/* Check for overflow of dlen field */
err = -EFBIG;
if (size >= 1 << 12)
- goto err1;
+ goto err_release_expr;
if (nla[NFTA_RULE_USERDATA]) {
ulen = nla_len(nla[NFTA_RULE_USERDATA]);
@@ -3359,7 +3365,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
err = -ENOMEM;
rule = kzalloc(sizeof(*rule) + size + usize, GFP_KERNEL);
if (rule == NULL)
- goto err1;
+ goto err_release_expr;
nft_activate_next(net, rule);
@@ -3378,7 +3384,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
err = nf_tables_newexpr(&ctx, &expr_info[i], expr);
if (err < 0) {
NL_SET_BAD_ATTR(extack, expr_info[i].attr);
- goto err2;
+ goto err_release_rule;
}
if (expr_info[i].ops->validate)
@@ -3388,16 +3394,24 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
expr = nft_expr_next(expr);
}
+ if (chain->flags & NFT_CHAIN_HW_OFFLOAD) {
+ flow = nft_flow_rule_create(net, rule);
+ if (IS_ERR(flow)) {
+ err = PTR_ERR(flow);
+ goto err_release_rule;
+ }
+ }
+
if (info->nlh->nlmsg_flags & NLM_F_REPLACE) {
trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
if (trans == NULL) {
err = -ENOMEM;
- goto err2;
+ goto err_destroy_flow_rule;
}
err = nft_delrule(&ctx, old_rule);
if (err < 0) {
nft_trans_destroy(trans);
- goto err2;
+ goto err_destroy_flow_rule;
}
list_add_tail_rcu(&rule->list, &old_rule->list);
@@ -3405,7 +3419,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
if (!trans) {
err = -ENOMEM;
- goto err2;
+ goto err_destroy_flow_rule;
}
if (info->nlh->nlmsg_flags & NLM_F_APPEND) {
@@ -3423,21 +3437,19 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info,
kvfree(expr_info);
chain->use++;
+ if (flow)
+ nft_trans_flow_rule(trans) = flow;
+
if (nft_net->validate_state == NFT_VALIDATE_DO)
return nft_table_validate(net, table);
- if (chain->flags & NFT_CHAIN_HW_OFFLOAD) {
- flow = nft_flow_rule_create(net, rule);
- if (IS_ERR(flow))
- return PTR_ERR(flow);
-
- nft_trans_flow_rule(trans) = flow;
- }
-
return 0;
-err2:
+
+err_destroy_flow_rule:
+ nft_flow_rule_destroy(flow);
+err_release_rule:
nf_tables_rule_release(&ctx, rule);
-err1:
+err_release_expr:
for (i = 0; i < n; i++) {
if (expr_info[i].ops) {
module_put(expr_info[i].ops->type->owner);
@@ -8805,11 +8817,16 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
nft_rule_expr_deactivate(&trans->ctx,
nft_trans_rule(trans),
NFT_TRANS_ABORT);
+ if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
+ nft_flow_rule_destroy(nft_trans_flow_rule(trans));
break;
case NFT_MSG_DELRULE:
trans->ctx.chain->use++;
nft_clear(trans->ctx.net, nft_trans_rule(trans));
nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans));
+ if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)
+ nft_flow_rule_destroy(nft_trans_flow_rule(trans));
+
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWSET:
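
Besides moving nft_flow_rule_create() before the transaction is queued, the nf_tables_newrule() rework renames the numbered err1/err2 labels into the kernel's descriptive reverse-order unwind style, where each label releases exactly what was acquired before the corresponding goto. The shape of that idiom (resources and helpers are hypothetical):

/* Reverse-order unwind idiom. */
static int example_setup(void)
{
	struct res_a *a;
	struct res_b *b;
	int err = -ENOMEM;

	a = alloc_a();
	if (!a)
		return err;

	b = alloc_b();
	if (!b)
		goto err_free_a;

	err = publish(a, b);
	if (err)
		goto err_free_b;

	return 0;

err_free_b:
	free_b(b);
err_free_a:
	free_a(a);
	return err;
}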
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index a48c5fd53a80..b58d73a96523 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -54,15 +54,10 @@ static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow)
{
struct nft_flow_match *match = &flow->match;
- struct nft_offload_ethertype ethertype;
-
- if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL) &&
- match->key.basic.n_proto != htons(ETH_P_8021Q) &&
- match->key.basic.n_proto != htons(ETH_P_8021AD))
- return;
-
- ethertype.value = match->key.basic.n_proto;
- ethertype.mask = match->mask.basic.n_proto;
+ struct nft_offload_ethertype ethertype = {
+ .value = match->key.basic.n_proto,
+ .mask = match->mask.basic.n_proto,
+ };
if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_VLAN) &&
(match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) ||
@@ -76,7 +71,9 @@ static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] =
offsetof(struct nft_flow_key, cvlan);
match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
- } else {
+ } else if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC) &&
+ (match->key.basic.n_proto == htons(ETH_P_8021Q) ||
+ match->key.basic.n_proto == htons(ETH_P_8021AD))) {
match->key.basic.n_proto = match->key.vlan.vlan_tpid;
match->mask.basic.n_proto = match->mask.vlan.vlan_tpid;
match->key.vlan.vlan_tpid = ethertype.value;
@@ -594,23 +591,6 @@ int nft_flow_rule_offload_commit(struct net *net)
}
}
- list_for_each_entry(trans, &nft_net->commit_list, list) {
- if (trans->ctx.family != NFPROTO_NETDEV)
- continue;
-
- switch (trans->msg_type) {
- case NFT_MSG_NEWRULE:
- case NFT_MSG_DELRULE:
- if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
- continue;
-
- nft_flow_rule_destroy(nft_trans_flow_rule(trans));
- break;
- default:
- break;
- }
- }
-
return err;
}
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index 4f583d2e220e..af4ee874a067 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -44,6 +44,9 @@ static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
unsigned int offset = 0;
int err;
+ if (pkt->skb->protocol != htons(ETH_P_IPV6))
+ goto err;
+
err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
if (priv->flags & NFT_EXTHDR_F_PRESENT) {
nft_reg_store8(dest, err >= 0);
diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
index ac61f708b82d..d82677e83400 100644
--- a/net/netfilter/nft_osf.c
+++ b/net/netfilter/nft_osf.c
@@ -28,6 +28,11 @@ static void nft_osf_eval(const struct nft_expr *expr, struct nft_regs *regs,
struct nf_osf_data data;
struct tcphdr _tcph;
+ if (pkt->tprot != IPPROTO_TCP) {
+ regs->verdict.code = NFT_BREAK;
+ return;
+ }
+
tcp = skb_header_pointer(skb, ip_hdrlen(skb),
sizeof(struct tcphdr), &_tcph);
if (!tcp) {
diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c
index 18e79c0fd3cf..b5b09a902c7a 100644
--- a/net/netfilter/nft_tproxy.c
+++ b/net/netfilter/nft_tproxy.c
@@ -30,6 +30,12 @@ static void nft_tproxy_eval_v4(const struct nft_expr *expr,
__be16 tport = 0;
struct sock *sk;
+ if (pkt->tprot != IPPROTO_TCP &&
+ pkt->tprot != IPPROTO_UDP) {
+ regs->verdict.code = NFT_BREAK;
+ return;
+ }
+
hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr);
if (!hp) {
regs->verdict.code = NFT_BREAK;
@@ -91,7 +97,8 @@ static void nft_tproxy_eval_v6(const struct nft_expr *expr,
memset(&taddr, 0, sizeof(taddr));
- if (!pkt->tprot_set) {
+ if (pkt->tprot != IPPROTO_TCP &&
+ pkt->tprot != IPPROTO_UDP) {
regs->verdict.code = NFT_BREAK;
return;
}
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index c4007b9cd16d..5b274534264c 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -304,7 +304,7 @@ static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
int i, err = 0;
cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result),
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_NOWARN);
if (!cp->perfect)
return -ENOMEM;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 1db9d4a2ef5e..b692a0de1ad5 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -485,11 +485,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (cl->qdisc != &noop_qdisc)
qdisc_hash_add(cl->qdisc, true);
- sch_tree_lock(sch);
- qdisc_class_hash_insert(&q->clhash, &cl->common);
- sch_tree_unlock(sch);
-
- qdisc_class_hash_grow(sch, &q->clhash);
set_change_agg:
sch_tree_lock(sch);
@@ -507,8 +502,11 @@ set_change_agg:
}
if (existing)
qfq_deact_rm_from_agg(q, cl);
+ else
+ qdisc_class_hash_insert(&q->clhash, &cl->common);
qfq_add_to_agg(q, new_agg, cl);
sch_tree_unlock(sch);
+ qdisc_class_hash_grow(sch, &q->clhash);
*arg = (unsigned long)cl;
return 0;
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 53e5ed79f63f..59e653b528b1 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -270,22 +270,19 @@ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list,
rawaddr = (union sctp_addr_param *)raw_addr_list;
af = sctp_get_af_specific(param_type2af(param->type));
- if (unlikely(!af)) {
+ if (unlikely(!af) ||
+ !af->from_addr_param(&addr, rawaddr, htons(port), 0)) {
retval = -EINVAL;
- sctp_bind_addr_clean(bp);
- break;
+ goto out_err;
}
- af->from_addr_param(&addr, rawaddr, htons(port), 0);
if (sctp_bind_addr_state(bp, &addr) != -1)
goto next;
retval = sctp_add_bind_addr(bp, &addr, sizeof(addr),
SCTP_ADDR_SRC, gfp);
- if (retval) {
+ if (retval)
/* Can't finish building the list, clean up. */
- sctp_bind_addr_clean(bp);
- break;
- }
+ goto out_err;
next:
len = ntohs(param->length);
@@ -294,6 +291,12 @@ next:
}
return retval;
+
+out_err:
+ if (retval)
+ sctp_bind_addr_clean(bp);
+
+ return retval;
}
/********************************************************************
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 76dcc137f761..02e73264e81e 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -1155,7 +1155,8 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
if (!af)
continue;
- af->from_addr_param(paddr, params.addr, sh->source, 0);
+ if (!af->from_addr_param(paddr, params.addr, sh->source, 0))
+ continue;
asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
if (asoc)
@@ -1191,6 +1192,9 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
union sctp_addr_param *param;
union sctp_addr paddr;
+ if (ntohs(ch->length) < sizeof(*asconf) + sizeof(struct sctp_paramhdr))
+ return NULL;
+
/* Skip over the ADDIP header and find the Address parameter */
param = (union sctp_addr_param *)(asconf + 1);
@@ -1198,7 +1202,8 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
if (unlikely(!af))
return NULL;
- af->from_addr_param(&paddr, param, peer_port, 0);
+ if (af->from_addr_param(&paddr, param, peer_port, 0))
+ return NULL;
return __sctp_lookup_association(net, laddr, &paddr, transportp);
}
@@ -1269,7 +1274,7 @@ static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
ch = (struct sctp_chunkhdr *)ch_end;
chunk_num++;
- } while (ch_end < skb_tail_pointer(skb));
+ } while (ch_end + sizeof(*ch) < skb_tail_pointer(skb));
return asoc;
}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index d041bed86322..e48dd909dee5 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -577,15 +577,20 @@ static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
}
/* Initialize a sctp_addr from an address parameter. */
-static void sctp_v6_from_addr_param(union sctp_addr *addr,
+static bool sctp_v6_from_addr_param(union sctp_addr *addr,
union sctp_addr_param *param,
__be16 port, int iif)
{
+ if (ntohs(param->v6.param_hdr.length) < sizeof(struct sctp_ipv6addr_param))
+ return false;
+
addr->v6.sin6_family = AF_INET6;
addr->v6.sin6_port = port;
addr->v6.sin6_flowinfo = 0; /* BUG */
addr->v6.sin6_addr = param->v6.addr;
addr->v6.sin6_scope_id = iif;
+
+ return true;
}
/* Initialize an address parameter from a sctp_addr and return the length
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index bc5db0b404ce..3c1fbf38f4f7 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -254,14 +254,19 @@ static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
}
/* Initialize a sctp_addr from an address parameter. */
-static void sctp_v4_from_addr_param(union sctp_addr *addr,
+static bool sctp_v4_from_addr_param(union sctp_addr *addr,
union sctp_addr_param *param,
__be16 port, int iif)
{
+ if (ntohs(param->v4.param_hdr.length) < sizeof(struct sctp_ipv4addr_param))
+ return false;
+
addr->v4.sin_family = AF_INET;
addr->v4.sin_port = port;
addr->v4.sin_addr.s_addr = param->v4.addr.s_addr;
memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
+
+ return true;
}
/* Initialize an address parameter from a sctp_addr and return the length
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index b0eaa93a9cc6..6c08e5048d38 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2195,9 +2195,16 @@ static enum sctp_ierror sctp_verify_param(struct net *net,
break;
case SCTP_PARAM_SET_PRIMARY:
- if (ep->asconf_enable)
- break;
- goto unhandled;
+ if (!ep->asconf_enable)
+ goto unhandled;
+
+ if (ntohs(param.p->length) < sizeof(struct sctp_addip_param) +
+ sizeof(struct sctp_paramhdr)) {
+ sctp_process_inv_paramlength(asoc, param.p,
+ chunk, err_chunk);
+ retval = SCTP_IERROR_ABORT;
+ }
+ break;
case SCTP_PARAM_HOST_NAME_ADDRESS:
/* Tell the peer, we won't support this param. */
@@ -2375,11 +2382,13 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
/* Process the initialization parameters. */
sctp_walk_params(param, peer_init, init_hdr.params) {
- if (!src_match && (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
- param.p->type == SCTP_PARAM_IPV6_ADDRESS)) {
+ if (!src_match &&
+ (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
+ param.p->type == SCTP_PARAM_IPV6_ADDRESS)) {
af = sctp_get_af_specific(param_type2af(param.p->type));
- af->from_addr_param(&addr, param.addr,
- chunk->sctp_hdr->source, 0);
+ if (!af->from_addr_param(&addr, param.addr,
+ chunk->sctp_hdr->source, 0))
+ continue;
if (sctp_cmp_addr_exact(sctp_source(chunk), &addr))
src_match = 1;
}
@@ -2560,7 +2569,8 @@ static int sctp_process_param(struct sctp_association *asoc,
break;
do_addr_param:
af = sctp_get_af_specific(param_type2af(param.p->type));
- af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0);
+ if (!af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0))
+ break;
scope = sctp_scope(peer_addr);
if (sctp_in_scope(net, &addr, scope))
if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED))
@@ -2661,15 +2671,13 @@ do_addr_param:
addr_param = param.v + sizeof(struct sctp_addip_param);
af = sctp_get_af_specific(param_type2af(addr_param->p.type));
- if (af == NULL)
+ if (!af)
break;
- af->from_addr_param(&addr, addr_param,
- htons(asoc->peer.port), 0);
+ if (!af->from_addr_param(&addr, addr_param,
+ htons(asoc->peer.port), 0))
+ break;
- /* if the address is invalid, we can't process it.
- * XXX: see spec for what to do.
- */
if (!af->addr_valid(&addr, NULL, NULL))
break;
@@ -3083,7 +3091,8 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
if (unlikely(!af))
return SCTP_ERROR_DNS_FAILED;
- af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0);
+ if (!af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0))
+ return SCTP_ERROR_DNS_FAILED;
/* ADDIP 4.2.1 This parameter MUST NOT contain a broadcast
* or multicast address.
@@ -3360,7 +3369,8 @@ static void sctp_asconf_param_success(struct sctp_association *asoc,
/* We have checked the packet before, so we do not check again. */
af = sctp_get_af_specific(param_type2af(addr_param->p.type));
- af->from_addr_param(&addr, addr_param, htons(bp->port), 0);
+ if (!af->from_addr_param(&addr, addr_param, htons(bp->port), 0))
+ return;
switch (asconf_param->param_hdr.type) {
case SCTP_PARAM_ADD_IP:
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index f0fbb079cbaa..4feb95e34b64 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1153,7 +1153,7 @@ static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
int ret = 0;
bool eor;
- eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
+ eor = !(flags & MSG_SENDPAGE_NOTLAST);
sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
/* Call the sk_stream functions to manage the sndbuf mem. */
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 9f12da1ff406..3e02cc3b24f8 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1395,7 +1395,7 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
if (signal_pending(current)) {
err = sock_intr_errno(timeout);
- sk->sk_state = TCP_CLOSE;
+ sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
sock->state = SS_UNCONNECTED;
vsock_transport_cancel_pkt(vsk);
goto out_wait;
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 9d2a89d793c0..9ae13cccfb28 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -128,12 +128,15 @@ static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
struct xdp_desc *desc)
{
- u64 chunk;
-
- if (desc->len > pool->chunk_size)
- return false;
+ u64 chunk, chunk_end;
chunk = xp_aligned_extract_addr(pool, desc->addr);
+ if (likely(desc->len)) {
+ chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len - 1);
+ if (chunk != chunk_end)
+ return false;
+ }
+
if (chunk >= pool->addrs_cnt)
return false;
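
The aligned-mode check now derives the chunk of both the first and the last byte (desc->addr + desc->len - 1): a descriptor whose length fits within chunk_size can still straddle a chunk boundary when addr sits near the end of a chunk, which the old len > chunk_size test missed. The arithmetic as a standalone helper, assuming a power-of-two chunk size as in aligned mode:

#include <stdbool.h>
#include <stdint.h>

/* Valid only if the first and last byte land in the same chunk. */
static bool desc_in_one_chunk(uint64_t addr, uint32_t len, uint64_t chunk_size)
{
	uint64_t mask = ~(chunk_size - 1);

	if (!len)
		return true;	/* empty descriptor straddles nothing */
	return (addr & mask) == ((addr + len - 1) & mask);
}

/* e.g. chunk_size = 2048: addr = 2047, len = 2 fits in size but
 * crosses the boundary, so desc_in_one_chunk() returns false.
 */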
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 6d6917b68856..e843b0d9e2a6 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -268,6 +268,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
xso->num_exthdrs = 0;
xso->flags = 0;
xso->dev = NULL;
+ xso->real_dev = NULL;
dev_put(dev);
if (err != -EOPNOTSUPP)
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index ab2fbe432261..229544bc70c2 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -827,15 +827,8 @@ out:
static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
- unsigned int ptr = 0;
int err;
- if (x->outer_mode.encap == XFRM_MODE_BEET &&
- ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL) >= 0) {
- net_warn_ratelimited("BEET mode doesn't support inner IPv6 fragments\n");
- return -EAFNOSUPPORT;
- }
-
err = xfrm6_tunnel_check_size(skb);
if (err)
return err;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 1e24b21457f7..837df4b5c1bc 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2092,12 +2092,15 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
if (unlikely(!daddr || !saddr))
return NULL;
- rcu_read_lock();
retry:
- do {
- sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
- chain = policy_hash_direct(net, daddr, saddr, family, dir);
- } while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));
+ sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
+ rcu_read_lock();
+
+ chain = policy_hash_direct(net, daddr, saddr, family, dir);
+ if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence)) {
+ rcu_read_unlock();
+ goto retry;
+ }
ret = NULL;
hlist_for_each_entry_rcu(pol, chain, bydst) {
@@ -2128,11 +2131,15 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
}
skip_inexact:
- if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
+ if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence)) {
+ rcu_read_unlock();
goto retry;
+ }
- if (ret && !xfrm_pol_hold_rcu(ret))
+ if (ret && !xfrm_pol_hold_rcu(ret)) {
+ rcu_read_unlock();
goto retry;
+ }
fail:
rcu_read_unlock();
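
The xfrm_policy_lookup_bytype() rework pairs each RCU read-side section with exactly one seqcount sample: sample the sequence, enter RCU, walk the hash chains, and on a reported rehash (or a failed refcount grab) drop the RCU lock before retrying rather than spinning inside it. The skeleton (table and seqcount names are hypothetical):

/* Seqcount-validated RCU lookup: every retry restarts outside the
 * RCU section, so a concurrent rehash never traps the reader.
 */
retry:
	seq = read_seqcount_begin(&resize_seq);
	rcu_read_lock();
	obj = hash_lookup(table, key);
	if (read_seqcount_retry(&resize_seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}
	if (obj && !refcount_inc_not_zero(&obj->refcnt)) {
		rcu_read_unlock();
		goto retry;
	}
	rcu_read_unlock();
	/* obj, if non-NULL, is now safely referenced */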
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index c2ce1e6f4760..a2f4001221d1 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2550,7 +2550,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
-u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
+u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
const struct xfrm_type *type = READ_ONCE(x->type);
struct crypto_aead *aead;
@@ -2581,7 +2581,17 @@ u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
net_adj) & ~(blksize - 1)) + net_adj - 2;
}
-EXPORT_SYMBOL_GPL(xfrm_state_mtu);
+EXPORT_SYMBOL_GPL(__xfrm_state_mtu);
+
+u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
+{
+ mtu = __xfrm_state_mtu(x, mtu);
+
+ if (x->props.family == AF_INET6 && mtu < IPV6_MIN_MTU)
+ return IPV6_MIN_MTU;
+
+ return mtu;
+}
int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
{
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index f0aecee4d539..b47d613409b7 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -580,6 +580,20 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
copy_from_user_state(x, p);
+ if (attrs[XFRMA_ENCAP]) {
+ x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
+ sizeof(*x->encap), GFP_KERNEL);
+ if (x->encap == NULL)
+ goto error;
+ }
+
+ if (attrs[XFRMA_COADDR]) {
+ x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
+ sizeof(*x->coaddr), GFP_KERNEL);
+ if (x->coaddr == NULL)
+ goto error;
+ }
+
if (attrs[XFRMA_SA_EXTRA_FLAGS])
x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]);
@@ -600,23 +614,9 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
attrs[XFRMA_ALG_COMP])))
goto error;
- if (attrs[XFRMA_ENCAP]) {
- x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
- sizeof(*x->encap), GFP_KERNEL);
- if (x->encap == NULL)
- goto error;
- }
-
if (attrs[XFRMA_TFCPAD])
x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);
- if (attrs[XFRMA_COADDR]) {
- x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
- sizeof(*x->coaddr), GFP_KERNEL);
- if (x->coaddr == NULL)
- goto error;
- }
-
xfrm_mark_get(attrs, &x->mark);
xfrm_smark_init(attrs, &x->props.smark);