Diffstat (limited to 'net')
-rw-r--r--  net/bridge/netfilter/Kconfig             |  1
-rw-r--r--  net/core/dev.c                           | 12
-rw-r--r--  net/core/dev.h                           |  8
-rw-r--r--  net/core/page_pool.c                     | 29
-rw-r--r--  net/devlink/port.c                       |  2
-rw-r--r--  net/ipv4/netfilter/Kconfig               |  3
-rw-r--r--  net/ipv4/udp_offload.c                   |  2
-rw-r--r--  net/ipv6/netfilter/Kconfig               |  1
-rw-r--r--  net/ipv6/xfrm6_tunnel.c                  |  2
-rw-r--r--  net/kcm/kcmsock.c                        | 10
-rw-r--r--  net/mctp/test/route-test.c               |  1
-rw-r--r--  net/netfilter/ipvs/ip_vs_est.c           |  3
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c     | 65
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c  |  6
-rw-r--r--  net/netfilter/nf_tables_api.c            | 30
-rw-r--r--  net/netfilter/nft_set_pipapo.c           |  5
-rw-r--r--  net/netfilter/nft_set_pipapo_avx2.c      | 12
-rw-r--r--  net/netfilter/nft_socket.c               |  2
-rw-r--r--  net/sched/sch_ets.c                      | 11
-rw-r--r--  net/sctp/input.c                         |  2
-rw-r--r--  net/sunrpc/svcsock.c                     | 43
-rw-r--r--  net/tls/tls.h                            |  2
-rw-r--r--  net/tls/tls_strp.c                       | 11
-rw-r--r--  net/tls/tls_sw.c                         |  3
-rw-r--r--  net/vmw_vsock/af_vsock.c                 |  3
-rw-r--r--  net/xfrm/xfrm_device.c                   | 12
-rw-r--r--  net/xfrm/xfrm_state.c                    |  2
27 files changed, 195 insertions, 88 deletions
diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig
index 60f28e4fb5c0..4fd5a6ea26b4 100644
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@ -43,6 +43,7 @@ config NF_CONNTRACK_BRIDGE
config BRIDGE_NF_EBTABLES_LEGACY
tristate "Legacy EBTABLES support"
depends on BRIDGE && NETFILTER_XTABLES_LEGACY
+ depends on NETFILTER_XTABLES
default n
help
Legacy ebtables packet/frame classifier.
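The same "depends on NETFILTER_XTABLES" tightening recurs for the IPv4 and IPv6
legacy tables further down: each legacy classifier now needs the xtables core
symbol in addition to its legacy toggle. A hedged .config fragment showing one
combination these dependencies accept (symbol values illustrative):

CONFIG_NETFILTER_XTABLES=m
CONFIG_NETFILTER_XTABLES_LEGACY=y
CONFIG_BRIDGE_NF_EBTABLES_LEGACY=m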
diff --git a/net/core/dev.c b/net/core/dev.c
index 68dc47d7e700..5a3c0f40a93f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6999,7 +6999,7 @@ int netif_set_threaded(struct net_device *dev,
enum netdev_napi_threaded threaded)
{
struct napi_struct *napi;
- int err = 0;
+ int i, err = 0;
netdev_assert_locked_or_invisible(dev);
@@ -7021,6 +7021,10 @@ int netif_set_threaded(struct net_device *dev,
list_for_each_entry(napi, &dev->napi_list, dev_list)
WARN_ON_ONCE(napi_set_threaded(napi, threaded));
+ /* Override the config for all NAPIs, even those not currently listed */
+ for (i = 0; i < dev->num_napi_configs; i++)
+ dev->napi_config[i].threaded = threaded;
+
return err;
}
@@ -7353,8 +7357,9 @@ void netif_napi_add_weight_locked(struct net_device *dev,
* Clear dev->threaded if kthread creation failed so that
* threaded mode will not be enabled in napi_enable().
*/
- if (dev->threaded && napi_kthread_create(napi))
- dev->threaded = NETDEV_NAPI_THREADED_DISABLED;
+ if (napi_get_threaded_config(dev, napi))
+ if (napi_kthread_create(napi))
+ dev->threaded = NETDEV_NAPI_THREADED_DISABLED;
netif_napi_set_irq_locked(napi, -1);
}
EXPORT_SYMBOL(netif_napi_add_weight_locked);
@@ -11873,6 +11878,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
goto free_all;
dev->cfg_pending = dev->cfg;
+ dev->num_napi_configs = maxqs;
napi_config_sz = array_size(maxqs, sizeof(*dev->napi_config));
dev->napi_config = kvzalloc(napi_config_sz, GFP_KERNEL_ACCOUNT);
if (!dev->napi_config)
diff --git a/net/core/dev.h b/net/core/dev.h
index ab69edc0c3e3..d6b08d435479 100644
--- a/net/core/dev.h
+++ b/net/core/dev.h
@@ -323,6 +323,14 @@ static inline enum netdev_napi_threaded napi_get_threaded(struct napi_struct *n)
return NETDEV_NAPI_THREADED_DISABLED;
}
+static inline enum netdev_napi_threaded
+napi_get_threaded_config(struct net_device *dev, struct napi_struct *n)
+{
+ if (n->config)
+ return n->config->threaded;
+ return dev->threaded;
+}
+
int napi_set_threaded(struct napi_struct *n,
enum netdev_napi_threaded threaded);
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 05e2e22a8f7c..343a6cac21e3 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -1201,6 +1201,35 @@ void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
pool->xdp_mem_id = mem->id;
}
+/**
+ * page_pool_enable_direct_recycling() - mark page pool as owned by NAPI
+ * @pool: page pool to modify
+ * @napi: NAPI instance to associate the page pool with
+ *
+ * Associate a page pool with a NAPI instance for lockless page recycling.
+ * This is useful when a new page pool has to be added to a NAPI instance
+ * without disabling that NAPI instance, to mark the point at which control
+ * path "hands over" the page pool to the NAPI instance. In most cases driver
+ * can simply set the @napi field in struct page_pool_params, and does not
+ * have to call this helper.
+ *
+ * The function is idempotent, but does not implement any refcounting.
+ * Single page_pool_disable_direct_recycling() will disable recycling,
+ * no matter how many times enable was called.
+ */
+void page_pool_enable_direct_recycling(struct page_pool *pool,
+ struct napi_struct *napi)
+{
+ if (READ_ONCE(pool->p.napi) == napi)
+ return;
+ WARN_ON(!napi || pool->p.napi);
+
+ mutex_lock(&page_pools_lock);
+ WRITE_ONCE(pool->p.napi, napi);
+ mutex_unlock(&page_pools_lock);
+}
+EXPORT_SYMBOL(page_pool_enable_direct_recycling);
+
void page_pool_disable_direct_recycling(struct page_pool *pool)
{
/* Disable direct recycling based on pool->cpuid.
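As the new kernel-doc notes, most drivers simply set the @napi field in struct
page_pool_params at creation time; the helper exists for the late-handover
case. A minimal sketch of that flow, assuming a hypothetical driver queue
struct (my_rxq and its fields are illustrative, not a real API):

#include <linux/err.h>
#include <net/page_pool/helpers.h>

struct my_rxq {
	struct napi_struct napi;
	struct page_pool *pool;
};

static int my_rxq_swap_pool(struct my_rxq *rxq)
{
	/* .napi deliberately left NULL: the pool is not NAPI-owned yet */
	struct page_pool_params pp = {};
	struct page_pool *pool = page_pool_create(&pp);

	if (IS_ERR(pool))
		return PTR_ERR(pool);

	rxq->pool = pool;	/* publish the pool to the datapath first */

	/* handover point: lockless direct recycling is allowed from here */
	page_pool_enable_direct_recycling(pool, &rxq->napi);
	return 0;
}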
diff --git a/net/devlink/port.c b/net/devlink/port.c
index 939081a0e615..cb8d4df61619 100644
--- a/net/devlink/port.c
+++ b/net/devlink/port.c
@@ -1519,7 +1519,7 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
struct devlink_port_attrs *attrs = &devlink_port->attrs;
int n = 0;
- if (!devlink_port->attrs_set)
+ if (!devlink_port->attrs_set || devlink_port->attrs.no_phys_port_name)
return -EOPNOTSUPP;
switch (attrs->flavour) {
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 2c438b140e88..7dc9772fe2d8 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -14,6 +14,7 @@ config NF_DEFRAG_IPV4
config IP_NF_IPTABLES_LEGACY
tristate "Legacy IP tables support"
depends on NETFILTER_XTABLES_LEGACY
+ depends on NETFILTER_XTABLES
default m if NETFILTER_XTABLES_LEGACY
help
iptables is a legacy packet classifier.
@@ -326,6 +327,7 @@ endif # IP_NF_IPTABLES
config IP_NF_ARPTABLES
tristate "Legacy ARPTABLES support"
depends on NETFILTER_XTABLES_LEGACY
+ depends on NETFILTER_XTABLES
default n
help
arptables is a legacy packet classifier.
@@ -343,6 +345,7 @@ config IP_NF_ARPFILTER
select IP_NF_ARPTABLES
select NETFILTER_FAMILY_ARP
depends on NETFILTER_XTABLES_LEGACY
+ depends on NETFILTER_XTABLES
help
ARP packet filtering defines a table `filter', which has a series of
rules for simple ARP packet filtering at local input and
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 5128e2a5b00a..b1f3fd302e9d 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -217,7 +217,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
skb->remcsum_offload = remcsum;
- need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
+ need_ipsec = (skb_dst(skb) && dst_xfrm(skb_dst(skb))) || skb_sec_path(skb);
/* Try to offload checksum if possible */
offload_csum = !!(need_csum &&
!need_ipsec &&
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 276860f65baa..81daf82ddc2d 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -10,6 +10,7 @@ menu "IPv6: Netfilter Configuration"
config IP6_NF_IPTABLES_LEGACY
tristate "Legacy IP6 tables support"
depends on INET && IPV6 && NETFILTER_XTABLES_LEGACY
+ depends on NETFILTER_XTABLES
default m if NETFILTER_XTABLES_LEGACY
help
ip6tables is a legacy packet classifier.
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 5120a763da0d..0a0eeaed0591 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -334,7 +334,7 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
unsigned int i;
- xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
+ xfrm_state_flush(net, 0, false);
xfrm_flush_gc();
for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index a4971e6fa943..b4f01cb07561 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -430,7 +430,7 @@ static void psock_write_space(struct sock *sk)
/* Check if the socket is reserved so someone is waiting for sending. */
kcm = psock->tx_kcm;
- if (kcm && !unlikely(kcm->tx_stopped))
+ if (kcm)
queue_work(kcm_wq, &kcm->tx_work);
spin_unlock_bh(&mux->lock);
@@ -1693,12 +1693,6 @@ static int kcm_release(struct socket *sock)
*/
__skb_queue_purge(&sk->sk_write_queue);
- /* Set tx_stopped. This is checked when psock is bound to a kcm and we
- * get a writespace callback. This prevents further work being queued
- * from the callback (unbinding the psock occurs after canceling work.
- */
- kcm->tx_stopped = 1;
-
release_sock(sk);
spin_lock_bh(&mux->lock);
@@ -1714,7 +1708,7 @@ static int kcm_release(struct socket *sock)
/* Cancel work. After this point there should be no outside references
* to the kcm socket.
*/
- cancel_work_sync(&kcm->tx_work);
+ disable_work_sync(&kcm->tx_work);
lock_sock(sk);
psock = kcm->tx_psock;
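One detail worth spelling out: unlike cancel_work_sync(), disable_work_sync()
leaves the work item disabled afterwards, so any later queue_work() becomes a
no-op until enable_work() re-enables it. That is what lets the open-coded
tx_stopped flag go away. A toy illustration (struct and function hypothetical):

#include <linux/workqueue.h>

struct toy {
	struct work_struct work;
};

static void toy_teardown(struct toy *t)
{
	/* cancels pending/running work and blocks future queueing */
	disable_work_sync(&t->work);

	/*
	 * A racing queue_work(system_wq, &t->work), e.g. from a socket
	 * callback, now returns false instead of re-arming the work.
	 */
}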
diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c
index fb6b46a952cb..69a3ccfc6310 100644
--- a/net/mctp/test/route-test.c
+++ b/net/mctp/test/route-test.c
@@ -1586,7 +1586,6 @@ static void mctp_test_bind_lookup(struct kunit *test)
cleanup:
kfree_skb(skb_sock);
- kfree_skb(skb_pkt);
/* Drop all binds */
for (size_t i = 0; i < ARRAY_SIZE(lookup_binds); i++)
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index f821ad2e19b3..15049b826732 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -265,7 +265,8 @@ int ip_vs_est_kthread_start(struct netns_ipvs *ipvs,
}
set_user_nice(kd->task, sysctl_est_nice(ipvs));
- set_cpus_allowed_ptr(kd->task, sysctl_est_cpulist(ipvs));
+ if (sysctl_est_preferred_cpulist(ipvs))
+ kthread_affine_preferred(kd->task, sysctl_est_preferred_cpulist(ipvs));
pr_info("starting estimator thread %d...\n", kd->id);
wake_up_process(kd->task);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 486d52b45fe5..50fd6809380f 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -884,8 +884,6 @@ errout:
static int ctnetlink_done(struct netlink_callback *cb)
{
- if (cb->args[1])
- nf_ct_put((struct nf_conn *)cb->args[1]);
kfree(cb->data);
return 0;
}
@@ -1208,19 +1206,26 @@ ignore_entry:
return 0;
}
+static unsigned long ctnetlink_get_id(const struct nf_conn *ct)
+{
+ unsigned long id = nf_ct_get_id(ct);
+
+ return id ? id : 1;
+}
+
static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
unsigned int flags = cb->data ? NLM_F_DUMP_FILTERED : 0;
struct net *net = sock_net(skb->sk);
- struct nf_conn *ct, *last;
+ unsigned long last_id = cb->args[1];
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
struct nf_conn *nf_ct_evict[8];
+ struct nf_conn *ct;
int res, i;
spinlock_t *lockp;
- last = (struct nf_conn *)cb->args[1];
i = 0;
local_bh_disable();
@@ -1257,7 +1262,7 @@ restart:
continue;
if (cb->args[1]) {
- if (ct != last)
+ if (ctnetlink_get_id(ct) != last_id)
continue;
cb->args[1] = 0;
}
@@ -1270,8 +1275,7 @@ restart:
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
ct, true, flags);
if (res < 0) {
- nf_conntrack_get(&ct->ct_general);
- cb->args[1] = (unsigned long)ct;
+ cb->args[1] = ctnetlink_get_id(ct);
spin_unlock(lockp);
goto out;
}
@@ -1284,12 +1288,10 @@ restart:
}
out:
local_bh_enable();
- if (last) {
+ if (last_id) {
/* nf ct hash resize happened, now clear the leftover. */
- if ((struct nf_conn *)cb->args[1] == last)
+ if (cb->args[1] == last_id)
cb->args[1] = 0;
-
- nf_ct_put(last);
}
while (i) {
@@ -3168,23 +3170,27 @@ errout:
return 0;
}
#endif
-static int ctnetlink_exp_done(struct netlink_callback *cb)
+
+static unsigned long ctnetlink_exp_id(const struct nf_conntrack_expect *exp)
{
- if (cb->args[1])
- nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
- return 0;
+ unsigned long id = (unsigned long)exp;
+
+ id += nf_ct_get_id(exp->master);
+ id += exp->class;
+
+ return id ? id : 1;
}
static int
ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
- struct nf_conntrack_expect *exp, *last;
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
u_int8_t l3proto = nfmsg->nfgen_family;
+ unsigned long last_id = cb->args[1];
+ struct nf_conntrack_expect *exp;
rcu_read_lock();
- last = (struct nf_conntrack_expect *)cb->args[1];
for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
restart:
hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]],
@@ -3196,7 +3202,7 @@ restart:
continue;
if (cb->args[1]) {
- if (exp != last)
+ if (ctnetlink_exp_id(exp) != last_id)
continue;
cb->args[1] = 0;
}
@@ -3205,9 +3211,7 @@ restart:
cb->nlh->nlmsg_seq,
IPCTNL_MSG_EXP_NEW,
exp) < 0) {
- if (!refcount_inc_not_zero(&exp->use))
- continue;
- cb->args[1] = (unsigned long)exp;
+ cb->args[1] = ctnetlink_exp_id(exp);
goto out;
}
}
@@ -3218,32 +3222,30 @@ restart:
}
out:
rcu_read_unlock();
- if (last)
- nf_ct_expect_put(last);
-
return skb->len;
}
static int
ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct nf_conntrack_expect *exp, *last;
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
struct nf_conn *ct = cb->data;
struct nf_conn_help *help = nfct_help(ct);
u_int8_t l3proto = nfmsg->nfgen_family;
+ unsigned long last_id = cb->args[1];
+ struct nf_conntrack_expect *exp;
if (cb->args[0])
return 0;
rcu_read_lock();
- last = (struct nf_conntrack_expect *)cb->args[1];
+
restart:
hlist_for_each_entry_rcu(exp, &help->expectations, lnode) {
if (l3proto && exp->tuple.src.l3num != l3proto)
continue;
if (cb->args[1]) {
- if (exp != last)
+ if (ctnetlink_exp_id(exp) != last_id)
continue;
cb->args[1] = 0;
}
@@ -3251,9 +3253,7 @@ restart:
cb->nlh->nlmsg_seq,
IPCTNL_MSG_EXP_NEW,
exp) < 0) {
- if (!refcount_inc_not_zero(&exp->use))
- continue;
- cb->args[1] = (unsigned long)exp;
+ cb->args[1] = ctnetlink_exp_id(exp);
goto out;
}
}
@@ -3264,9 +3264,6 @@ restart:
cb->args[0] = 1;
out:
rcu_read_unlock();
- if (last)
- nf_ct_expect_put(last);
-
return skb->len;
}
@@ -3285,7 +3282,6 @@ static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl,
struct nf_conntrack_zone zone;
struct netlink_dump_control c = {
.dump = ctnetlink_exp_ct_dump_table,
- .done = ctnetlink_exp_done,
};
err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
@@ -3335,7 +3331,6 @@ static int ctnetlink_get_expect(struct sk_buff *skb,
else {
struct netlink_dump_control c = {
.dump = ctnetlink_exp_dump_table,
- .done = ctnetlink_exp_done,
};
return netlink_dump_start(info->sk, skb, info->nlh, &c);
}
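The conntrack changes above swap a pinned-pointer cursor for an id cookie: the
dump stores ctnetlink_get_id()/ctnetlink_exp_id() in cb->args[1] (mapping 0 to
1 so that 0 always means "no cursor") and rescans for the matching entry on
resume, so no reference needs to be held between dump rounds. A toy,
userspace-compilable sketch of that resumption pattern (types and names
illustrative):

#include <stddef.h>

struct entry {
	unsigned long id;
	struct entry *next;
};

/* Skip entries until the cookie matches, then dump from that entry
 * (it is re-dumped because it did not fit in the previous round).
 * A stale cookie - the entry was freed between rounds - matches
 * nothing, and the caller's restart logic takes over. */
static const struct entry *walk_resume(const struct entry *head,
				       unsigned long *cursor)
{
	const struct entry *e = head;

	while (*cursor && e) {
		if (e->id == *cursor)
			*cursor = 0;	/* found: resume here */
		else
			e = e->next;
	}
	return e;
}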
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 9b8b10a85233..1f14ef0436c6 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -567,16 +567,16 @@ nf_conntrack_log_invalid_sysctl(const struct ctl_table *table, int write,
return ret;
if (*(u8 *)table->data == 0)
- return ret;
+ return 0;
/* Load nf_log_syslog only if no logger is currently registered */
for (i = 0; i < NFPROTO_NUMPROTO; i++) {
if (nf_log_is_registered(i))
- return ret;
+ return 0;
}
request_module("%s", "nf_log_syslog");
- return ret;
+ return 0;
}
static struct ctl_table_header *nf_ct_netfilter_header;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 13d0ed9d1895..58c5425d61c2 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2803,6 +2803,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
struct nft_chain *chain = ctx->chain;
struct nft_chain_hook hook = {};
struct nft_stats __percpu *stats = NULL;
+ struct nftables_pernet *nft_net;
struct nft_hook *h, *next;
struct nf_hook_ops *ops;
struct nft_trans *trans;
@@ -2845,6 +2846,20 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
if (nft_hook_list_find(&basechain->hook_list, h)) {
list_del(&h->list);
nft_netdev_hook_free(h);
+ continue;
+ }
+
+ nft_net = nft_pernet(ctx->net);
+ list_for_each_entry(trans, &nft_net->commit_list, list) {
+ if (trans->msg_type != NFT_MSG_NEWCHAIN ||
+ trans->table != ctx->table ||
+ !nft_trans_chain_update(trans))
+ continue;
+
+ if (nft_hook_list_find(&nft_trans_chain_hooks(trans), h)) {
+ nft_chain_release_hook(&hook);
+ return -EEXIST;
+ }
}
}
} else {
@@ -9060,6 +9075,7 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
{
const struct nlattr * const *nla = ctx->nla;
struct nft_flowtable_hook flowtable_hook;
+ struct nftables_pernet *nft_net;
struct nft_hook *hook, *next;
struct nf_hook_ops *ops;
struct nft_trans *trans;
@@ -9076,6 +9092,20 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
if (nft_hook_list_find(&flowtable->hook_list, hook)) {
list_del(&hook->list);
nft_netdev_hook_free(hook);
+ continue;
+ }
+
+ nft_net = nft_pernet(ctx->net);
+ list_for_each_entry(trans, &nft_net->commit_list, list) {
+ if (trans->msg_type != NFT_MSG_NEWFLOWTABLE ||
+ trans->table != ctx->table ||
+ !nft_trans_flowtable_update(trans))
+ continue;
+
+ if (nft_hook_list_find(&nft_trans_flowtable_hooks(trans), hook)) {
+ err = -EEXIST;
+ goto err_flowtable_update_hook;
+ }
}
}
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index 1a19649c2851..9a10251228fd 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -426,10 +426,9 @@ static struct nft_pipapo_elem *pipapo_get(const struct nft_pipapo_match *m,
local_bh_disable();
- if (unlikely(!raw_cpu_ptr(m->scratch)))
- goto out;
-
scratch = *raw_cpu_ptr(m->scratch);
+ if (unlikely(!scratch))
+ goto out;
map_index = scratch->map_index;
diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
index db5d367e43c4..2f090e253caf 100644
--- a/net/netfilter/nft_set_pipapo_avx2.c
+++ b/net/netfilter/nft_set_pipapo_avx2.c
@@ -1150,12 +1150,12 @@ nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set,
const u32 *key)
{
struct nft_pipapo *priv = nft_set_priv(set);
+ const struct nft_set_ext *ext = NULL;
struct nft_pipapo_scratch *scratch;
u8 genmask = nft_genmask_cur(net);
const struct nft_pipapo_match *m;
const struct nft_pipapo_field *f;
const u8 *rp = (const u8 *)key;
- const struct nft_set_ext *ext;
unsigned long *res, *fill;
bool map_index;
int i;
@@ -1246,13 +1246,13 @@ next_match:
goto out;
if (last) {
- ext = &f->mt[ret].e->ext;
- if (unlikely(nft_set_elem_expired(ext) ||
- !nft_set_elem_active(ext, genmask))) {
- ext = NULL;
+ const struct nft_set_ext *e = &f->mt[ret].e->ext;
+
+ if (unlikely(nft_set_elem_expired(e) ||
+ !nft_set_elem_active(e, genmask)))
goto next_match;
- }
+ ext = e;
goto out;
}
diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
index 35d0409b0095..36affbb697c2 100644
--- a/net/netfilter/nft_socket.c
+++ b/net/netfilter/nft_socket.c
@@ -217,7 +217,7 @@ static int nft_socket_init(const struct nft_ctx *ctx,
level += err;
/* Implies a giant cgroup tree */
- if (WARN_ON_ONCE(level > 255))
+ if (level > 255)
return -EOPNOTSUPP;
priv->level = level;
diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
index 037f764822b9..82635dd2cfa5 100644
--- a/net/sched/sch_ets.c
+++ b/net/sched/sch_ets.c
@@ -651,6 +651,12 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
sch_tree_lock(sch);
+ for (i = nbands; i < oldbands; i++) {
+ if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
+ list_del_init(&q->classes[i].alist);
+ qdisc_purge_queue(q->classes[i].qdisc);
+ }
+
WRITE_ONCE(q->nbands, nbands);
for (i = nstrict; i < q->nstrict; i++) {
if (q->classes[i].qdisc->q.qlen) {
@@ -658,11 +664,6 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
q->classes[i].deficit = quanta[i];
}
}
- for (i = q->nbands; i < oldbands; i++) {
- if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
- list_del_init(&q->classes[i].alist);
- qdisc_purge_queue(q->classes[i].qdisc);
- }
WRITE_ONCE(q->nstrict, nstrict);
memcpy(q->prio2band, priomap, sizeof(priomap));
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 2dc2666988fb..7e99894778d4 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -117,7 +117,7 @@ int sctp_rcv(struct sk_buff *skb)
* it's better to just linearize it otherwise crc computing
* takes longer.
*/
- if ((!is_gso && skb_linearize(skb)) ||
+ if (((!is_gso || skb_cloned(skb)) && skb_linearize(skb)) ||
!pskb_may_pull(skb, sizeof(struct sctphdr)))
goto discard_it;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 46c156b121db..e2c5e0e626f9 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -257,20 +257,47 @@ svc_tcp_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
}
static int
-svc_tcp_sock_recv_cmsg(struct svc_sock *svsk, struct msghdr *msg)
+svc_tcp_sock_recv_cmsg(struct socket *sock, unsigned int *msg_flags)
{
union {
struct cmsghdr cmsg;
u8 buf[CMSG_SPACE(sizeof(u8))];
} u;
- struct socket *sock = svsk->sk_sock;
+ u8 alert[2];
+ struct kvec alert_kvec = {
+ .iov_base = alert,
+ .iov_len = sizeof(alert),
+ };
+ struct msghdr msg = {
+ .msg_flags = *msg_flags,
+ .msg_control = &u,
+ .msg_controllen = sizeof(u),
+ };
+ int ret;
+
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &alert_kvec, 1,
+ alert_kvec.iov_len);
+ ret = sock_recvmsg(sock, &msg, MSG_DONTWAIT);
+ if (ret > 0 &&
+ tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT) {
+ iov_iter_revert(&msg.msg_iter, ret);
+ ret = svc_tcp_sock_process_cmsg(sock, &msg, &u.cmsg, -EAGAIN);
+ }
+ return ret;
+}
+
+static int
+svc_tcp_sock_recvmsg(struct svc_sock *svsk, struct msghdr *msg)
+{
int ret;
+ struct socket *sock = svsk->sk_sock;
- msg->msg_control = &u;
- msg->msg_controllen = sizeof(u);
ret = sock_recvmsg(sock, msg, MSG_DONTWAIT);
- if (unlikely(msg->msg_controllen != sizeof(u)))
- ret = svc_tcp_sock_process_cmsg(sock, msg, &u.cmsg, ret);
+ if (msg->msg_flags & MSG_CTRUNC) {
+ msg->msg_flags &= ~(MSG_CTRUNC | MSG_EOR);
+ if (ret == 0 || ret == -EIO)
+ ret = svc_tcp_sock_recv_cmsg(sock, &msg->msg_flags);
+ }
return ret;
}
@@ -321,7 +348,7 @@ static ssize_t svc_tcp_read_msg(struct svc_rqst *rqstp, size_t buflen,
iov_iter_advance(&msg.msg_iter, seek);
buflen -= seek;
}
- len = svc_tcp_sock_recv_cmsg(svsk, &msg);
+ len = svc_tcp_sock_recvmsg(svsk, &msg);
if (len > 0)
svc_flush_bvec(bvec, len, seek);
@@ -1018,7 +1045,7 @@ static ssize_t svc_tcp_read_marker(struct svc_sock *svsk,
iov.iov_base = ((char *)&svsk->sk_marker) + svsk->sk_tcplen;
iov.iov_len = want;
iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, want);
- len = svc_tcp_sock_recv_cmsg(svsk, &msg);
+ len = svc_tcp_sock_recvmsg(svsk, &msg);
if (len < 0)
return len;
svsk->sk_tcplen += len;
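For context on the MSG_CTRUNC branch above: with kTLS, a non-data record such
as an alert is reported alongside the payload via a TLS_GET_RECORD_TYPE control
message, so a truncated cmsg would lose the record type and the alert would go
unhandled. The same mechanism seen from userspace, as a sketch (error handling
elided):

#include <sys/types.h>
#include <sys/socket.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282			/* from linux/socket.h */
#endif
#ifndef TLS_RECORD_TYPE_DATA
#define TLS_RECORD_TYPE_DATA 23		/* TLS application_data */
#endif

/* Receive once from a kTLS socket, reporting the TLS record type. */
static ssize_t recv_record(int fd, void *buf, size_t len, unsigned char *rtype)
{
	char cbuf[CMSG_SPACE(sizeof(unsigned char))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	ssize_t ret = recvmsg(fd, &msg, 0);

	*rtype = TLS_RECORD_TYPE_DATA;	/* plain data unless cmsg says otherwise */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_TLS &&
		    cmsg->cmsg_type == TLS_GET_RECORD_TYPE)
			*rtype = *CMSG_DATA(cmsg);
	return ret;
}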
diff --git a/net/tls/tls.h b/net/tls/tls.h
index 774859b63f0d..4e077068e6d9 100644
--- a/net/tls/tls.h
+++ b/net/tls/tls.h
@@ -196,7 +196,7 @@ void tls_strp_msg_done(struct tls_strparser *strp);
int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb);
void tls_rx_msg_ready(struct tls_strparser *strp);
-void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh);
+bool tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh);
int tls_strp_msg_cow(struct tls_sw_context_rx *ctx);
struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx);
int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst);
diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
index 095cf31bae0b..d71643b494a1 100644
--- a/net/tls/tls_strp.c
+++ b/net/tls/tls_strp.c
@@ -475,7 +475,7 @@ static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len)
strp->stm.offset = offset;
}
-void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
+bool tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
{
struct strp_msg *rxm;
struct tls_msg *tlm;
@@ -484,8 +484,11 @@ void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
DEBUG_NET_WARN_ON_ONCE(!strp->stm.full_len);
if (!strp->copy_mode && force_refresh) {
- if (WARN_ON(tcp_inq(strp->sk) < strp->stm.full_len))
- return;
+ if (unlikely(tcp_inq(strp->sk) < strp->stm.full_len)) {
+ WRITE_ONCE(strp->msg_ready, 0);
+ memset(&strp->stm, 0, sizeof(strp->stm));
+ return false;
+ }
tls_strp_load_anchor_with_queue(strp, strp->stm.full_len);
}
@@ -495,6 +498,8 @@ void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
rxm->offset = strp->stm.offset;
tlm = tls_msg(strp->anchor);
tlm->control = strp->mark;
+
+ return true;
}
/* Called with lock held on lower socket */
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 549d1ea01a72..51c98a007dda 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1384,7 +1384,8 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
return sock_intr_errno(timeo);
}
- tls_strp_msg_load(&ctx->strp, released);
+ if (unlikely(!tls_strp_msg_load(&ctx->strp, released)))
+ return tls_rx_rec_wait(sk, psock, nonblock, false);
return 1;
}
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index ead6a3c14b87..bebb355f3ffe 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -689,7 +689,8 @@ static int __vsock_bind_connectible(struct vsock_sock *vsk,
unsigned int i;
for (i = 0; i < MAX_PORT_RETRIES; i++) {
- if (port <= LAST_RESERVED_PORT)
+ if (port == VMADDR_PORT_ANY ||
+ port <= LAST_RESERVED_PORT)
port = LAST_RESERVED_PORT + 1;
new_addr.svm_port = port++;
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index d2819baea414..c7a1f080d2de 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -155,7 +155,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
return skb;
}
- if (skb_is_gso(skb) && unlikely(xmit_xfrm_check_overflow(skb))) {
+ if (skb_is_gso(skb) && (unlikely(x->xso.dev != dev) ||
+ unlikely(xmit_xfrm_check_overflow(skb)))) {
struct sk_buff *segs;
/* Packet got rerouted, fixup features and segment it. */
@@ -415,10 +416,12 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
struct net_device *dev = x->xso.dev;
bool check_tunnel_size;
- if (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED)
+ if (!x->type_offload ||
+ (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED && x->encap))
return false;
- if ((dev == xfrm_dst_path(dst)->dev) && !xdst->child->xfrm) {
+ if ((!dev || dev == xfrm_dst_path(dst)->dev) &&
+ !xdst->child->xfrm) {
mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
if (skb->len <= mtu)
goto ok;
@@ -430,6 +433,9 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
return false;
ok:
+ if (!dev)
+ return true;
+
check_tunnel_size = x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
x->props.mode == XFRM_MODE_TUNNEL;
switch (x->props.family) {
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 77db3b5fe4ac..78fcbb89cf32 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -3297,7 +3297,7 @@ void xfrm_state_fini(struct net *net)
unsigned int sz;
flush_work(&net->xfrm.state_hash_work);
- xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
+ xfrm_state_flush(net, 0, false);
flush_work(&xfrm_state_gc_work);
WARN_ON(!list_empty(&net->xfrm.state_all));