Diffstat (limited to 'net')
-rw-r--r--  net/batman-adv/bat_v_ogm.c               |  11
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c   |   5
-rw-r--r--  net/batman-adv/gateway_client.c          |   6
-rw-r--r--  net/caif/cfrfml.c                        |   4
-rw-r--r--  net/core/dev.c                           |  17
-rw-r--r--  net/core/netpoll.c                       |   2
-rw-r--r--  net/core/skbuff.c                        |   2
-rw-r--r--  net/core/sock.c                          |   2
-rw-r--r--  net/ipv4/fib_trie.c                      |   3
-rw-r--r--  net/ipv4/nexthop.c                       |   5
-rw-r--r--  net/ipv4/raw.c                           |   2
-rw-r--r--  net/ipv6/ip6_tunnel.c                    |  10
-rw-r--r--  net/l3mdev/l3mdev.c                      |   2
-rw-r--r--  net/mptcp/protocol.c                     |   3
-rw-r--r--  net/netfilter/nf_conntrack_proto_sctp.c  |  39
-rw-r--r--  net/netfilter/nf_tables_api.c            |   3
-rw-r--r--  net/netfilter/nft_payload.c              |   4
-rw-r--r--  net/netfilter/nft_set_rbtree.c           |  57
-rw-r--r--  net/netlabel/netlabel_domainhash.c       |  59
-rw-r--r--  net/rxrpc/ar-internal.h                  |  13
-rw-r--r--  net/rxrpc/call_object.c                  |   1
-rw-r--r--  net/rxrpc/input.c                        | 123
-rw-r--r--  net/rxrpc/output.c                       |  82
-rw-r--r--  net/rxrpc/peer_object.c                  |  16
-rw-r--r--  net/rxrpc/rtt.c                          |   3
-rw-r--r--  net/rxrpc/rxkad.c                        |   3
-rw-r--r--  net/sched/act_ct.c                       |   2
-rw-r--r--  net/sched/sch_taprio.c                   |  30
-rw-r--r--  net/sctp/socket.c                        |  16
-rw-r--r--  net/sctp/stream.c                        |   6
-rw-r--r--  net/smc/smc_diag.c                       |  16
-rw-r--r--  net/socket.c                             |   4
-rw-r--r--  net/tipc/crypto.c                        |   2
33 files changed, 370 insertions, 183 deletions
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index 0f8495b9eeb1..717fe657561d 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -881,6 +881,12 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl,
ogm_packet->version, ntohs(ogm_packet->tvlv_len));
+ if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: originator packet from ourself\n");
+ return;
+ }
+
/* If the throughput metric is 0, immediately drop the packet. No need
* to create orig_node / neigh_node for an unusable route.
*/
@@ -1008,11 +1014,6 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
goto free_skb;
- ogm_packet = (struct batadv_ogm2_packet *)skb->data;
-
- if (batadv_is_my_mac(bat_priv, ogm_packet->orig))
- goto free_skb;
-
batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
skb->len + ETH_HLEN);
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 91a04ca373dc..8500f56cbd10 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -437,7 +437,10 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
skb->len + ETH_HLEN);
- netif_rx(skb);
+ if (in_interrupt())
+ netif_rx(skb);
+ else
+ netif_rx_ni(skb);
out:
if (primary_if)
batadv_hardif_put(primary_if);
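
The hunk above follows the standard kernel rule for packet injection:
netif_rx() is only safe from interrupt/softirq context, while process
context must use netif_rx_ni(), which disables preemption and runs any
NET_RX softirq it raises. A minimal sketch of the dispatch pattern
(illustrative helper name, not part of the patch):

	/* Deliver an skb regardless of calling context. */
	static void rx_any_context(struct sk_buff *skb)
	{
		if (in_interrupt())
			netif_rx(skb);		/* hard-IRQ or softirq */
		else
			netif_rx_ni(skb);	/* process context */
	}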
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index a18dcc686dc3..ef3f85b576c4 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -703,8 +703,10 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET;
/* store the client address if the message is going to a client */
- if (ret == BATADV_DHCP_TO_CLIENT &&
- pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) {
+ if (ret == BATADV_DHCP_TO_CLIENT) {
+ if (!pskb_may_pull(skb, chaddr_offset + ETH_ALEN))
+ return BATADV_DHCP_NO;
+
/* check if the DHCP packet carries an Ethernet DHCP */
p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET;
if (*p != BATADV_DHCP_HTYPE_ETHERNET)
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index ce2767e9cec6..7b0af33bdb97 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -116,7 +116,7 @@ static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
if (segmented) {
if (rfml->incomplete_frm == NULL) {
/* Initial Segment */
- if (cfpkt_peek_head(pkt, rfml->seghead, 6) < 0)
+ if (cfpkt_peek_head(pkt, rfml->seghead, 6) != 0)
goto out;
rfml->pdu_size = get_unaligned_le16(rfml->seghead+4);
@@ -233,7 +233,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
if (cfpkt_getlen(pkt) > rfml->fragment_size + RFM_HEAD_SIZE)
err = cfpkt_peek_head(pkt, head, 6);
- if (err < 0)
+ if (err != 0)
goto out;
while (cfpkt_getlen(frontpkt) > rfml->fragment_size + RFM_HEAD_SIZE) {
diff --git a/net/core/dev.c b/net/core/dev.c
index b5d1129d8310..95ac7568f693 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6612,12 +6612,13 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
netdev_err_once(dev, "%s() called with weight %d\n", __func__,
weight);
napi->weight = weight;
- list_add(&napi->dev_list, &dev->napi_list);
napi->dev = dev;
#ifdef CONFIG_NETPOLL
napi->poll_owner = -1;
#endif
set_bit(NAPI_STATE_SCHED, &napi->state);
+ set_bit(NAPI_STATE_NPSVC, &napi->state);
+ list_add_rcu(&napi->dev_list, &dev->napi_list);
napi_hash_add(napi);
}
EXPORT_SYMBOL(netif_napi_add);
@@ -8742,13 +8743,15 @@ struct bpf_xdp_link {
int flags;
};
-static enum bpf_xdp_mode dev_xdp_mode(u32 flags)
+static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
{
if (flags & XDP_FLAGS_HW_MODE)
return XDP_MODE_HW;
if (flags & XDP_FLAGS_DRV_MODE)
return XDP_MODE_DRV;
- return XDP_MODE_SKB;
+ if (flags & XDP_FLAGS_SKB_MODE)
+ return XDP_MODE_SKB;
+ return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
}
static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
@@ -8896,7 +8899,7 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
return -EINVAL;
}
- mode = dev_xdp_mode(flags);
+ mode = dev_xdp_mode(dev, flags);
/* can't replace attached link */
if (dev_xdp_link(dev, mode)) {
NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link");
@@ -8984,7 +8987,7 @@ static int dev_xdp_detach_link(struct net_device *dev,
ASSERT_RTNL();
- mode = dev_xdp_mode(link->flags);
+ mode = dev_xdp_mode(dev, link->flags);
if (dev_xdp_link(dev, mode) != link)
return -EINVAL;
@@ -9080,7 +9083,7 @@ static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
goto out_unlock;
}
- mode = dev_xdp_mode(xdp_link->flags);
+ mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
xdp_link->flags, new_prog);
@@ -9164,7 +9167,7 @@ out_put_dev:
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
int fd, int expected_fd, u32 flags)
{
- enum bpf_xdp_mode mode = dev_xdp_mode(flags);
+ enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
struct bpf_prog *new_prog = NULL, *old_prog = NULL;
int err;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 093e90e52bc2..2338753e936b 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -162,7 +162,7 @@ static void poll_napi(struct net_device *dev)
struct napi_struct *napi;
int cpu = smp_processor_id();
- list_for_each_entry(napi, &dev->napi_list, dev_list) {
+ list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
poll_one_napi(napi);
smp_store_release(&napi->poll_owner, -1);
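
This pairs with the netif_napi_add() hunk above to form the usual RCU
publish/read pattern: the writer fully initializes the napi (including
NAPI_STATE_NPSVC, which keeps netpoll from servicing it) before
publishing it with list_add_rcu(), and the lockless reader traverses
with list_for_each_entry_rcu(). A minimal sketch with hypothetical
types:

	#include <linux/list.h>
	#include <linux/rcupdate.h>

	struct item {
		struct list_head list;
		int ready;
	};

	static LIST_HEAD(items);

	static void item_publish(struct item *it)
	{
		it->ready = 1;			 /* initialize first... */
		list_add_rcu(&it->list, &items); /* ...then publish */
	}

	static void item_scan(void)
	{
		struct item *it;

		rcu_read_lock();
		list_for_each_entry_rcu(it, &items, list)
			WARN_ON(!it->ready);	/* never half-built */
		rcu_read_unlock();
	}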
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e18184ffa9c3..6faf73d6a0f7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -820,6 +820,7 @@ void skb_tx_error(struct sk_buff *skb)
}
EXPORT_SYMBOL(skb_tx_error);
+#ifdef CONFIG_TRACEPOINTS
/**
* consume_skb - free an skbuff
* @skb: buffer to free
@@ -837,6 +838,7 @@ void consume_skb(struct sk_buff *skb)
__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
+#endif
/**
* consume_stateless_skb - free an skbuff, assuming it is stateless
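
Wrapping the out-of-line consume_skb() in CONFIG_TRACEPOINTS only makes
sense with a header-side fallback when tracepoints are compiled out;
presumably something along these lines in <linux/skbuff.h> (an
assumption, the header change is not part of this diff):

	#ifndef CONFIG_TRACEPOINTS
	/* No trace_consume_skb() to emit, so a plain free suffices. */
	static inline void consume_skb(struct sk_buff *skb)
	{
		kfree_skb(skb);
	}
	#endif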
diff --git a/net/core/sock.c b/net/core/sock.c
index e4f40b175acb..8eb2c924805a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3254,7 +3254,7 @@ void sk_common_release(struct sock *sk)
sk->sk_prot->destroy(sk);
/*
- * Observation: when sock_common_release is called, processes have
+ * Observation: when sk_common_release is called, processes have
* no access to socket. But net still has.
* Step one, detach it from networking:
*
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index c89b46fec153..ffc5332f1390 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2121,7 +2121,8 @@ void fib_info_notify_update(struct net *net, struct nl_info *info)
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
struct fib_table *tb;
- hlist_for_each_entry_rcu(tb, head, tb_hlist)
+ hlist_for_each_entry_rcu(tb, head, tb_hlist,
+ lockdep_rtnl_is_held())
__fib_info_notify_update(net, tb, info);
}
}
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index cc8049b100b2..134e92382275 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -446,7 +446,7 @@ static int nh_check_attr_group(struct net *net, struct nlattr *tb[],
unsigned int i, j;
u8 nhg_fdb = 0;
- if (len & (sizeof(struct nexthop_grp) - 1)) {
+ if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
NL_SET_ERR_MSG(extack,
"Invalid length for nexthop group attribute");
return -EINVAL;
@@ -1187,6 +1187,9 @@ static struct nexthop *nexthop_create_group(struct net *net,
struct nexthop *nh;
int i;
+ if (WARN_ON(!num_nh))
+ return ERR_PTR(-EINVAL);
+
nh = nexthop_alloc();
if (!nh)
return ERR_PTR(-ENOMEM);
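
Two guards combine here: nh_check_attr_group() now rejects an empty
group attribute up front, and nexthop_create_group() keeps a
WARN_ON(!num_nh) backstop. The alignment test works because
sizeof(struct nexthop_grp) is a power of two (8 bytes), so masking with
size - 1 checks for an exact multiple; a compact sketch of the combined
check (hypothetical helper):

	/* len must be a non-zero multiple of the 8-byte entry size. */
	static bool nh_group_len_ok(unsigned int len)
	{
		return len && !(len & (sizeof(struct nexthop_grp) - 1));
	}
	/* nh_group_len_ok(0) == false, (12) == false, (16) == true */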
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 6fd4330287c2..407956be7deb 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -610,7 +610,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
} else if (!ipc.oif) {
ipc.oif = inet->uc_index;
} else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
- /* oif is set, packet is to local broadcast and
+ /* oif is set, packet is to local broadcast
* and uc_index is set. oif is most likely set
* by sk_bound_dev_if. If uc_index != oif check if the
* oif is an L3 master and uc_index is an L3 slave.
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index f635914f42ec..a0217e5bf3bc 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -915,7 +915,15 @@ int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
struct metadata_dst *tun_dst,
bool log_ecn_err)
{
- return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate,
+ int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
+ const struct ipv6hdr *ipv6h,
+ struct sk_buff *skb);
+
+ dscp_ecn_decapsulate = ip6ip6_dscp_ecn_decapsulate;
+ if (tpi->proto == htons(ETH_P_IP))
+ dscp_ecn_decapsulate = ip4ip6_dscp_ecn_decapsulate;
+
+ return __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
log_ecn_err);
}
EXPORT_SYMBOL(ip6_tnl_rcv);
diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c
index e71ca5aec684..864326f150e2 100644
--- a/net/l3mdev/l3mdev.c
+++ b/net/l3mdev/l3mdev.c
@@ -154,7 +154,7 @@ int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
EXPORT_SYMBOL_GPL(l3mdev_master_upper_ifindex_by_index_rcu);
/**
- * l3mdev_fib_table - get FIB table id associated with an L3
+ * l3mdev_fib_table_rcu - get FIB table id associated with an L3
* master interface
* @dev: targeted interface
*/
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 1aad411a0e46..8ccd4a151dcb 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -892,7 +892,6 @@ restart:
goto out;
}
-wait_for_sndbuf:
__mptcp_flush_join_list(msk);
ssk = mptcp_subflow_get_send(msk);
while (!sk_stream_memory_free(sk) ||
@@ -982,7 +981,7 @@ wait_for_sndbuf:
*/
mptcp_set_timeout(sk, ssk);
release_sock(ssk);
- goto wait_for_sndbuf;
+ goto restart;
}
}
}
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 4f897b14b606..810cca24b399 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -62,6 +62,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
[SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS,
};
+#define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1
+
#define sNO SCTP_CONNTRACK_NONE
#define sCL SCTP_CONNTRACK_CLOSED
#define sCW SCTP_CONNTRACK_COOKIE_WAIT
@@ -369,6 +371,7 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
u_int32_t offset, count;
unsigned int *timeouts;
unsigned long map[256 / sizeof(unsigned long)] = { 0 };
+ bool ignore = false;
if (sctp_error(skb, dataoff, state))
return -NF_ACCEPT;
@@ -427,15 +430,39 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
/* Sec 8.5.1 (D) */
if (sh->vtag != ct->proto.sctp.vtag[dir])
goto out_unlock;
- } else if (sch->type == SCTP_CID_HEARTBEAT ||
- sch->type == SCTP_CID_HEARTBEAT_ACK) {
+ } else if (sch->type == SCTP_CID_HEARTBEAT) {
+ if (ct->proto.sctp.vtag[dir] == 0) {
+ pr_debug("Setting %d vtag %x for dir %d\n", sch->type, sh->vtag, dir);
+ ct->proto.sctp.vtag[dir] = sh->vtag;
+ } else if (sh->vtag != ct->proto.sctp.vtag[dir]) {
+ if (test_bit(SCTP_CID_DATA, map) || ignore)
+ goto out_unlock;
+
+ ct->proto.sctp.flags |= SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
+ ct->proto.sctp.last_dir = dir;
+ ignore = true;
+ continue;
+ } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) {
+ ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
+ }
+ } else if (sch->type == SCTP_CID_HEARTBEAT_ACK) {
if (ct->proto.sctp.vtag[dir] == 0) {
pr_debug("Setting vtag %x for dir %d\n",
sh->vtag, dir);
ct->proto.sctp.vtag[dir] = sh->vtag;
} else if (sh->vtag != ct->proto.sctp.vtag[dir]) {
- pr_debug("Verification tag check failed\n");
- goto out_unlock;
+ if (test_bit(SCTP_CID_DATA, map) || ignore)
+ goto out_unlock;
+
+ if ((ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) == 0 ||
+ ct->proto.sctp.last_dir == dir)
+ goto out_unlock;
+
+ ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
+ ct->proto.sctp.vtag[dir] = sh->vtag;
+ ct->proto.sctp.vtag[!dir] = 0;
+ } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) {
+ ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
}
}
@@ -470,6 +497,10 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
}
spin_unlock_bh(&ct->lock);
+ /* allow but do not refresh timeout */
+ if (ignore)
+ return NF_ACCEPT;
+
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
timeouts = nf_sctp_pernet(nf_ct_net(ct))->timeouts;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index fd814e514f94..71e501c5ad21 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3770,7 +3770,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
goto nla_put_failure;
}
- if (nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
+ if (set->udata &&
+ nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
goto nla_put_failure;
nest = nla_nest_start_noflag(skb, NFTA_SET_DESC);
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index ed7cb9f747f6..7a2e59638499 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -87,7 +87,9 @@ void nft_payload_eval(const struct nft_expr *expr,
u32 *dest = &regs->data[priv->dreg];
int offset;
- dest[priv->len / NFT_REG32_SIZE] = 0;
+ if (priv->len % NFT_REG32_SIZE)
+ dest[priv->len / NFT_REG32_SIZE] = 0;
+
switch (priv->base) {
case NFT_PAYLOAD_LL_HEADER:
if (!skb_mac_header_was_set(skb))
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 4b2834fd17b2..217ab3644c25 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -218,11 +218,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
struct nft_rbtree_elem *new,
struct nft_set_ext **ext)
{
+ bool overlap = false, dup_end_left = false, dup_end_right = false;
struct nft_rbtree *priv = nft_set_priv(set);
u8 genmask = nft_genmask_next(net);
struct nft_rbtree_elem *rbe;
struct rb_node *parent, **p;
- bool overlap = false;
int d;
/* Detect overlaps as we descend the tree. Set the flag in these cases:
@@ -238,24 +238,44 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
*
* b1. _ _ __>| !_ _ __| (insert end before existing start)
* b2. _ _ ___| !_ _ _>| (insert end after existing start)
- * b3. _ _ ___! >|_ _ __| (insert start after existing end)
+ * b3. _ _ ___! >|_ _ __| (insert start after existing end, as a leaf)
+ * '--' no nodes falling in this range
+ * b4. >|_ _ ! (insert start before existing start)
*
* Case a3. resolves to b3.:
* - if the inserted start element is the leftmost, because the '0'
* element in the tree serves as end element
- * - otherwise, if an existing end is found. Note that end elements are
- * always inserted after corresponding start elements.
+ * - otherwise, if an existing end is found immediately to the left. If
+ * there are existing nodes in between, we need to further descend the
+ * tree before we can conclude the new start isn't causing an overlap
+ *
+ * or to b4., which, preceded by a3., means we already traversed one or
+ * more existing intervals entirely, from the right.
*
* For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
* in that order.
*
* The flag is also cleared in two special cases:
*
- * b4. |__ _ _!|<_ _ _ (insert start right before existing end)
- * b5. |__ _ >|!__ _ _ (insert end right after existing start)
+ * b5. |__ _ _!|<_ _ _ (insert start right before existing end)
+ * b6. |__ _ >|!__ _ _ (insert end right after existing start)
*
* which always happen as last step and imply that no further
* overlapping is possible.
+ *
+ * Another special case comes from the fact that start elements matching
+ * an already existing start element are allowed: insertion is not
+ * performed but we return -EEXIST in that case, and the error will be
+ * cleared by the caller if NLM_F_EXCL is not present in the request.
+ * This way, request for insertion of an exact overlap isn't reported as
+ * error to userspace if not desired.
+ *
+ * However, if the existing start matches a pre-existing start, but the
+ * end element doesn't match the corresponding pre-existing end element,
+ * we need to report a partial overlap. This is a local condition that
+ * can be noticed without need for a tracking flag, by checking for a
+ * local duplicated end for a corresponding start, from left and right,
+ * separately.
*/
parent = NULL;
@@ -272,26 +292,41 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
if (nft_rbtree_interval_start(new)) {
if (nft_rbtree_interval_end(rbe) &&
nft_set_elem_active(&rbe->ext, genmask) &&
- !nft_set_elem_expired(&rbe->ext))
+ !nft_set_elem_expired(&rbe->ext) && !*p)
overlap = false;
} else {
+ if (dup_end_left && !*p)
+ return -ENOTEMPTY;
+
overlap = nft_rbtree_interval_end(rbe) &&
nft_set_elem_active(&rbe->ext,
genmask) &&
!nft_set_elem_expired(&rbe->ext);
+
+ if (overlap) {
+ dup_end_right = true;
+ continue;
+ }
}
} else if (d > 0) {
p = &parent->rb_right;
if (nft_rbtree_interval_end(new)) {
+ if (dup_end_right && !*p)
+ return -ENOTEMPTY;
+
overlap = nft_rbtree_interval_end(rbe) &&
nft_set_elem_active(&rbe->ext,
genmask) &&
!nft_set_elem_expired(&rbe->ext);
- } else if (nft_rbtree_interval_end(rbe) &&
- nft_set_elem_active(&rbe->ext, genmask) &&
+
+ if (overlap) {
+ dup_end_left = true;
+ continue;
+ }
+ } else if (nft_set_elem_active(&rbe->ext, genmask) &&
!nft_set_elem_expired(&rbe->ext)) {
- overlap = true;
+ overlap = nft_rbtree_interval_end(rbe);
}
} else {
if (nft_rbtree_interval_end(rbe) &&
@@ -316,6 +351,8 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
p = &parent->rb_left;
}
}
+
+ dup_end_left = dup_end_right = false;
}
if (overlap)
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index d07de2c0fbc7..f73a8382c275 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -85,6 +85,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
kfree(netlbl_domhsh_addr6_entry(iter6));
}
#endif /* IPv6 */
+ kfree(ptr->def.addrsel);
}
kfree(ptr->domain);
kfree(ptr);
@@ -537,6 +538,8 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
goto add_return;
}
#endif /* IPv6 */
+ /* cleanup the new entry since we've moved everything over */
+ netlbl_domhsh_free_entry(&entry->rcu);
} else
ret_val = -EINVAL;
@@ -580,6 +583,12 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
{
int ret_val = 0;
struct audit_buffer *audit_buf;
+ struct netlbl_af4list *iter4;
+ struct netlbl_domaddr4_map *map4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct netlbl_af6list *iter6;
+ struct netlbl_domaddr6_map *map6;
+#endif /* IPv6 */
if (entry == NULL)
return -ENOENT;
@@ -597,6 +606,9 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
ret_val = -ENOENT;
spin_unlock(&netlbl_domhsh_lock);
+ if (ret_val)
+ return ret_val;
+
audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info);
if (audit_buf != NULL) {
audit_log_format(audit_buf,
@@ -606,40 +618,29 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
audit_log_end(audit_buf);
}
- if (ret_val == 0) {
- struct netlbl_af4list *iter4;
- struct netlbl_domaddr4_map *map4;
-#if IS_ENABLED(CONFIG_IPV6)
- struct netlbl_af6list *iter6;
- struct netlbl_domaddr6_map *map6;
-#endif /* IPv6 */
-
- switch (entry->def.type) {
- case NETLBL_NLTYPE_ADDRSELECT:
- netlbl_af4list_foreach_rcu(iter4,
- &entry->def.addrsel->list4) {
- map4 = netlbl_domhsh_addr4_entry(iter4);
- cipso_v4_doi_putdef(map4->def.cipso);
- }
+ switch (entry->def.type) {
+ case NETLBL_NLTYPE_ADDRSELECT:
+ netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) {
+ map4 = netlbl_domhsh_addr4_entry(iter4);
+ cipso_v4_doi_putdef(map4->def.cipso);
+ }
#if IS_ENABLED(CONFIG_IPV6)
- netlbl_af6list_foreach_rcu(iter6,
- &entry->def.addrsel->list6) {
- map6 = netlbl_domhsh_addr6_entry(iter6);
- calipso_doi_putdef(map6->def.calipso);
- }
+ netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) {
+ map6 = netlbl_domhsh_addr6_entry(iter6);
+ calipso_doi_putdef(map6->def.calipso);
+ }
#endif /* IPv6 */
- break;
- case NETLBL_NLTYPE_CIPSOV4:
- cipso_v4_doi_putdef(entry->def.cipso);
- break;
+ break;
+ case NETLBL_NLTYPE_CIPSOV4:
+ cipso_v4_doi_putdef(entry->def.cipso);
+ break;
#if IS_ENABLED(CONFIG_IPV6)
- case NETLBL_NLTYPE_CALIPSO:
- calipso_doi_putdef(entry->def.calipso);
- break;
+ case NETLBL_NLTYPE_CALIPSO:
+ calipso_doi_putdef(entry->def.calipso);
+ break;
#endif /* IPv6 */
- }
- call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
}
+ call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
return ret_val;
}
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 6d29a3603a3e..884cff7bb169 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -488,7 +488,6 @@ enum rxrpc_call_flag {
RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */
RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */
RXRPC_CALL_SEND_PING, /* A ping will need to be sent */
- RXRPC_CALL_PINGING, /* Ping in process */
RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */
RXRPC_CALL_BEGAN_RX_TIMER, /* We began the expect_rx_by timer */
RXRPC_CALL_RX_HEARD, /* The peer responded at least once to this call */
@@ -673,9 +672,13 @@ struct rxrpc_call {
rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */
rxrpc_seq_t ackr_seen; /* Highest packet shown seen */
- /* ping management */
- rxrpc_serial_t ping_serial; /* Last ping sent */
- ktime_t ping_time; /* Time last ping sent */
+ /* RTT management */
+ rxrpc_serial_t rtt_serial[4]; /* Serial number of DATA or PING sent */
+ ktime_t rtt_sent_at[4]; /* Time packet sent */
+ unsigned long rtt_avail; /* Mask of available slots in bits 0-3,
+ * Mask of pending samples in 8-11 */
+#define RXRPC_CALL_RTT_AVAIL_MASK 0xf
+#define RXRPC_CALL_RTT_PEND_SHIFT 8
/* transmission-phase ACK management */
ktime_t acks_latest_ts; /* Timestamp of latest ACK received */
@@ -1037,7 +1040,7 @@ static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
/*
* rtt.c
*/
-void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
+void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, int,
rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *, bool);
void rxrpc_peer_init_rtt(struct rxrpc_peer *);
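
With four probe slots, rtt_avail packs two bitmaps into one word: bits
0-3 mean "slot free", bits 8-11 mean "probe in flight". A worked
example of the encoding:

	/* Slot 0 in flight, slots 1-3 free: avail == 0x10e */
	unsigned long avail = 0x10e;
	/* free:    avail & RXRPC_CALL_RTT_AVAIL_MASK          == 0xe */
	/* pending: (avail >> RXRPC_CALL_RTT_PEND_SHIFT) & 0xf == 0x1 */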
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 38a46167523f..a40fae013942 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -153,6 +153,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
call->rxnet = rxnet;
+ call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK;
atomic_inc(&rxnet->nr_calls);
return call;
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 767579328a06..19ddfc9807e8 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -608,36 +608,57 @@ unlock:
}
/*
- * Process a requested ACK.
+ * See if there's a cached RTT probe to complete.
*/
-static void rxrpc_input_requested_ack(struct rxrpc_call *call,
- ktime_t resp_time,
- rxrpc_serial_t orig_serial,
- rxrpc_serial_t ack_serial)
+static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
+ ktime_t resp_time,
+ rxrpc_serial_t acked_serial,
+ rxrpc_serial_t ack_serial,
+ enum rxrpc_rtt_rx_trace type)
{
- struct rxrpc_skb_priv *sp;
- struct sk_buff *skb;
+ rxrpc_serial_t orig_serial;
+ unsigned long avail;
ktime_t sent_at;
- int ix;
+ bool matched = false;
+ int i;
- for (ix = 0; ix < RXRPC_RXTX_BUFF_SIZE; ix++) {
- skb = call->rxtx_buffer[ix];
- if (!skb)
- continue;
+ avail = READ_ONCE(call->rtt_avail);
+ smp_rmb(); /* Read avail bits before accessing data. */
- sent_at = skb->tstamp;
- smp_rmb(); /* Read timestamp before serial. */
- sp = rxrpc_skb(skb);
- if (sp->hdr.serial != orig_serial)
+ for (i = 0; i < ARRAY_SIZE(call->rtt_serial); i++) {
+ if (!test_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &avail))
continue;
- goto found;
- }
- return;
+ sent_at = call->rtt_sent_at[i];
+ orig_serial = call->rtt_serial[i];
+
+ if (orig_serial == acked_serial) {
+ clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
+ smp_mb(); /* Read data before setting avail bit */
+ set_bit(i, &call->rtt_avail);
+ if (type != rxrpc_rtt_rx_cancel)
+ rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial,
+ sent_at, resp_time);
+ else
+ trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_cancel, i,
+ orig_serial, acked_serial, 0, 0);
+ matched = true;
+ }
+
+ /* If a later serial is being acked, then mark this slot as
+ * being available.
+ */
+ if (after(acked_serial, orig_serial)) {
+ trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_obsolete, i,
+ orig_serial, acked_serial, 0, 0);
+ clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
+ smp_wmb();
+ set_bit(i, &call->rtt_avail);
+ }
+ }
-found:
- rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_requested_ack,
- orig_serial, ack_serial, sent_at, resp_time);
+ if (!matched)
+ trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_lost, 9, 0, acked_serial, 0, 0);
}
/*
@@ -682,27 +703,11 @@ static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call)
*/
static void rxrpc_input_ping_response(struct rxrpc_call *call,
ktime_t resp_time,
- rxrpc_serial_t orig_serial,
+ rxrpc_serial_t acked_serial,
rxrpc_serial_t ack_serial)
{
- rxrpc_serial_t ping_serial;
- ktime_t ping_time;
-
- ping_time = call->ping_time;
- smp_rmb();
- ping_serial = READ_ONCE(call->ping_serial);
-
- if (orig_serial == call->acks_lost_ping)
+ if (acked_serial == call->acks_lost_ping)
rxrpc_input_check_for_lost_ack(call);
-
- if (before(orig_serial, ping_serial) ||
- !test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags))
- return;
- if (after(orig_serial, ping_serial))
- return;
-
- rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_ping_response,
- orig_serial, ack_serial, ping_time, resp_time);
}
/*
@@ -843,7 +848,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
struct rxrpc_ackinfo info;
u8 acks[RXRPC_MAXACKS];
} buf;
- rxrpc_serial_t acked_serial;
+ rxrpc_serial_t ack_serial, acked_serial;
rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
int nr_acks, offset, ioffset;
@@ -856,6 +861,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
}
offset += sizeof(buf.ack);
+ ack_serial = sp->hdr.serial;
acked_serial = ntohl(buf.ack.serial);
first_soft_ack = ntohl(buf.ack.firstPacket);
prev_pkt = ntohl(buf.ack.previousPacket);
@@ -864,31 +870,42 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
buf.ack.reason : RXRPC_ACK__INVALID);
- trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial,
+ trace_rxrpc_rx_ack(call, ack_serial, acked_serial,
first_soft_ack, prev_pkt,
summary.ack_reason, nr_acks);
- if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
+ switch (buf.ack.reason) {
+ case RXRPC_ACK_PING_RESPONSE:
rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
- sp->hdr.serial);
- if (buf.ack.reason == RXRPC_ACK_REQUESTED)
- rxrpc_input_requested_ack(call, skb->tstamp, acked_serial,
- sp->hdr.serial);
+ ack_serial);
+ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
+ rxrpc_rtt_rx_ping_response);
+ break;
+ case RXRPC_ACK_REQUESTED:
+ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
+ rxrpc_rtt_rx_requested_ack);
+ break;
+ default:
+ if (acked_serial != 0)
+ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
+ rxrpc_rtt_rx_cancel);
+ break;
+ }
if (buf.ack.reason == RXRPC_ACK_PING) {
- _proto("Rx ACK %%%u PING Request", sp->hdr.serial);
+ _proto("Rx ACK %%%u PING Request", ack_serial);
rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
- sp->hdr.serial, true, true,
+ ack_serial, true, true,
rxrpc_propose_ack_respond_to_ping);
} else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
- sp->hdr.serial, true, true,
+ ack_serial, true, true,
rxrpc_propose_ack_respond_to_ack);
}
/* Discard any out-of-order or duplicate ACKs (outside lock). */
if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
- trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
+ trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
first_soft_ack, call->ackr_first_seq,
prev_pkt, call->ackr_prev_seq);
return;
@@ -904,7 +921,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
/* Discard any out-of-order or duplicate ACKs (inside lock). */
if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
- trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
+ trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
first_soft_ack, call->ackr_first_seq,
prev_pkt, call->ackr_prev_seq);
goto out;
@@ -964,7 +981,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
RXRPC_TX_ANNO_LAST &&
summary.nr_acks == call->tx_top - hard_ack &&
rxrpc_is_client_call(call))
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, ack_serial,
false, true,
rxrpc_propose_ack_ping_for_lost_reply);
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 1ba43c3df4ad..3cfff7922ba8 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -124,6 +124,49 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
}
/*
+ * Record the beginning of an RTT probe.
+ */
+static int rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
+ enum rxrpc_rtt_tx_trace why)
+{
+ unsigned long avail = call->rtt_avail;
+ int rtt_slot = 9;
+
+ if (!(avail & RXRPC_CALL_RTT_AVAIL_MASK))
+ goto no_slot;
+
+ rtt_slot = __ffs(avail & RXRPC_CALL_RTT_AVAIL_MASK);
+ if (!test_and_clear_bit(rtt_slot, &call->rtt_avail))
+ goto no_slot;
+
+ call->rtt_serial[rtt_slot] = serial;
+ call->rtt_sent_at[rtt_slot] = ktime_get_real();
+ smp_wmb(); /* Write data before avail bit */
+ set_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
+
+ trace_rxrpc_rtt_tx(call, why, rtt_slot, serial);
+ return rtt_slot;
+
+no_slot:
+ trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_no_slot, rtt_slot, serial);
+ return -1;
+}
+
+/*
+ * Cancel an RTT probe.
+ */
+static void rxrpc_cancel_rtt_probe(struct rxrpc_call *call,
+ rxrpc_serial_t serial, int rtt_slot)
+{
+ if (rtt_slot != -1) {
+ clear_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
+ smp_wmb(); /* Clear pending bit before setting slot */
+ set_bit(rtt_slot, &call->rtt_avail);
+ trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_cancel, rtt_slot, serial);
+ }
+}
+
+/*
* Send an ACK call packet.
*/
int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
@@ -136,7 +179,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
rxrpc_serial_t serial;
rxrpc_seq_t hard_ack, top;
size_t len, n;
- int ret;
+ int ret, rtt_slot = -1;
u8 reason;
if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
@@ -196,18 +239,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
if (_serial)
*_serial = serial;
- if (ping) {
- call->ping_serial = serial;
- smp_wmb();
- /* We need to stick a time in before we send the packet in case
- * the reply gets back before kernel_sendmsg() completes - but
- * asking UDP to send the packet can take a relatively long
- * time.
- */
- call->ping_time = ktime_get_real();
- set_bit(RXRPC_CALL_PINGING, &call->flags);
- trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_ping, serial);
- }
+ if (ping)
+ rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_ping);
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
conn->params.peer->last_tx_at = ktime_get_seconds();
@@ -221,8 +254,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
if (call->state < RXRPC_CALL_COMPLETE) {
if (ret < 0) {
- if (ping)
- clear_bit(RXRPC_CALL_PINGING, &call->flags);
+ rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
rxrpc_propose_ACK(call, pkt->ack.reason,
ntohl(pkt->ack.serial),
false, true,
@@ -321,7 +353,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
struct kvec iov[2];
rxrpc_serial_t serial;
size_t len;
- int ret;
+ int ret, rtt_slot = -1;
_enter(",{%d}", skb->len);
@@ -397,6 +429,8 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
sp->hdr.serial = serial;
smp_wmb(); /* Set serial before timestamp */
skb->tstamp = ktime_get_real();
+ if (whdr.flags & RXRPC_REQUEST_ACK)
+ rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
/* send the packet by UDP
* - returns -EMSGSIZE if UDP would have to fragment the packet
@@ -408,12 +442,15 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
conn->params.peer->last_tx_at = ktime_get_seconds();
up_read(&conn->params.local->defrag_sem);
- if (ret < 0)
+ if (ret < 0) {
+ rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
rxrpc_tx_point_call_data_nofrag);
- else
+ } else {
trace_rxrpc_tx_packet(call->debug_id, &whdr,
rxrpc_tx_point_call_data_nofrag);
+ }
+
rxrpc_tx_backoff(call, ret);
if (ret == -EMSGSIZE)
goto send_fragmentable;
@@ -422,7 +459,6 @@ done:
if (ret >= 0) {
if (whdr.flags & RXRPC_REQUEST_ACK) {
call->peer->rtt_last_req = skb->tstamp;
- trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
if (call->peer->rtt_count > 1) {
unsigned long nowj = jiffies, ack_lost_at;
@@ -469,6 +505,8 @@ send_fragmentable:
sp->hdr.serial = serial;
smp_wmb(); /* Set serial before timestamp */
skb->tstamp = ktime_get_real();
+ if (whdr.flags & RXRPC_REQUEST_ACK)
+ rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
switch (conn->params.local->srx.transport.family) {
case AF_INET6:
@@ -487,12 +525,14 @@ send_fragmentable:
BUG();
}
- if (ret < 0)
+ if (ret < 0) {
+ rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
rxrpc_tx_point_call_data_frag);
- else
+ } else {
trace_rxrpc_tx_packet(call->debug_id, &whdr,
rxrpc_tx_point_call_data_frag);
+ }
rxrpc_tx_backoff(call, ret);
up_write(&conn->params.local->defrag_sem);
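
The slot lifecycle spans both files: rxrpc_begin_rtt_probe() above
claims a free slot, fills rtt_serial[] and rtt_sent_at[], and only then
sets the pending bit, while rxrpc_complete_rtt_probe() in input.c
snapshots rtt_avail and only trusts the slot data after seeing the bit.
The barrier pairing, condensed (names abbreviated from the real
fields):

	/* producer (Tx path) */
	data[slot] = value;
	smp_wmb();			/* write data before flag */
	set_bit(slot + PEND_SHIFT, &avail);

	/* consumer (ACK path) */
	snapshot = READ_ONCE(avail);
	smp_rmb();			/* read flag before data */
	if (test_bit(slot + PEND_SHIFT, &snapshot))
		use(data[slot]);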
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index ca29976bb193..68396d052052 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -502,11 +502,21 @@ EXPORT_SYMBOL(rxrpc_kernel_get_peer);
* rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT
* @sock: The socket on which the call is in progress.
* @call: The call to query
+ * @_srtt: Where to store the SRTT value.
*
- * Get the call's peer smoothed RTT.
+ * Get the call's peer smoothed RTT in uS.
*/
-u32 rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call)
+bool rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call,
+ u32 *_srtt)
{
- return call->peer->srtt_us >> 3;
+ struct rxrpc_peer *peer = call->peer;
+
+ if (peer->rtt_count == 0) {
+ *_srtt = 1000000; /* 1S */
+ return false;
+ }
+
+ *_srtt = call->peer->srtt_us >> 3;
+ return true;
}
EXPORT_SYMBOL(rxrpc_kernel_get_srtt);
diff --git a/net/rxrpc/rtt.c b/net/rxrpc/rtt.c
index 928d8b34a3ee..1221b0637a7e 100644
--- a/net/rxrpc/rtt.c
+++ b/net/rxrpc/rtt.c
@@ -146,6 +146,7 @@ static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, long rtt_us)
* exclusive access to the peer RTT data.
*/
void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
+ int rtt_slot,
rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
ktime_t send_time, ktime_t resp_time)
{
@@ -162,7 +163,7 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
peer->rtt_count++;
spin_unlock(&peer->rtt_input_lock);
- trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial,
+ trace_rxrpc_rtt_rx(call, why, rtt_slot, send_serial, resp_serial,
peer->srtt_us >> 3, peer->rto_j);
}
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 52a24d4ef5d8..e08130e5746b 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -1137,7 +1137,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
ret = -ENOMEM;
ticket = kmalloc(ticket_len, GFP_NOFS);
if (!ticket)
- goto temporary_error;
+ goto temporary_error_free_resp;
eproto = tracepoint_string("rxkad_tkt_short");
abort_code = RXKADPACKETSHORT;
@@ -1230,6 +1230,7 @@ protocol_error:
temporary_error_free_ticket:
kfree(ticket);
+temporary_error_free_resp:
kfree(response);
temporary_error:
/* Ignore the response packet if we got a temporary error such as
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index e6ad42b11835..2c3619165680 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -704,7 +704,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
err = ip_defrag(net, skb, user);
local_bh_enable();
if (err && err != -EINPROGRESS)
- goto out_free;
+ return err;
if (!err) {
*defrag = true;
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index e981992634dd..fe53c1e38c7d 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1176,9 +1176,27 @@ static void taprio_offload_config_changed(struct taprio_sched *q)
spin_unlock(&q->current_entry_lock);
}
-static void taprio_sched_to_offload(struct taprio_sched *q,
+static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
+{
+ u32 i, queue_mask = 0;
+
+ for (i = 0; i < dev->num_tc; i++) {
+ u32 offset, count;
+
+ if (!(tc_mask & BIT(i)))
+ continue;
+
+ offset = dev->tc_to_txq[i].offset;
+ count = dev->tc_to_txq[i].count;
+
+ queue_mask |= GENMASK(offset + count - 1, offset);
+ }
+
+ return queue_mask;
+}
+
+static void taprio_sched_to_offload(struct net_device *dev,
struct sched_gate_list *sched,
- const struct tc_mqprio_qopt *mqprio,
struct tc_taprio_qopt_offload *offload)
{
struct sched_entry *entry;
@@ -1193,7 +1211,8 @@ static void taprio_sched_to_offload(struct taprio_sched *q,
e->command = entry->command;
e->interval = entry->interval;
- e->gate_mask = entry->gate_mask;
+ e->gate_mask = tc_map_to_queue_mask(dev, entry->gate_mask);
+
i++;
}
@@ -1201,7 +1220,6 @@ static void taprio_sched_to_offload(struct taprio_sched *q,
}
static int taprio_enable_offload(struct net_device *dev,
- struct tc_mqprio_qopt *mqprio,
struct taprio_sched *q,
struct sched_gate_list *sched,
struct netlink_ext_ack *extack)
@@ -1223,7 +1241,7 @@ static int taprio_enable_offload(struct net_device *dev,
return -ENOMEM;
}
offload->enable = 1;
- taprio_sched_to_offload(q, sched, mqprio, offload);
+ taprio_sched_to_offload(dev, sched, offload);
err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
if (err < 0) {
@@ -1485,7 +1503,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
}
if (FULL_OFFLOAD_IS_ENABLED(q->flags))
- err = taprio_enable_offload(dev, mqprio, q, new_admin, extack);
+ err = taprio_enable_offload(dev, q, new_admin, extack);
else
err = taprio_disable_offload(dev, q, extack);
if (err)
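
tc_map_to_queue_mask() converts the gate mask from traffic classes to
TX queues via the device's tc_to_txq table before handing it to the
offload. A worked example under an assumed queue layout:

	/* Assume tc0 -> txq 0..1 and tc1 -> txq 2..3.
	 * gate_mask = BIT(1) selects tc1 only:
	 *   offset = 2, count = 2
	 *   queue_mask |= GENMASK(2 + 2 - 1, 2) == GENMASK(3, 2) == 0xc
	 * i.e. queues 2 and 3 are open during that entry.
	 */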
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index ec1fba1fbe71..836615f71a7d 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -8060,8 +8060,6 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
pr_debug("%s: begins, snum:%d\n", __func__, snum);
- local_bh_disable();
-
if (snum == 0) {
/* Search for an available port. */
int low, high, remaining, index;
@@ -8079,20 +8077,21 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
continue;
index = sctp_phashfn(net, rover);
head = &sctp_port_hashtable[index];
- spin_lock(&head->lock);
+ spin_lock_bh(&head->lock);
sctp_for_each_hentry(pp, &head->chain)
if ((pp->port == rover) &&
net_eq(net, pp->net))
goto next;
break;
next:
- spin_unlock(&head->lock);
+ spin_unlock_bh(&head->lock);
+ cond_resched();
} while (--remaining > 0);
/* Exhausted local port range during search? */
ret = 1;
if (remaining <= 0)
- goto fail;
+ return ret;
/* OK, here is the one we will use. HEAD (the port
* hash table list entry) is non-NULL and we hold it's
@@ -8107,7 +8106,7 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
* port iterator, pp being NULL.
*/
head = &sctp_port_hashtable[sctp_phashfn(net, snum)];
- spin_lock(&head->lock);
+ spin_lock_bh(&head->lock);
sctp_for_each_hentry(pp, &head->chain) {
if ((pp->port == snum) && net_eq(pp->net, net))
goto pp_found;
@@ -8207,10 +8206,7 @@ success:
ret = 0;
fail_unlock:
- spin_unlock(&head->lock);
-
-fail:
- local_bh_enable();
+ spin_unlock_bh(&head->lock);
return ret;
}
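
The conversion replaces one long local_bh_disable() section with
per-bucket spin_lock_bh()/spin_unlock_bh() pairs, and the added
cond_resched() lets a long port search yield between probes. The
resulting loop shape, sketched with hypothetical table types:

	/* BHs are disabled only while a bucket is held. */
	for (i = 0; i < nbuckets; i++) {
		spin_lock_bh(&table[i].lock);
		/* ... probe bucket for a usable port ... */
		spin_unlock_bh(&table[i].lock);
		cond_resched();		/* avoid hogging the CPU */
	}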
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index bda2536dd740..6dc95dcc0ff4 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -88,12 +88,13 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
int ret;
if (outcnt <= stream->outcnt)
- return 0;
+ goto out;
ret = genradix_prealloc(&stream->out, outcnt, gfp);
if (ret)
return ret;
+out:
stream->outcnt = outcnt;
return 0;
}
@@ -104,12 +105,13 @@ static int sctp_stream_alloc_in(struct sctp_stream *stream, __u16 incnt,
int ret;
if (incnt <= stream->incnt)
- return 0;
+ goto out;
ret = genradix_prealloc(&stream->in, incnt, gfp);
if (ret)
return ret;
+out:
stream->incnt = incnt;
return 0;
}
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index e1f64f4ba236..da9ba6d1679b 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -170,13 +170,15 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
(req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
!list_empty(&smc->conn.lgr->list)) {
struct smc_connection *conn = &smc->conn;
- struct smcd_diag_dmbinfo dinfo = {
- .linkid = *((u32 *)conn->lgr->id),
- .peer_gid = conn->lgr->peer_gid,
- .my_gid = conn->lgr->smcd->local_gid,
- .token = conn->rmb_desc->token,
- .peer_token = conn->peer_token
- };
+ struct smcd_diag_dmbinfo dinfo;
+
+ memset(&dinfo, 0, sizeof(dinfo));
+
+ dinfo.linkid = *((u32 *)conn->lgr->id);
+ dinfo.peer_gid = conn->lgr->peer_gid;
+ dinfo.my_gid = conn->lgr->smcd->local_gid;
+ dinfo.token = conn->rmb_desc->token;
+ dinfo.peer_token = conn->peer_token;
if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0)
goto errout;
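
The memset() matters because a designated initializer only zeroes named
members; padding holes inside the struct may keep stale stack bytes,
which nla_put() would then copy to userspace. An illustrative struct
(not the real smcd_diag_dmbinfo layout):

	struct example {
		u8  a;		/* compiler may pad 3 bytes here */
		u32 b;
	};

	static void fill(struct example *e)
	{
		memset(e, 0, sizeof(*e));	/* zeroes padding too */
		e->a = 1;
		e->b = 2;
	}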
diff --git a/net/socket.c b/net/socket.c
index dbbe8ea7d395..0c0144604f81 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -3610,7 +3610,7 @@ int kernel_getsockname(struct socket *sock, struct sockaddr *addr)
EXPORT_SYMBOL(kernel_getsockname);
/**
- * kernel_peername - get the address which the socket is connected (kernel space)
+ * kernel_getpeername - get the address which the socket is connected (kernel space)
* @sock: socket
* @addr: address holder
*
@@ -3671,7 +3671,7 @@ int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
EXPORT_SYMBOL(kernel_sendpage_locked);
/**
- * kernel_shutdown - shut down part of a full-duplex connection (kernel space)
+ * kernel_sock_shutdown - shut down part of a full-duplex connection (kernel space)
* @sock: socket
* @how: connection part
*
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index 001bcb0f2480..c38babaa4e57 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -757,10 +757,12 @@ static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err)
switch (err) {
case 0:
this_cpu_inc(tx->stats->stat[STAT_ASYNC_OK]);
+ rcu_read_lock();
if (likely(test_bit(0, &b->up)))
b->media->send_msg(net, skb, b, &tx_ctx->dst);
else
kfree_skb(skb);
+ rcu_read_unlock();
break;
case -EINPROGRESS:
return;