path: root/include/net/udp.h
Diffstat (limited to 'include/net/udp.h')
-rw-r--r--  include/net/udp.h  46
1 file changed, 36 insertions(+), 10 deletions(-)
diff --git a/include/net/udp.h b/include/net/udp.h
index 6e89520e100d..a061d1b22ddc 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -205,7 +205,6 @@ static inline void udp_hash4_dec(struct udp_hslot *hslot2)
extern struct proto udp_prot;
-extern atomic_long_t udp_memory_allocated;
DECLARE_PER_CPU(int, udp_memory_per_cpu_fw_alloc);
/* sysctl variables for udp */
@@ -285,13 +284,28 @@ INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *));
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
netdev_features_t features, bool is_ipv6);
-static inline void udp_lib_init_sock(struct sock *sk)
+static inline int udp_lib_init_sock(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
+ sk->sk_drop_counters = &up->drop_counters;
skb_queue_head_init(&up->reader_queue);
+ INIT_HLIST_NODE(&up->tunnel_list);
up->forward_threshold = sk->sk_rcvbuf >> 2;
set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);
+
+ up->udp_prod_queue = kcalloc(nr_node_ids, sizeof(*up->udp_prod_queue),
+ GFP_KERNEL);
+ if (!up->udp_prod_queue)
+ return -ENOMEM;
+ for (int i = 0; i < nr_node_ids; i++)
+ init_llist_head(&up->udp_prod_queue[i].ll_root);
+ return 0;
+}
+
+static inline void udp_drops_inc(struct sock *sk)
+{
+ numa_drop_add(&udp_sk(sk)->drop_counters, 1);
}
/* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
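Since udp_lib_init_sock() can now fail, callers outside this header have to propagate the error and free the kcalloc()'ed per-NUMA-node producer queues on teardown. A minimal sketch of that pairing, assuming the udp_init_sock()/udp_destruct_sock() call sites in net/ipv4/udp.c keep their current names (the bodies below are illustrative, not part of this diff):

/* Illustrative only: release the per-node queues when the socket goes
 * away, and forward -ENOMEM from udp_lib_init_sock().
 */
static void udp_destruct_sock(struct sock *sk)
{
	/* ...drain reader_queue / receive queue as today... */
	kfree(udp_sk(sk)->udp_prod_queue);
}

int udp_init_sock(struct sock *sk)
{
	int res = udp_lib_init_sock(sk);

	if (res)
		return res;

	sk->sk_destruct = udp_destruct_sock;
	return 0;
}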
@@ -397,7 +411,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
return __skb_recv_udp(sk, flags, &off, err);
}
-int udp_v4_early_demux(struct sk_buff *skb);
+enum skb_drop_reason udp_v4_early_demux(struct sk_buff *skb);
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_err(struct sk_buff *, u32);
int udp_abort(struct sock *sk, int err);
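Returning an skb_drop_reason from the early-demux hook lets the IPv4 input path record why a packet was refused instead of a bare errno. A rough sketch of the call-site pattern this enables, assuming the caller in ip_rcv_finish_core() is updated to match (paraphrased, not taken from this patch):

/* Illustrative call site: a non-zero reason means the skb was not
 * accepted, so free it with the precise drop reason.
 */
enum skb_drop_reason reason;

reason = udp_v4_early_demux(skb);
if (unlikely(reason != SKB_NOT_DROPPED_YET)) {
	kfree_skb_reason(skb, reason);
	return NET_RX_DROP;
}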
@@ -410,7 +424,7 @@ void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, int *karg);
int udp_init_sock(struct sock *sk);
-int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+int udp_pre_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
@@ -586,6 +600,16 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
{
netdev_features_t features = NETIF_F_SG;
struct sk_buff *segs;
+ int drop_count;
+
+ /*
+ * Segmentation in the UDP receive path is only for UDP GRO; drop UDP
+ * fragmentation offload (UFO) packets.
+ */
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) {
+ drop_count = 1;
+ goto drop;
+ }
/* Avoid csum recalculation by skb_segment unless userspace explicitly
* asks for the final checksum values
@@ -609,16 +633,18 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
*/
segs = __skb_gso_segment(skb, features, false);
if (IS_ERR_OR_NULL(segs)) {
- int segs_nr = skb_shinfo(skb)->gso_segs;
-
- atomic_add(segs_nr, &sk->sk_drops);
- SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
- kfree_skb(skb);
- return NULL;
+ drop_count = skb_shinfo(skb)->gso_segs;
+ goto drop;
}
consume_skb(skb);
return segs;
+
+drop:
+ sk_drops_add(sk, drop_count);
+ SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, drop_count);
+ kfree_skb(skb);
+ return NULL;
}
static inline void udp_post_segment_fix_csum(struct sk_buff *skb)
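The same change routes socket drop accounting through the per-NUMA counters embedded in struct udp_sock, so receive-path drop sites call udp_drops_inc() (or sk_drops_add() for multi-segment drops, as in udp_rcv_segment() above) rather than touching sk->sk_drops directly. A minimal sketch of a single-skb drop site under that convention (the helper name below is hypothetical):

/* Hypothetical drop site: count the drop on the socket's NUMA-aware
 * counters, bump the UDP MIB, then free the skb.
 */
static int udp_queue_one(struct sock *sk, struct sk_buff *skb, bool is_ipv4)
{
	int err = __udp_enqueue_schedule_skb(sk, skb);

	if (err < 0) {
		udp_drops_inc(sk);
		SNMP_INC_STATS(__UDPX_MIB(sk, is_ipv4), UDP_MIB_INERRORS);
		kfree_skb(skb);
	}
	return err;
}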