Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--  include/linux/skbuff.h  139
1 file changed, 103 insertions, 36 deletions
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d448a4804aea..ed06e1c28fc7 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -15,7 +15,6 @@
#define _LINUX_SKBUFF_H
#include <linux/kernel.h>
-#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
@@ -489,8 +488,9 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
* the end of the header data, i.e. at skb->end.
*/
struct skb_shared_info {
- unsigned short _unused;
- unsigned char nr_frags;
+ __u8 __unused;
+ __u8 meta_len;
+ __u8 nr_frags;
__u8 tx_flags;
unsigned short gso_size;
/* Warning: this field is not always filled in (UFO)! */
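A note on the new layout: the two __u8 fields occupy exactly the two bytes of
the old unsigned short placeholder, so nr_frags keeps its offset and
skb_shared_info does not grow. A hedged compile-time check of that invariant
(illustrative only, not part of this patch):

	/* Sketch: nr_frags still sits at byte 2, right after the two
	 * __u8 fields that replaced the old unsigned short.
	 */
	BUILD_BUG_ON(offsetof(struct skb_shared_info, nr_frags) != 2);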
@@ -499,7 +499,6 @@ struct skb_shared_info {
struct skb_shared_hwtstamps hwtstamps;
unsigned int gso_type;
u32 tskey;
- __be32 ip6_frag_id;
/*
* Warning : all fields before dataref are cleared in __alloc_skb()
@@ -616,6 +615,7 @@ typedef unsigned char *sk_buff_data_t;
* @nf_trace: netfilter packet trace flag
* @protocol: Packet protocol from driver
* @destructor: Destruct function
+ * @tcp_tsorted_anchor: list structure for TCP (tp->tsorted_sent_queue)
* @_nfct: Associated connection, if any (with nfctinfo bits)
* @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
* @skb_iif: ifindex of device we arrived on
@@ -661,8 +661,12 @@ struct sk_buff {
struct sk_buff *prev;
union {
- ktime_t tstamp;
- u64 skb_mstamp;
+ struct net_device *dev;
+ /* Some protocols might use this space to store information,
+ * while the device pointer is NULL.
+ * UDP receive path is one user.
+ */
+ unsigned long dev_scratch;
};
};
struct rb_node rbnode; /* used in netem & tcp stack */
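As the comment added above notes, dev_scratch reuses the pointer-sized slot
while skb->dev is not needed. A minimal sketch of that pattern, with an
illustrative helper name (the real users are in the UDP receive path):

	/* Sketch: stash a per-skb scalar in the union; valid only while
	 * the skb is queued and skb->dev is unused.
	 */
	static inline void my_set_dev_scratch(struct sk_buff *skb,
					      unsigned long val)
	{
		skb->dev_scratch = val;
	}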
@@ -670,12 +674,8 @@ struct sk_buff {
struct sock *sk;
union {
- struct net_device *dev;
- /* Some protocols might use this space to store information,
- * while device pointer would be NULL.
- * UDP receive path is one user.
- */
- unsigned long dev_scratch;
+ ktime_t tstamp;
+ u64 skb_mstamp;
};
/*
* This is the control buffer. It is free to use for every
@@ -685,8 +685,14 @@ struct sk_buff {
*/
char cb[48] __aligned(8);
- unsigned long _skb_refdst;
- void (*destructor)(struct sk_buff *skb);
+ union {
+ struct {
+ unsigned long _skb_refdst;
+ void (*destructor)(struct sk_buff *skb);
+ };
+ struct list_head tcp_tsorted_anchor;
+ };
+
#ifdef CONFIG_XFRM
struct sec_path *sp;
#endif
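Because tcp_tsorted_anchor overlays _skb_refdst and destructor, the list
anchor is only usable once both are quiesced. A hedged sketch of that
prerequisite (helper name is illustrative; TCP's own handling lives in the
TCP stack):

	/* Sketch: drop the dst reference and destructor before using the
	 * overlapping list_head.
	 */
	static inline void my_init_tsorted_anchor(struct sk_buff *skb)
	{
		skb_dst_drop(skb);		/* releases _skb_refdst */
		skb->destructor = NULL;
		INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
	}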
@@ -704,7 +710,6 @@ struct sk_buff {
/* Following fields are _not_ copied in __copy_skb_header()
* Note that queue_mapping is here mostly to fill a hole.
*/
- kmemcheck_bitfield_begin(flags1);
__u16 queue_mapping;
/* if you move cloned around you also must adapt those constants */
@@ -723,7 +728,6 @@ struct sk_buff {
head_frag:1,
xmit_more:1,
__unused:1; /* one bit hole */
- kmemcheck_bitfield_end(flags1);
/* fields enclosed in headers_start/headers_end are copied
* using a single memcpy() in __copy_skb_header()
@@ -771,6 +775,7 @@ struct sk_buff {
__u8 remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
__u8 offload_fwd_mark:1;
+ __u8 offload_mr_fwd_mark:1;
#endif
#ifdef CONFIG_NET_CLS_ACT
__u8 tc_skip_classify:1;
@@ -1457,27 +1462,8 @@ static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
}
/**
- * skb_header_release - release reference to header
- * @skb: buffer to operate on
- *
- * Drop a reference to the header part of the buffer. This is done
- * by acquiring a payload reference. You must not read from the header
- * part of skb->data after this.
- * Note : Check if you can use __skb_header_release() instead.
- */
-static inline void skb_header_release(struct sk_buff *skb)
-{
- BUG_ON(skb->nohdr);
- skb->nohdr = 1;
- atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
-}
-
-/**
* __skb_header_release - release reference to header
* @skb: buffer to operate on
- *
- * Variant of skb_header_release() assuming skb is private to caller.
- * We can avoid one atomic operation.
*/
static inline void __skb_header_release(struct sk_buff *skb)
{
@@ -2675,7 +2661,7 @@ static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
* 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
* code in gfp_to_alloc_flags that should be enforcing this.
*/
- gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;
+ gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;
return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
}
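A minimal usage sketch for the cleaned-up __dev_alloc_pages(), e.g. an RX
ring refill in atomic context (assumes the surrounding function returns an
errno):

	struct page *page;

	page = __dev_alloc_pages(GFP_ATOMIC, 0);	/* order-0 page */
	if (!page)
		return -ENOMEM;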
@@ -3168,6 +3154,12 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
return __skb_grow(skb, len);
}
+#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
+#define skb_rb_first(root) rb_to_skb(rb_first(root))
+#define skb_rb_last(root) rb_to_skb(rb_last(root))
+#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
+#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
+
#define skb_queue_walk(queue, skb) \
for (skb = (queue)->next; \
skb != (struct sk_buff *)(queue); \
@@ -3182,6 +3174,18 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
for (; skb != (struct sk_buff *)(queue); \
skb = skb->next)
+#define skb_rbtree_walk(skb, root) \
+ for (skb = skb_rb_first(root); skb != NULL; \
+ skb = skb_rb_next(skb))
+
+#define skb_rbtree_walk_from(skb) \
+ for (; skb != NULL; \
+ skb = skb_rb_next(skb))
+
+#define skb_rbtree_walk_from_safe(skb, tmp) \
+ for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
+ skb = tmp)
+
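A usage sketch for the new walkers: summing the bytes queued in an skb
rbtree, assuming root points to an rb_root whose nodes are the skbs' rbnode
fields:

	static unsigned int my_rbtree_bytes(struct rb_root *root)
	{
		struct sk_buff *skb;
		unsigned int bytes = 0;

		skb_rbtree_walk(skb, root)	/* in-order traversal */
			bytes += skb->len;
		return bytes;
	}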
#define skb_queue_walk_from_safe(queue, skb, tmp) \
for (tmp = skb->next; \
skb != (struct sk_buff *)(queue); \
@@ -3419,6 +3423,69 @@ static inline ktime_t net_invalid_timestamp(void)
return 0;
}
+static inline u8 skb_metadata_len(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->meta_len;
+}
+
+static inline void *skb_metadata_end(const struct sk_buff *skb)
+{
+ return skb_mac_header(skb);
+}
+
+static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
+ const struct sk_buff *skb_b,
+ u8 meta_len)
+{
+ const void *a = skb_metadata_end(skb_a);
+ const void *b = skb_metadata_end(skb_b);
+ /* Using a more efficient variant than a plain call to memcmp(). */
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ u64 diffs = 0;
+
+ switch (meta_len) {
+#define __it(x, op) (x -= sizeof(u##op))
+#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
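+ /* Cases fall through on purpose: each larger meta_len compares one
+ * more word before reaching its break.
+ */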
+ case 32: diffs |= __it_diff(a, b, 64);
+ case 24: diffs |= __it_diff(a, b, 64);
+ case 16: diffs |= __it_diff(a, b, 64);
+ case 8: diffs |= __it_diff(a, b, 64);
+ break;
+ case 28: diffs |= __it_diff(a, b, 64);
+ case 20: diffs |= __it_diff(a, b, 64);
+ case 12: diffs |= __it_diff(a, b, 64);
+ case 4: diffs |= __it_diff(a, b, 32);
+ break;
+ }
+ return diffs;
+#else
+ return memcmp(a - meta_len, b - meta_len, meta_len);
+#endif
+}
+
+static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
+ const struct sk_buff *skb_b)
+{
+ u8 len_a = skb_metadata_len(skb_a);
+ u8 len_b = skb_metadata_len(skb_b);
+
+ if (!(len_a | len_b))
+ return false;
+
+ return len_a != len_b ?
+ true : __skb_metadata_differs(skb_a, skb_b, len_a);
+}
+
+static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
+{
+ skb_shinfo(skb)->meta_len = meta_len;
+}
+
+static inline void skb_metadata_clear(struct sk_buff *skb)
+{
+ skb_metadata_set(skb, 0);
+}
+
struct sk_buff *skb_clone_sk(struct sk_buff *skb);
#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
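The metadata helpers above compare the region immediately in front of the MAC
header (skb_metadata_end() is skb_mac_header()). A usage sketch of the
intended pattern, with an illustrative function name: two packets whose
metadata differs should not be coalesced:

	static bool my_meta_allows_coalesce(const struct sk_buff *a,
					    const struct sk_buff *b)
	{
		return !skb_metadata_differs(a, b);
	}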