Diffstat (limited to 'include/net/inet_frag.h')
-rw-r--r--  include/net/inet_frag.h  199
1 file changed, 106 insertions(+), 93 deletions(-)
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 6fdcd2427776..0eccd9c3a883 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -1,170 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__
-#include <linux/percpu_counter.h>
-
-struct netns_frags {
- /* The percpu_counter "mem" need to be cacheline aligned.
- * mem.count must not share cacheline with other writers
- */
- struct percpu_counter mem ____cacheline_aligned_in_smp;
+#include <linux/rhashtable-types.h>
+#include <linux/completion.h>
+#include <linux/in6.h>
+#include <linux/rbtree_types.h>
+#include <linux/refcount.h>
+#include <net/dropreason-core.h>
+/* Per netns frag queues directory */
+struct fqdir {
/* sysctls */
+ long high_thresh;
+ long low_thresh;
int timeout;
- int high_thresh;
- int low_thresh;
int max_dist;
+ struct inet_frags *f;
+ struct net *net;
+ bool dead;
+
+ struct rhashtable rhashtable ____cacheline_aligned_in_smp;
+
+ /* Keep atomic mem on separate cachelines in structs that include it */
+ atomic_long_t mem ____cacheline_aligned_in_smp;
+ struct work_struct destroy_work;
+ struct llist_node free_list;
};
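
The per-netns state is now a dynamically allocated struct fqdir, set up from each protocol's pernet init via fqdir_init() (declared further down). A minimal sketch of that setup, assuming an ip4_frags-style inet_frags object and a net->ipv4.fqdir pointer as in the IPv4 reassembler; names and threshold values are illustrative only:

/* Sketch: per-netns fqdir setup.  demo_frags and the threshold values
 * are assumptions; real protocols use their own inet_frags object and
 * sysctl defaults.
 */
static int __net_init demo_frags_init_net(struct net *net)
{
        int err;

        err = fqdir_init(&net->ipv4.fqdir, &demo_frags, net);
        if (err)
                return err;

        /* sysctl knobs now live directly in the fqdir */
        net->ipv4.fqdir->high_thresh = 4 * 1024 * 1024;
        net->ipv4.fqdir->low_thresh  = 3 * 1024 * 1024;
        net->ipv4.fqdir->timeout     = 30 * HZ;

        return 0;
}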
/**
- * fragment queue flags
+ * enum: fragment queue flags
*
* @INET_FRAG_FIRST_IN: first fragment has arrived
* @INET_FRAG_LAST_IN: final fragment has arrived
* @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
+ * @INET_FRAG_HASH_DEAD: inet_frag_kill() has not removed fq from rhashtable
+ * @INET_FRAG_DROP: if skbs must be dropped (instead of being consumed)
*/
enum {
INET_FRAG_FIRST_IN = BIT(0),
INET_FRAG_LAST_IN = BIT(1),
INET_FRAG_COMPLETE = BIT(2),
+ INET_FRAG_HASH_DEAD = BIT(3),
+ INET_FRAG_DROP = BIT(4),
+};
+
+struct frag_v4_compare_key {
+ __be32 saddr;
+ __be32 daddr;
+ u32 user;
+ u32 vif;
+ __be16 id;
+ u16 protocol;
+};
+
+struct frag_v6_compare_key {
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+ u32 user;
+ __be32 id;
+ u32 iif;
};
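
The compare keys double as the rhashtable lookup keys: a caller fills one on the stack and hands it to inet_frag_find() (declared below), which returns an existing queue or creates one. A sketch of the v4 side, loosely following what the IPv4 code does in ip_find(); iph, user and vif are assumed caller arguments:

/* Sketch: building a v4 key and looking up (or creating) the queue. */
static struct inet_frag_queue *demo_find(struct net *net,
                                         const struct iphdr *iph,
                                         u32 user, int vif)
{
        struct frag_v4_compare_key key = {
                .saddr    = iph->saddr,
                .daddr    = iph->daddr,
                .user     = user,
                .vif      = vif,
                .id       = iph->id,
                .protocol = iph->protocol,
        };

        return inet_frag_find(net->ipv4.fqdir, &key);
}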
/**
* struct inet_frag_queue - fragment queue
*
- * @lock: spinlock protecting the queue
+ * @node: rhash node
+ * @key: keys identifying this frag.
* @timer: queue expiration timer
- * @list: hash bucket list
+ * @lock: spinlock protecting this frag
* @refcnt: reference count of the queue
- * @fragments: received fragments head
+ * @rb_fragments: received fragments rb-tree root
* @fragments_tail: received fragments tail
+ * @last_run_head: the head of the last "run". see ip_fragment.c
* @stamp: timestamp of the last received fragment
* @len: total length of the original datagram
* @meat: length of received fragments so far
+ * @tstamp_type: stamp has a mono delivery time (EDT)
* @flags: fragment queue flags
* @max_size: maximum received fragment size
- * @net: namespace that this frag belongs to
- * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
+ * @fqdir: pointer to struct fqdir
+ * @rcu: rcu head for deferred freeing
*/
struct inet_frag_queue {
- spinlock_t lock;
+ struct rhash_head node;
+ union {
+ struct frag_v4_compare_key v4;
+ struct frag_v6_compare_key v6;
+ } key;
struct timer_list timer;
- struct hlist_node list;
+ spinlock_t lock;
refcount_t refcnt;
- struct sk_buff *fragments;
+ struct rb_root rb_fragments;
struct sk_buff *fragments_tail;
+ struct sk_buff *last_run_head;
ktime_t stamp;
int len;
int meat;
+ u8 tstamp_type;
__u8 flags;
u16 max_size;
- struct netns_frags *net;
- struct hlist_node list_evictor;
-};
-
-#define INETFRAGS_HASHSZ 1024
-
-/* averaged:
- * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
- * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
- * struct frag_queue))
- */
-#define INETFRAGS_MAXDEPTH 128
-
-struct inet_frag_bucket {
- struct hlist_head chain;
- spinlock_t chain_lock;
+ struct fqdir *fqdir;
+ struct rcu_head rcu;
};
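
Protocols embed struct inet_frag_queue as the first member of their own queue type, so the pointer handled by the generic code converts back with container_of(). A sketch of that layout, modeled on the IPv4 struct ipq but with hypothetical names and an abbreviated field set:

/* Sketch: a protocol-private queue wrapping the generic queue. */
struct demo_frag_queue {
        struct inet_frag_queue q;       /* generic part, keyed and hashed */
        u8 ecn;                         /* protocol-private reassembly state */
};

static inline struct demo_frag_queue *demo_fq(struct inet_frag_queue *fq)
{
        return container_of(fq, struct demo_frag_queue, q);
}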
struct inet_frags {
- struct inet_frag_bucket hash[INETFRAGS_HASHSZ];
-
- struct work_struct frags_work;
- unsigned int next_bucket;
- unsigned long last_rebuild_jiffies;
- bool rebuild;
-
- /* The first call to hashfn is responsible to initialize
- * rnd. This is best done with net_get_random_once.
- *
- * rnd_seqlock is used to let hash insertion detect
- * when it needs to re-lookup the hash chain to use.
- */
- u32 rnd;
- seqlock_t rnd_seqlock;
unsigned int qsize;
- unsigned int (*hashfn)(const struct inet_frag_queue *);
- bool (*match)(const struct inet_frag_queue *q,
- const void *arg);
void (*constructor)(struct inet_frag_queue *q,
const void *arg);
void (*destructor)(struct inet_frag_queue *);
- void (*frag_expire)(unsigned long data);
+ void (*frag_expire)(struct timer_list *t);
struct kmem_cache *frags_cachep;
const char *frags_cache_name;
+ struct rhashtable_params rhash_params;
+ refcount_t refcnt;
+ struct completion completion;
};
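
frag_expire now takes a struct timer_list *, so the expire handler recovers the queue from the embedded timer with container_of() instead of a cast from unsigned long. A sketch of an ops table under those assumptions (names hypothetical; the kill/put pattern inside the handler is shown further below):

/* Sketch: an inet_frags ops table with the timer-based expire callback. */
static void demo_frag_expire(struct timer_list *t)
{
        struct inet_frag_queue *fq = container_of(t, struct inet_frag_queue,
                                                  timer);

        /* expire handling: see the inet_frag_kill()/inet_frag_putn()
         * sketch after inet_frag_putn() below.
         */
        demo_expire_queue(fq);
}

static struct inet_frags demo_frags = {
        .qsize            = sizeof(struct demo_frag_queue),
        .frag_expire      = demo_frag_expire,
        .frags_cache_name = "demo_frag_cache",
        /* .constructor, .destructor and .rhash_params filled in as needed */
};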
int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);
-static inline int inet_frags_init_net(struct netns_frags *nf)
-{
- return percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
-}
-static inline void inet_frags_uninit_net(struct netns_frags *nf)
-{
- percpu_counter_destroy(&nf->mem);
-}
+int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net);
-void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
-
-void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
-void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
-struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
- struct inet_frags *f, void *key, unsigned int hash);
-
-void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
- const char *prefix);
-
-static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
+static inline void fqdir_pre_exit(struct fqdir *fqdir)
{
- if (refcount_dec_and_test(&q->refcnt))
- inet_frag_destroy(q, f);
-}
+ /* Prevent creation of new frags.
+ * Pairs with READ_ONCE() in inet_frag_find().
+ */
+ WRITE_ONCE(fqdir->high_thresh, 0);
-static inline bool inet_frag_evicting(struct inet_frag_queue *q)
-{
- return !hlist_unhashed(&q->list_evictor);
+ /* Pairs with READ_ONCE() in inet_frag_kill(), ip_expire()
+ * and ip6frag_expire_frag_queue().
+ */
+ WRITE_ONCE(fqdir->dead, true);
}
+void fqdir_exit(struct fqdir *fqdir);
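
Namespace teardown is two-phase: fqdir_pre_exit() stops new queues from being created and marks the fqdir dead, and fqdir_exit() then dismantles the hash table once the intervening RCU grace period has let in-flight readers finish. A sketch of how this pairs up in pernet_operations, with hypothetical function names:

/* Sketch: wiring the two-phase teardown into pernet_operations. */
static void __net_exit demo_frags_pre_exit_net(struct net *net)
{
        fqdir_pre_exit(net->ipv4.fqdir);
}

static void __net_exit demo_frags_exit_net(struct net *net)
{
        fqdir_exit(net->ipv4.fqdir);
}

static struct pernet_operations demo_frags_ops = {
        .init     = demo_frags_init_net,        /* see the fqdir_init() sketch above */
        .pre_exit = demo_frags_pre_exit_net,
        .exit     = demo_frags_exit_net,
};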
-/* Memory Tracking Functions. */
+void inet_frag_kill(struct inet_frag_queue *q, int *refs);
+void inet_frag_destroy(struct inet_frag_queue *q);
+struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key);
-/* The default percpu_counter batch size is not big enough to scale to
- * fragmentation mem acct sizes.
- * The mem size of a 64K fragment is approx:
- * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
- */
-static unsigned int frag_percpu_counter_batch = 130000;
+/* Free all skbs in the queue; return the sum of their truesizes. */
+unsigned int inet_frag_rbtree_purge(struct rb_root *root,
+ enum skb_drop_reason reason);
-static inline int frag_mem_limit(struct netns_frags *nf)
+static inline void inet_frag_putn(struct inet_frag_queue *q, int refs)
{
- return percpu_counter_read(&nf->mem);
+ if (refs && refcount_sub_and_test(refs, &q->refcnt))
+ inet_frag_destroy(q);
}
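
inet_frag_putn() drops a batch of references in one go: the caller keeps a local count while holding the queue lock, inet_frag_kill() adds the references it releases to that count, and everything is put after unlocking. A sketch of that pattern as an expire-style helper (name hypothetical), assuming the caller entered with one reference of its own:

/* Sketch: batching reference drops around inet_frag_kill(). */
static void demo_expire_queue(struct inet_frag_queue *fq)
{
        int refs = 1;                           /* reference we were called with */

        spin_lock(&fq->lock);
        if (!(fq->flags & INET_FRAG_COMPLETE))
                inet_frag_kill(fq, &refs);      /* adds its drops to refs */
        spin_unlock(&fq->lock);

        inet_frag_putn(fq, refs);               /* may free the queue */
}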
-static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
+/* Memory Tracking Functions. */
+
+static inline long frag_mem_limit(const struct fqdir *fqdir)
{
- percpu_counter_add_batch(&nf->mem, -i, frag_percpu_counter_batch);
+ return atomic_long_read(&fqdir->mem);
}
-static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
+static inline void sub_frag_mem_limit(struct fqdir *fqdir, long val)
{
- percpu_counter_add_batch(&nf->mem, i, frag_percpu_counter_batch);
+ atomic_long_sub(val, &fqdir->mem);
}
-static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
+static inline void add_frag_mem_limit(struct fqdir *fqdir, long val)
{
- return percpu_counter_sum_positive(&nf->mem);
+ atomic_long_add(val, &fqdir->mem);
}
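
Memory accounting is now a plain atomic_long_t per fqdir: the helpers only adjust the counter, and callers compare frag_mem_limit() against the sysctl thresholds themselves. A sketch of the usual charge/uncharge pattern, with hypothetical helper names and SKB_DROP_REASON_NOT_SPECIFIED standing in for a protocol-specific drop reason:

/* Sketch: charging skbs against the fqdir and giving memory back when
 * the queue is flushed.  The limit check is the caller's job.
 */
static int demo_charge(struct inet_frag_queue *fq, struct sk_buff *skb)
{
        struct fqdir *fqdir = fq->fqdir;

        if (frag_mem_limit(fqdir) > READ_ONCE(fqdir->high_thresh))
                return -ENOMEM;                 /* refuse to grow past the limit */

        add_frag_mem_limit(fqdir, skb->truesize);
        return 0;
}

static void demo_flush(struct inet_frag_queue *fq)
{
        unsigned int freed;

        freed = inet_frag_rbtree_purge(&fq->rb_fragments,
                                       SKB_DROP_REASON_NOT_SPECIFIED);
        sub_frag_mem_limit(fq->fqdir, freed);
}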
/* RFC 3168 support :
@@ -178,4 +179,16 @@ static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
extern const u8 ip_frag_ecn_table[16];
+/* Return values of inet_frag_queue_insert() */
+#define IPFRAG_OK 0
+#define IPFRAG_DUP 1
+#define IPFRAG_OVERLAP 2
+int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
+ int offset, int end);
+void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+ struct sk_buff *parent);
+void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
+ void *reasm_data, bool try_coalesce);
+struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
+
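
The insert/reassembly helpers give protocols a shared rb-tree path: insert each fragment, bail out on duplicates or overlaps, and once both end flags are set and meat equals len, run prepare/finish on the skb that completed the datagram (inet_frag_pull_head() is the separate helper used e.g. on expiry to detach the head fragment). A sketch of that caller flow with compressed error handling and a hypothetical function name:

/* Sketch: the common caller flow around the insert/reasm helpers.
 * offset/end describe where skb's payload sits in the original datagram.
 */
static int demo_queue_frag(struct inet_frag_queue *q, struct sk_buff *skb,
                           int offset, int end)
{
        struct sk_buff *prev_tail = q->fragments_tail;
        int err;

        err = inet_frag_queue_insert(q, skb, offset, end);
        if (err == IPFRAG_DUP) {
                consume_skb(skb);               /* exact duplicate, not an error */
                return 0;
        }
        if (err == IPFRAG_OVERLAP)
                return -EINVAL;                 /* overlap: caller kills the queue */

        add_frag_mem_limit(q->fqdir, skb->truesize);
        q->meat += end - offset;

        /* Reassemble once every byte of the original datagram has arrived. */
        if ((q->flags & INET_FRAG_FIRST_IN) && (q->flags & INET_FRAG_LAST_IN) &&
            q->meat == q->len) {
                void *reasm_data;

                reasm_data = inet_frag_reasm_prepare(q, skb, prev_tail);
                if (!reasm_data)
                        return -ENOMEM;
                inet_frag_reasm_finish(q, skb, reasm_data, true);
                /* skb is now the reassembled datagram, ready for delivery */
        }
        return 0;
}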
#endif