Diffstat (limited to 'include/net/inetpeer.h')
-rw-r--r--  include/net/inetpeer.h | 169
1 file changed, 63 insertions(+), 106 deletions(-)
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 53f464d7cddc..f475757daafb 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* INETPEER - A storage for permanent information about peers
*
@@ -15,179 +16,135 @@
#include <net/ipv6.h>
#include <linux/atomic.h>
-struct inetpeer_addr_base {
- union {
- __be32 a4;
- __be32 a6[4];
- };
+/* IPv4 address key for cache lookups */
+struct ipv4_addr_key {
+ __be32 addr;
+ int vif;
};
+#define INETPEER_MAXKEYSZ (sizeof(struct in6_addr) / sizeof(u32))
+
struct inetpeer_addr {
- struct inetpeer_addr_base addr;
+ union {
+ struct ipv4_addr_key a4;
+ struct in6_addr a6;
+ u32 key[INETPEER_MAXKEYSZ];
+ };
__u16 family;
};
struct inet_peer {
- /* group together avl_left,avl_right,v4daddr to speedup lookups */
- struct inet_peer __rcu *avl_left, *avl_right;
+ struct rb_node rb_node;
struct inetpeer_addr daddr;
- __u32 avl_height;
u32 metrics[RTAX_MAX];
u32 rate_tokens; /* rate limiting for ICMP */
+ u32 n_redirects;
unsigned long rate_last;
- union {
- struct list_head gc_list;
- struct rcu_head gc_rcu;
- };
/*
- * Once inet_peer is queued for deletion (refcnt == -1), following fields
- * are not available: rid, ip_id_count
+ * Once inet_peer is queued for deletion (refcnt == 0), following field
+ * is not available: rid
* We can share memory with rcu_head to help keep inet_peer small.
*/
union {
struct {
atomic_t rid; /* Frag reception counter */
- atomic_t ip_id_count; /* IP ID for the next packet */
};
struct rcu_head rcu;
- struct inet_peer *gc_next;
};
/* following fields might be frequently dirtied */
__u32 dtime; /* the time of last use of not referenced entries */
- atomic_t refcnt;
+ refcount_t refcnt;
};
struct inet_peer_base {
- struct inet_peer __rcu *root;
+ struct rb_root rb_root;
seqlock_t lock;
- u32 flush_seq;
int total;
};
-#define INETPEER_BASE_BIT 0x1UL
-
-static inline struct inet_peer *inetpeer_ptr(unsigned long val)
-{
- BUG_ON(val & INETPEER_BASE_BIT);
- return (struct inet_peer *) val;
-}
-
-static inline struct inet_peer_base *inetpeer_base_ptr(unsigned long val)
-{
- if (!(val & INETPEER_BASE_BIT))
- return NULL;
- val &= ~INETPEER_BASE_BIT;
- return (struct inet_peer_base *) val;
-}
+void inet_peer_base_init(struct inet_peer_base *);
-static inline bool inetpeer_ptr_is_peer(unsigned long val)
-{
- return !(val & INETPEER_BASE_BIT);
-}
+void inet_initpeers(void) __init;
-static inline void __inetpeer_ptr_set_peer(unsigned long *val, struct inet_peer *peer)
-{
- /* This implicitly clears INETPEER_BASE_BIT */
- *val = (unsigned long) peer;
-}
+#define INETPEER_METRICS_NEW (~(u32) 0)
-static inline bool inetpeer_ptr_set_peer(unsigned long *ptr, struct inet_peer *peer)
+static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip)
{
- unsigned long val = (unsigned long) peer;
- unsigned long orig = *ptr;
-
- if (!(orig & INETPEER_BASE_BIT) ||
- cmpxchg(ptr, orig, val) != orig)
- return false;
- return true;
+ iaddr->a4.addr = ip;
+ iaddr->a4.vif = 0;
+ iaddr->family = AF_INET;
}
-static inline void inetpeer_init_ptr(unsigned long *ptr, struct inet_peer_base *base)
+static inline __be32 inetpeer_get_addr_v4(struct inetpeer_addr *iaddr)
{
- *ptr = (unsigned long) base | INETPEER_BASE_BIT;
+ return iaddr->a4.addr;
}
-static inline void inetpeer_transfer_peer(unsigned long *to, unsigned long *from)
+static inline void inetpeer_set_addr_v6(struct inetpeer_addr *iaddr,
+ struct in6_addr *in6)
{
- unsigned long val = *from;
-
- *to = val;
- if (inetpeer_ptr_is_peer(val)) {
- struct inet_peer *peer = inetpeer_ptr(val);
- atomic_inc(&peer->refcnt);
- }
+ iaddr->a6 = *in6;
+ iaddr->family = AF_INET6;
}
-extern void inet_peer_base_init(struct inet_peer_base *);
-
-void inet_initpeers(void) __init;
-
-#define INETPEER_METRICS_NEW (~(u32) 0)
-
-static inline bool inet_metrics_new(const struct inet_peer *p)
+static inline struct in6_addr *inetpeer_get_addr_v6(struct inetpeer_addr *iaddr)
{
- return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW;
+ return &iaddr->a6;
}
/* can be called with or without local BH being disabled */
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
- const struct inetpeer_addr *daddr,
- int create);
+ const struct inetpeer_addr *daddr);
static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
__be32 v4daddr,
- int create)
+ int vif)
{
struct inetpeer_addr daddr;
- daddr.addr.a4 = v4daddr;
+ daddr.a4.addr = v4daddr;
+ daddr.a4.vif = vif;
daddr.family = AF_INET;
- return inet_getpeer(base, &daddr, create);
+ return inet_getpeer(base, &daddr);
}
static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
- const struct in6_addr *v6daddr,
- int create)
+ const struct in6_addr *v6daddr)
{
struct inetpeer_addr daddr;
- *(struct in6_addr *)daddr.addr.a6 = *v6daddr;
+ daddr.a6 = *v6daddr;
daddr.family = AF_INET6;
- return inet_getpeer(base, &daddr, create);
+ return inet_getpeer(base, &daddr);
}
-/* can be called from BH context or outside */
-extern void inet_putpeer(struct inet_peer *p);
-extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
-
-extern void inetpeer_invalidate_tree(struct inet_peer_base *);
-extern void inetpeer_invalidate_family(int family);
-
-/*
- * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
- * tcp_ts_stamp if no refcount is taken on inet_peer
- */
-static inline void inet_peer_refcheck(const struct inet_peer *p)
+static inline int inetpeer_addr_cmp(const struct inetpeer_addr *a,
+ const struct inetpeer_addr *b)
{
- WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
+ int i, n;
+
+ if (a->family == AF_INET)
+ n = sizeof(a->a4) / sizeof(u32);
+ else
+ n = sizeof(a->a6) / sizeof(u32);
+
+ for (i = 0; i < n; i++) {
+ if (a->key[i] == b->key[i])
+ continue;
+ if (a->key[i] < b->key[i])
+ return -1;
+ return 1;
+ }
+
+ return 0;
}
+/* can be called from BH context or outside */
+void inet_putpeer(struct inet_peer *p);
+bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
-/* can be called with or without local BH being disabled */
-static inline int inet_getid(struct inet_peer *p, int more)
-{
- int old, new;
- more++;
- inet_peer_refcheck(p);
- do {
- old = atomic_read(&p->ip_id_count);
- new = old + more;
- if (!new)
- new = 1;
- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
- return new;
-}
+void inetpeer_invalidate_tree(struct inet_peer_base *);
#endif /* _NET_INETPEER_H */
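
A quick illustration of the new keying scheme: the IPv4 {address, VIF} pair and the IPv6 address now share one word array (key[]), so inetpeer_addr_cmp() can order either family in the rb_root with a single word-wise loop. The following is a minimal, standalone userspace sketch of that idea; the type and function names (addr_key, addr_key_cmp, MAXKEYSZ) are simplified stand-ins for illustration, not the kernel's.

#include <stdint.h>
#include <stdio.h>

#define MAXKEYSZ 4	/* 128-bit IPv6 address, counted in 32-bit words */

struct addr_key {
	union {
		struct { uint32_t addr; int32_t vif; } a4;	/* IPv4: address + VIF */
		uint32_t a6[4];					/* IPv6: 128-bit address */
		uint32_t key[MAXKEYSZ];				/* shared view for ordering */
	};
	uint16_t family;					/* 4 or 6 in this sketch */
};

/* Word-wise ordering in the spirit of inetpeer_addr_cmp(): IPv4 keys
 * compare two words (addr + vif), IPv6 keys compare four. */
static int addr_key_cmp(const struct addr_key *a, const struct addr_key *b)
{
	int i, n = (a->family == 4) ? 2 : 4;

	for (i = 0; i < n; i++) {
		if (a->key[i] == b->key[i])
			continue;
		return a->key[i] < b->key[i] ? -1 : 1;
	}
	return 0;
}

int main(void)
{
	struct addr_key x = { .family = 4 }, y = { .family = 4 };

	x.a4.addr = 0x0a000001; x.a4.vif = 0;	/* 10.0.0.1 on vif 0 */
	y.a4.addr = 0x0a000001; y.a4.vif = 3;	/* same address, different VIF */

	printf("cmp = %d\n", addr_key_cmp(&x, &y));	/* -1: keys differ in word 1 */
	return 0;
}

Folding the VIF into the IPv4 key appears to be what lets the same address reached through different L3 master devices map to distinct peer entries, while IPv6 simply uses all four address words.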
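
On the caller side, inet_getpeer() and its wrappers no longer expose a create flag, IPv4 lookups take a VIF alongside the address, and references are released with inet_putpeer() against a refcount_t. Below is a hedged, kernel-style sketch of an ICMP-like rate-limit check written against only the declarations in this header; the helper's name, its parameters, and the overall flow are illustrative assumptions, not code from the commit.

#include <net/inetpeer.h>

/* Hypothetical rate-limit helper: look up the peer entry for daddr on a
 * given VIF, ask the token-bucket limiter, then drop the reference. */
static bool peer_allows_send(struct inet_peer_base *base, __be32 daddr,
			     int vif, int timeout)
{
	struct inet_peer *peer;
	bool allow = true;

	peer = inet_getpeer_v4(base, daddr, vif);	/* no create flag anymore */
	if (peer) {
		allow = inet_peer_xrlim_allow(peer, timeout);
		inet_putpeer(peer);			/* drops the refcount_t ref */
	}
	return allow;
}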