author     David S. Miller <davem@davemloft.net>    2014-12-05 20:56:46 -0800
committer  David S. Miller <davem@davemloft.net>    2014-12-05 20:56:46 -0800
commit     244ebd9f8fa8beb7b37bdeebd6c5308b61f98aef (patch)
tree       8b0a72056747f0bb9232fdafb480b0501dc83734 /net/netfilter
parent     ddd5c50f9bec7ffab5d28c5dd244db8a4c3f27e7 (diff)
parent     cac3763967362ace7996532ad3933f493a928a1b (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next
Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

The following batch contains netfilter updates for net-next: enhancements
for xt_recent, skipping the zeroing of the timer in conntrack, a fix for a
linking problem with the recent redirect support for nf_tables, ipset
updates and a couple of cleanups. More specifically, they are:

1) Raise the maximum number of packets remembered per IP address in
   xt_recent while retaining backward compatibility, from Florian Westphal.

2) Skip zeroing of the timer area in nf_conn objects, also from Florian.

3) Inspect IPv4 and IPv6 traffic from the bridge to allow filtering using
   meta l4proto and the transport layer header, from Alvaro Neira.

4) Fix linking problems in the new redirect support when CONFIG_IPV6=n and
   IP6_NF_IPTABLES=n.

And ipset updates from Jozsef Kadlecsik:

5) Support updating element extensions when the set is full (fixes
   netfilter bugzilla id 880).

6) Fix the set match with 32-bit userspace / 64-bit kernel.

7) Indicate explicitly when /0 networks are supported in ipset.

8) Simplify cidr handling for hash:*net* types.

9) Allocate the proper amount of memory when /0 networks are supported.

10) Explicitly add padding elements to hash:net,net and hash:net,port,net,
    because the elements must be u32 sized for the hash function used.

Jozsef is also cooking an ipset RCU conversion, which should land soon if
it reaches the merge window in time.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
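
For item 5, a quick sketch may help picture the behaviour: when a set created
with the forceadd option is already at maxelem, the add path now picks a slot
in the destination bucket to overwrite instead of returning
-IPSET_ERR_HASH_FULL, which is what lets an element's extensions be refreshed.
The following user-space model is only an illustration with hypothetical names
(pick_slot is not a kernel function); it mirrors the reordered checks visible
in the ip_set_hash_gen.h hunk further down.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified model of the new add-path ordering: the bucket
 * is scanned first, and only afterwards are the "set full" cases handled,
 * so a full set with forceadd can overwrite slot 0 of the target bucket. */
struct bucket { unsigned int pos; };          /* slots in use */
struct set    { unsigned int elements, maxelem; bool forceadd; };

static int pick_slot(const struct set *s, const struct bucket *b)
{
	if (s->elements >= s->maxelem && s->forceadd && b->pos)
		return 0;                     /* reuse the first entry */
	if (s->elements >= s->maxelem)
		return -1;                    /* would be -IPSET_ERR_HASH_FULL */
	return (int)b->pos;                   /* append to the bucket */
}

int main(void)
{
	struct set s = { .elements = 4, .maxelem = 4, .forceadd = true };
	struct bucket b = { .pos = 2 };

	printf("slot = %d\n", pick_slot(&s, &b));   /* prints 0: overwrite */
	return 0;
}
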
Diffstat (limited to 'net/netfilter')
-rw-r--r--  net/netfilter/Kconfig | 10
-rw-r--r--  net/netfilter/Makefile | 1
-rw-r--r--  net/netfilter/ipset/ip_set_hash_gen.h | 101
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netiface.c | 1
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netnet.c | 2
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netportnet.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 11
-rw-r--r--  net/netfilter/nf_nat_redirect.c | 127
-rw-r--r--  net/netfilter/xt_REDIRECT.c | 3
-rw-r--r--  net/netfilter/xt_recent.c | 64
-rw-r--r--  net/netfilter/xt_set.c | 73
11 files changed, 310 insertions, 85 deletions
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 57f15a9aa481..b02660fa9eb0 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -411,6 +411,13 @@ config NF_NAT_TFTP
depends on NF_CONNTRACK && NF_NAT
default NF_NAT && NF_CONNTRACK_TFTP
+config NF_NAT_REDIRECT
+ tristate "IPv4/IPv6 redirect support"
+ depends on NF_NAT
+ help
+ This is the kernel functionality to redirect packets to local
+ machine through NAT.
+
config NETFILTER_SYNPROXY
tristate
@@ -844,8 +851,7 @@ config NETFILTER_XT_TARGET_RATEEST
config NETFILTER_XT_TARGET_REDIRECT
tristate "REDIRECT target support"
depends on NF_NAT
- select NF_NAT_REDIRECT_IPV4 if NF_NAT_IPV4
- select NF_NAT_REDIRECT_IPV6 if NF_NAT_IPV6
+ select NF_NAT_REDIRECT
---help---
REDIRECT is a special case of NAT: all incoming connections are
mapped onto the incoming interface's address, causing the packets to
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index f3eb4680f2ec..89f73a9e9874 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -51,6 +51,7 @@ nf_nat-y := nf_nat_core.o nf_nat_proto_unknown.o nf_nat_proto_common.o \
obj-$(CONFIG_NF_LOG_COMMON) += nf_log_common.o
obj-$(CONFIG_NF_NAT) += nf_nat.o
+obj-$(CONFIG_NF_NAT_REDIRECT) += nf_nat_redirect.o
# NAT protocols (nf_nat)
obj-$(CONFIG_NF_NAT_PROTO_DCCP) += nf_nat_proto_dccp.o
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index fee7c64e4dd1..974ff386db0f 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -147,16 +147,22 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
#else
#define __CIDR(cidr, i) (cidr)
#endif
+
+/* cidr + 1 is stored in net_prefixes to support /0 */
+#define SCIDR(cidr, i) (__CIDR(cidr, i) + 1)
+
#ifdef IP_SET_HASH_WITH_NETS_PACKED
-/* When cidr is packed with nomatch, cidr - 1 is stored in the entry */
-#define CIDR(cidr, i) (__CIDR(cidr, i) + 1)
+/* When cidr is packed with nomatch, cidr - 1 is stored in the data entry */
+#define GCIDR(cidr, i) (__CIDR(cidr, i) + 1)
+#define NCIDR(cidr) (cidr)
#else
-#define CIDR(cidr, i) (__CIDR(cidr, i))
+#define GCIDR(cidr, i) (__CIDR(cidr, i))
+#define NCIDR(cidr) (cidr - 1)
#endif
#define SET_HOST_MASK(family) (family == AF_INET ? 32 : 128)
-#ifdef IP_SET_HASH_WITH_MULTI
+#ifdef IP_SET_HASH_WITH_NET0
#define NLEN(family) (SET_HOST_MASK(family) + 1)
#else
#define NLEN(family) SET_HOST_MASK(family)
@@ -292,24 +298,22 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
int i, j;
/* Add in increasing prefix order, so larger cidr first */
- for (i = 0, j = -1; i < nets_length && h->nets[i].nets[n]; i++) {
+ for (i = 0, j = -1; i < nets_length && h->nets[i].cidr[n]; i++) {
if (j != -1)
continue;
else if (h->nets[i].cidr[n] < cidr)
j = i;
else if (h->nets[i].cidr[n] == cidr) {
- h->nets[i].nets[n]++;
+ h->nets[cidr - 1].nets[n]++;
return;
}
}
if (j != -1) {
- for (; i > j; i--) {
+ for (; i > j; i--)
h->nets[i].cidr[n] = h->nets[i - 1].cidr[n];
- h->nets[i].nets[n] = h->nets[i - 1].nets[n];
- }
}
h->nets[i].cidr[n] = cidr;
- h->nets[i].nets[n] = 1;
+ h->nets[cidr - 1].nets[n] = 1;
}
static void
@@ -320,16 +324,12 @@ mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
for (i = 0; i < nets_length; i++) {
if (h->nets[i].cidr[n] != cidr)
continue;
- if (h->nets[i].nets[n] > 1 || i == net_end ||
- h->nets[i + 1].nets[n] == 0) {
- h->nets[i].nets[n]--;
+ h->nets[cidr -1].nets[n]--;
+ if (h->nets[cidr -1].nets[n] > 0)
return;
- }
- for (j = i; j < net_end && h->nets[j].nets[n]; j++) {
+ for (j = i; j < net_end && h->nets[j].cidr[n]; j++)
h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
- h->nets[j].nets[n] = h->nets[j + 1].nets[n];
- }
- h->nets[j].nets[n] = 0;
+ h->nets[j].cidr[n] = 0;
return;
}
}
@@ -486,7 +486,7 @@ mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
pr_debug("expired %u/%u\n", i, j);
#ifdef IP_SET_HASH_WITH_NETS
for (k = 0; k < IPSET_NET_COUNT; k++)
- mtype_del_cidr(h, CIDR(data->cidr, k),
+ mtype_del_cidr(h, SCIDR(data->cidr, k),
nets_length, k);
#endif
ip_set_ext_destroy(set, data);
@@ -633,29 +633,6 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
bool flag_exist = flags & IPSET_FLAG_EXIST;
u32 key, multi = 0;
- if (h->elements >= h->maxelem && SET_WITH_FORCEADD(set)) {
- rcu_read_lock_bh();
- t = rcu_dereference_bh(h->table);
- key = HKEY(value, h->initval, t->htable_bits);
- n = hbucket(t,key);
- if (n->pos) {
- /* Choosing the first entry in the array to replace */
- j = 0;
- goto reuse_slot;
- }
- rcu_read_unlock_bh();
- }
- if (SET_WITH_TIMEOUT(set) && h->elements >= h->maxelem)
- /* FIXME: when set is full, we slow down here */
- mtype_expire(set, h, NLEN(set->family), set->dsize);
-
- if (h->elements >= h->maxelem) {
- if (net_ratelimit())
- pr_warn("Set %s is full, maxelem %u reached\n",
- set->name, h->maxelem);
- return -IPSET_ERR_HASH_FULL;
- }
-
rcu_read_lock_bh();
t = rcu_dereference_bh(h->table);
key = HKEY(value, h->initval, t->htable_bits);
@@ -680,15 +657,32 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
j != AHASH_MAX(h) + 1)
j = i;
}
+ if (h->elements >= h->maxelem && SET_WITH_FORCEADD(set) && n->pos) {
+ /* Choosing the first entry in the array to replace */
+ j = 0;
+ goto reuse_slot;
+ }
+ if (SET_WITH_TIMEOUT(set) && h->elements >= h->maxelem)
+ /* FIXME: when set is full, we slow down here */
+ mtype_expire(set, h, NLEN(set->family), set->dsize);
+
+ if (h->elements >= h->maxelem) {
+ if (net_ratelimit())
+ pr_warn("Set %s is full, maxelem %u reached\n",
+ set->name, h->maxelem);
+ ret = -IPSET_ERR_HASH_FULL;
+ goto out;
+ }
+
reuse_slot:
if (j != AHASH_MAX(h) + 1) {
/* Fill out reused slot */
data = ahash_data(n, j, set->dsize);
#ifdef IP_SET_HASH_WITH_NETS
for (i = 0; i < IPSET_NET_COUNT; i++) {
- mtype_del_cidr(h, CIDR(data->cidr, i),
+ mtype_del_cidr(h, SCIDR(data->cidr, i),
NLEN(set->family), i);
- mtype_add_cidr(h, CIDR(d->cidr, i),
+ mtype_add_cidr(h, SCIDR(d->cidr, i),
NLEN(set->family), i);
}
#endif
@@ -705,7 +699,7 @@ reuse_slot:
data = ahash_data(n, n->pos++, set->dsize);
#ifdef IP_SET_HASH_WITH_NETS
for (i = 0; i < IPSET_NET_COUNT; i++)
- mtype_add_cidr(h, CIDR(d->cidr, i), NLEN(set->family),
+ mtype_add_cidr(h, SCIDR(d->cidr, i), NLEN(set->family),
i);
#endif
h->elements++;
@@ -766,7 +760,7 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
h->elements--;
#ifdef IP_SET_HASH_WITH_NETS
for (j = 0; j < IPSET_NET_COUNT; j++)
- mtype_del_cidr(h, CIDR(d->cidr, j), NLEN(set->family),
+ mtype_del_cidr(h, SCIDR(d->cidr, j), NLEN(set->family),
j);
#endif
ip_set_ext_destroy(set, data);
@@ -827,15 +821,15 @@ mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
u8 nets_length = NLEN(set->family);
pr_debug("test by nets\n");
- for (; j < nets_length && h->nets[j].nets[0] && !multi; j++) {
+ for (; j < nets_length && h->nets[j].cidr[0] && !multi; j++) {
#if IPSET_NET_COUNT == 2
mtype_data_reset_elem(d, &orig);
- mtype_data_netmask(d, h->nets[j].cidr[0], false);
- for (k = 0; k < nets_length && h->nets[k].nets[1] && !multi;
+ mtype_data_netmask(d, NCIDR(h->nets[j].cidr[0]), false);
+ for (k = 0; k < nets_length && h->nets[k].cidr[1] && !multi;
k++) {
- mtype_data_netmask(d, h->nets[k].cidr[1], true);
+ mtype_data_netmask(d, NCIDR(h->nets[k].cidr[1]), true);
#else
- mtype_data_netmask(d, h->nets[j].cidr[0]);
+ mtype_data_netmask(d, NCIDR(h->nets[j].cidr[0]));
#endif
key = HKEY(d, h->initval, t->htable_bits);
n = hbucket(t, key);
@@ -883,7 +877,7 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
/* If we test an IP address and not a network address,
* try all possible network sizes */
for (i = 0; i < IPSET_NET_COUNT; i++)
- if (CIDR(d->cidr, i) != SET_HOST_MASK(set->family))
+ if (GCIDR(d->cidr, i) != SET_HOST_MASK(set->family))
break;
if (i == IPSET_NET_COUNT) {
ret = mtype_test_cidrs(set, d, ext, mext, flags);
@@ -1107,8 +1101,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
hsize = sizeof(*h);
#ifdef IP_SET_HASH_WITH_NETS
- hsize += sizeof(struct net_prefixes) *
- (set->family == NFPROTO_IPV4 ? 32 : 128);
+ hsize += sizeof(struct net_prefixes) * NLEN(set->family);
#endif
h = kzalloc(hsize, GFP_KERNEL);
if (!h)
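
The SCIDR/GCIDR/NCIDR macros introduced above all revolve around one
convention: the per-set net_prefixes bookkeeping stores cidr + 1, so a /0
network occupies a non-zero slot and a stored 0 can keep meaning "unused".
A standalone sketch of that encode/decode, using hypothetical helper names
and mirroring only the non-packed configuration:

#include <assert.h>
#include <stdint.h>

/* Hypothetical helpers mirroring the header's convention: the bookkeeping
 * array stores cidr + 1, so /0 maps to 1 and a stored value of 0 still
 * means "slot unused" (roughly SCIDR to store, NCIDR to read back). */
static uint8_t store_cidr(uint8_t cidr) { return cidr + 1; }
static uint8_t fetch_cidr(uint8_t slot) { return slot - 1; }

int main(void)
{
	/* /0 is representable and distinguishable from an empty slot. */
	assert(store_cidr(0) == 1);
	assert(fetch_cidr(store_cidr(0)) == 0);
	assert(fetch_cidr(store_cidr(24)) == 24);
	return 0;
}
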
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index 35dd35873442..758b002130d9 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -115,6 +115,7 @@ iface_add(struct rb_root *root, const char **iface)
#define IP_SET_HASH_WITH_NETS
#define IP_SET_HASH_WITH_RBTREE
#define IP_SET_HASH_WITH_MULTI
+#define IP_SET_HASH_WITH_NET0
#define STREQ(a, b) (strcmp(a, b) == 0)
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
index da00284b3571..ea8772afb6e7 100644
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -46,6 +46,7 @@ struct hash_netnet4_elem {
__be64 ipcmp;
};
u8 nomatch;
+ u8 padding;
union {
u8 cidr[2];
u16 ccmp;
@@ -271,6 +272,7 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
struct hash_netnet6_elem {
union nf_inet_addr ip[2];
u8 nomatch;
+ u8 padding;
union {
u8 cidr[2];
u16 ccmp;
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
index b8053d675fc3..bfaa94c7baa7 100644
--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -53,6 +53,7 @@ struct hash_netportnet4_elem {
u8 cidr[2];
u16 ccmp;
};
+ u16 padding;
u8 nomatch:1;
u8 proto;
};
@@ -324,6 +325,7 @@ struct hash_netportnet6_elem {
u8 cidr[2];
u16 ccmp;
};
+ u16 padding;
u8 nomatch:1;
u8 proto;
};
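
The padding members added to the netnet and netportnet elements exist
because, as the commit message puts it, the elements must be u32 sized for
the hash function used (a jhash2-style routine that consumes 32-bit words).
Below is a hypothetical stand-in struct, not the kernel's exact element
layout or key-length computation, showing the kind of invariant the explicit
padding preserves:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical two-cidr element: without the explicit 16-bit padding, the
 * hashed prefix (everything before 'nomatch') would end at offset 10,
 * which a u32-word hash function cannot consume cleanly. */
struct demo_elem {
	uint32_t ip[2];
	uint8_t  cidr[2];
	uint16_t padding;      /* added explicitly, like the patch does */
	uint8_t  nomatch;      /* not part of the hashed key */
	uint8_t  proto;
};

/* Compile-time check that the hashed region is a whole number of u32s. */
_Static_assert(offsetof(struct demo_elem, nomatch) % sizeof(uint32_t) == 0,
	       "hashed key must be a whole number of u32 words");

int main(void) { return 0; }
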
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 5016a6929085..a11674806707 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -824,22 +824,19 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
atomic_dec(&net->ct.count);
return ERR_PTR(-ENOMEM);
}
- /*
- * Let ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next
- * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
- */
- memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
- offsetof(struct nf_conn, proto) -
- offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
spin_lock_init(&ct->lock);
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
/* save hash for reusing when confirming */
*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
+ ct->status = 0;
/* Don't set timer yet: wait for confirmation */
setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
write_pnet(&ct->ct_net, net);
+ memset(&ct->__nfct_init_offset[0], 0,
+ offsetof(struct nf_conn, proto) -
+ offsetof(struct nf_conn, __nfct_init_offset[0]));
#ifdef CONFIG_NF_CONNTRACK_ZONES
if (zone) {
struct nf_conntrack_zone *nf_ct_zone;
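
The conntrack hunk above narrows the zeroing so it starts at the
__nfct_init_offset marker inside struct nf_conn and stops before proto,
leaving the freshly initialized tuplehash, status, timer and ct_net fields
alone. A small user-space sketch of that offsetof-based partial memset on a
hypothetical struct (the kernel marks the start of the cleared region with a
zero-sized member; here the first cleared field is simply named directly):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical object standing in for struct nf_conn: the fields up to and
 * including 'timer_cookie' are set up explicitly, so only [mark, proto)
 * needs to be zeroed. */
struct demo_conn {
	unsigned long tuplehash[2];
	unsigned long status;
	unsigned long timer_cookie;
	int  mark;                    /* zeroing starts here */
	int  secmark;
	long proto;                   /* zeroing stops here */
};

int main(void)
{
	struct demo_conn c;

	c.tuplehash[0] = 1;
	c.tuplehash[1] = 2;
	c.status = 0;
	c.timer_cookie = 0xdead;      /* must survive the partial memset */

	memset((char *)&c + offsetof(struct demo_conn, mark), 0,
	       offsetof(struct demo_conn, proto) -
	       offsetof(struct demo_conn, mark));

	printf("timer_cookie=%#lx mark=%d secmark=%d\n",
	       c.timer_cookie, c.mark, c.secmark);
	return 0;
}
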
diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c
new file mode 100644
index 000000000000..97b75f9bfbcd
--- /dev/null
+++ b/net/netfilter/nf_nat_redirect.c
@@ -0,0 +1,127 @@
+/*
+ * (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on Rusty Russell's IPv4 REDIRECT target. Development of IPv6
+ * NAT funded by Astaro.
+ */
+
+#include <linux/if.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/netfilter.h>
+#include <linux/types.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter/x_tables.h>
+#include <net/addrconf.h>
+#include <net/checksum.h>
+#include <net/protocol.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_redirect.h>
+
+unsigned int
+nf_nat_redirect_ipv4(struct sk_buff *skb,
+ const struct nf_nat_ipv4_multi_range_compat *mr,
+ unsigned int hooknum)
+{
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+ __be32 newdst;
+ struct nf_nat_range newrange;
+
+ NF_CT_ASSERT(hooknum == NF_INET_PRE_ROUTING ||
+ hooknum == NF_INET_LOCAL_OUT);
+
+ ct = nf_ct_get(skb, &ctinfo);
+ NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED));
+
+ /* Local packets: make them go to loopback */
+ if (hooknum == NF_INET_LOCAL_OUT) {
+ newdst = htonl(0x7F000001);
+ } else {
+ struct in_device *indev;
+ struct in_ifaddr *ifa;
+
+ newdst = 0;
+
+ rcu_read_lock();
+ indev = __in_dev_get_rcu(skb->dev);
+ if (indev != NULL) {
+ ifa = indev->ifa_list;
+ newdst = ifa->ifa_local;
+ }
+ rcu_read_unlock();
+
+ if (!newdst)
+ return NF_DROP;
+ }
+
+ /* Transfer from original range. */
+ memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
+ memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
+ newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
+ newrange.min_addr.ip = newdst;
+ newrange.max_addr.ip = newdst;
+ newrange.min_proto = mr->range[0].min;
+ newrange.max_proto = mr->range[0].max;
+
+ /* Hand modified range to generic setup. */
+ return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
+}
+EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv4);
+
+static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT;
+
+unsigned int
+nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
+ unsigned int hooknum)
+{
+ struct nf_nat_range newrange;
+ struct in6_addr newdst;
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (hooknum == NF_INET_LOCAL_OUT) {
+ newdst = loopback_addr;
+ } else {
+ struct inet6_dev *idev;
+ struct inet6_ifaddr *ifa;
+ bool addr = false;
+
+ rcu_read_lock();
+ idev = __in6_dev_get(skb->dev);
+ if (idev != NULL) {
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ newdst = ifa->addr;
+ addr = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ if (!addr)
+ return NF_DROP;
+ }
+
+ newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
+ newrange.min_addr.in6 = newdst;
+ newrange.max_addr.in6 = newdst;
+ newrange.min_proto = range->min_proto;
+ newrange.max_proto = range->max_proto;
+
+ return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_DST);
+}
+EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv6);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
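
With nf_nat_redirect_ipv4() and nf_nat_redirect_ipv6() exported, an Xtables
REDIRECT target only needs to forward its parameters, which is why the
xt_REDIRECT.c hunk below simply switches to the shared header. The sketch
here is a hedged illustration of such a caller, assuming the 2014-era
xt_action_param layout; it is not copied verbatim from xt_REDIRECT.c and is
a fragment that would only build inside a kernel module that also registers
the targets with xt_register_targets().

#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_redirect.h>

static unsigned int
demo_redirect_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
	/* par->targinfo carries the nf_nat_ipv4_multi_range_compat that
	 * iptables filled in from --to-ports. */
	return nf_nat_redirect_ipv4(skb, par->targinfo, par->hooknum);
}

static unsigned int
demo_redirect_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct nf_nat_range *range = par->targinfo;

	return nf_nat_redirect_ipv6(skb, range, par->hooknum);
}
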
diff --git a/net/netfilter/xt_REDIRECT.c b/net/netfilter/xt_REDIRECT.c
index b6ec67efd900..03f0b370e178 100644
--- a/net/netfilter/xt_REDIRECT.c
+++ b/net/netfilter/xt_REDIRECT.c
@@ -26,8 +26,7 @@
#include <net/checksum.h>
#include <net/protocol.h>
#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/ipv4/nf_nat_redirect.h>
-#include <net/netfilter/ipv6/nf_nat_redirect.h>
+#include <net/netfilter/nf_nat_redirect.h>
static unsigned int
redirect_tg6(struct sk_buff *skb, const struct xt_action_param *par)
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index a9faae89f955..30dbe34915ae 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -43,25 +43,29 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_recent");
MODULE_ALIAS("ip6t_recent");
-static unsigned int ip_list_tot = 100;
-static unsigned int ip_pkt_list_tot = 20;
-static unsigned int ip_list_hash_size = 0;
-static unsigned int ip_list_perms = 0644;
-static unsigned int ip_list_uid = 0;
-static unsigned int ip_list_gid = 0;
+static unsigned int ip_list_tot __read_mostly = 100;
+static unsigned int ip_list_hash_size __read_mostly;
+static unsigned int ip_list_perms __read_mostly = 0644;
+static unsigned int ip_list_uid __read_mostly;
+static unsigned int ip_list_gid __read_mostly;
module_param(ip_list_tot, uint, 0400);
-module_param(ip_pkt_list_tot, uint, 0400);
module_param(ip_list_hash_size, uint, 0400);
module_param(ip_list_perms, uint, 0400);
module_param(ip_list_uid, uint, S_IRUGO | S_IWUSR);
module_param(ip_list_gid, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ip_list_tot, "number of IPs to remember per list");
-MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)");
MODULE_PARM_DESC(ip_list_hash_size, "size of hash table used to look up IPs");
MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/xt_recent/* files");
MODULE_PARM_DESC(ip_list_uid, "default owner of /proc/net/xt_recent/* files");
MODULE_PARM_DESC(ip_list_gid, "default owning group of /proc/net/xt_recent/* files");
+/* retained for backwards compatibility */
+static unsigned int ip_pkt_list_tot __read_mostly;
+module_param(ip_pkt_list_tot, uint, 0400);
+MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)");
+
+#define XT_RECENT_MAX_NSTAMPS 256
+
struct recent_entry {
struct list_head list;
struct list_head lru_list;
@@ -79,6 +83,7 @@ struct recent_table {
union nf_inet_addr mask;
unsigned int refcnt;
unsigned int entries;
+ u8 nstamps_max_mask;
struct list_head lru_list;
struct list_head iphash[0];
};
@@ -90,7 +95,8 @@ struct recent_net {
#endif
};
-static int recent_net_id;
+static int recent_net_id __read_mostly;
+
static inline struct recent_net *recent_pernet(struct net *net)
{
return net_generic(net, recent_net_id);
@@ -171,12 +177,15 @@ recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr,
u_int16_t family, u_int8_t ttl)
{
struct recent_entry *e;
+ unsigned int nstamps_max = t->nstamps_max_mask;
if (t->entries >= ip_list_tot) {
e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
recent_entry_remove(t, e);
}
- e = kmalloc(sizeof(*e) + sizeof(e->stamps[0]) * ip_pkt_list_tot,
+
+ nstamps_max += 1;
+ e = kmalloc(sizeof(*e) + sizeof(e->stamps[0]) * nstamps_max,
GFP_ATOMIC);
if (e == NULL)
return NULL;
@@ -197,7 +206,7 @@ recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr,
static void recent_entry_update(struct recent_table *t, struct recent_entry *e)
{
- e->index %= ip_pkt_list_tot;
+ e->index &= t->nstamps_max_mask;
e->stamps[e->index++] = jiffies;
if (e->index > e->nstamps)
e->nstamps = e->index;
@@ -326,6 +335,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
kuid_t uid;
kgid_t gid;
#endif
+ unsigned int nstamp_mask;
unsigned int i;
int ret = -EINVAL;
size_t sz;
@@ -349,19 +359,33 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
return -EINVAL;
if ((info->check_set & XT_RECENT_REAP) && !info->seconds)
return -EINVAL;
- if (info->hit_count > ip_pkt_list_tot) {
- pr_info("hitcount (%u) is larger than "
- "packets to be remembered (%u)\n",
- info->hit_count, ip_pkt_list_tot);
+ if (info->hit_count >= XT_RECENT_MAX_NSTAMPS) {
+ pr_info("hitcount (%u) is larger than allowed maximum (%u)\n",
+ info->hit_count, XT_RECENT_MAX_NSTAMPS - 1);
return -EINVAL;
}
if (info->name[0] == '\0' ||
strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN)
return -EINVAL;
+ if (ip_pkt_list_tot && info->hit_count < ip_pkt_list_tot)
+ nstamp_mask = roundup_pow_of_two(ip_pkt_list_tot) - 1;
+ else if (info->hit_count)
+ nstamp_mask = roundup_pow_of_two(info->hit_count) - 1;
+ else
+ nstamp_mask = 32 - 1;
+
mutex_lock(&recent_mutex);
t = recent_table_lookup(recent_net, info->name);
if (t != NULL) {
+ if (info->hit_count > t->nstamps_max_mask) {
+ pr_info("hitcount (%u) is larger than packets to be remembered (%u) for table %s\n",
+ info->hit_count, t->nstamps_max_mask + 1,
+ info->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
t->refcnt++;
ret = 0;
goto out;
@@ -377,6 +401,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
goto out;
}
t->refcnt = 1;
+ t->nstamps_max_mask = nstamp_mask;
memcpy(&t->mask, &info->mask, sizeof(t->mask));
strcpy(t->name, info->name);
@@ -497,9 +522,12 @@ static void recent_seq_stop(struct seq_file *s, void *v)
static int recent_seq_show(struct seq_file *seq, void *v)
{
const struct recent_entry *e = v;
+ struct recent_iter_state *st = seq->private;
+ const struct recent_table *t = st->table;
unsigned int i;
- i = (e->index - 1) % ip_pkt_list_tot;
+ i = (e->index - 1) & t->nstamps_max_mask;
+
if (e->family == NFPROTO_IPV4)
seq_printf(seq, "src=%pI4 ttl: %u last_seen: %lu oldest_pkt: %u",
&e->addr.ip, e->ttl, e->stamps[i], e->index);
@@ -717,7 +745,9 @@ static int __init recent_mt_init(void)
{
int err;
- if (!ip_list_tot || !ip_pkt_list_tot || ip_pkt_list_tot > 255)
+ BUILD_BUG_ON_NOT_POWER_OF_2(XT_RECENT_MAX_NSTAMPS);
+
+ if (!ip_list_tot || ip_pkt_list_tot >= XT_RECENT_MAX_NSTAMPS)
return -EINVAL;
ip_list_hash_size = 1 << fls(ip_list_tot);
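
The xt_recent rework replaces the global ip_pkt_list_tot modulo with a
per-table power-of-two mask (nstamps_max_mask), so the stamp ring index can
wrap with a cheap AND. A user-space sketch of that arithmetic, with
roundup_pow_of_two open-coded rather than taken from the kernel's log2.h:

#include <stdio.h>

/* Open-coded stand-in for the kernel's roundup_pow_of_two(). */
static unsigned int roundup_pow_of_two_u32(unsigned int x)
{
	unsigned int p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int hit_count = 20;     /* e.g. --hitcount 20 */
	unsigned int mask = roundup_pow_of_two_u32(hit_count) - 1;
	unsigned int index = 0, i;

	/* The table stores the mask (here 31), so up to mask + 1 = 32
	 * stamps are kept, and the ring index wraps with '&' not '%'. */
	printf("nstamps_max_mask = %u, stamps kept = %u\n", mask, mask + 1);

	for (i = 0; i < 70; i++)
		index = (index + 1) & mask;
	printf("index after 70 updates = %u\n", index);   /* 70 mod 32 = 6 */
	return 0;
}
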
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 5732cd64acc0..0d47afea9682 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -157,7 +157,7 @@ set_match_v1_destroy(const struct xt_mtdtor_param *par)
/* Revision 3 match */
static bool
-match_counter(u64 counter, const struct ip_set_counter_match *info)
+match_counter0(u64 counter, const struct ip_set_counter_match0 *info)
{
switch (info->op) {
case IPSET_COUNTER_NONE:
@@ -192,14 +192,60 @@ set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS))
return ret;
- if (!match_counter(opt.ext.packets, &info->packets))
+ if (!match_counter0(opt.ext.packets, &info->packets))
return 0;
- return match_counter(opt.ext.bytes, &info->bytes);
+ return match_counter0(opt.ext.bytes, &info->bytes);
}
#define set_match_v3_checkentry set_match_v1_checkentry
#define set_match_v3_destroy set_match_v1_destroy
+/* Revision 4 match */
+
+static bool
+match_counter(u64 counter, const struct ip_set_counter_match *info)
+{
+ switch (info->op) {
+ case IPSET_COUNTER_NONE:
+ return true;
+ case IPSET_COUNTER_EQ:
+ return counter == info->value;
+ case IPSET_COUNTER_NE:
+ return counter != info->value;
+ case IPSET_COUNTER_LT:
+ return counter < info->value;
+ case IPSET_COUNTER_GT:
+ return counter > info->value;
+ }
+ return false;
+}
+
+static bool
+set_match_v4(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_set_info_match_v4 *info = par->matchinfo;
+ ADT_OPT(opt, par->family, info->match_set.dim,
+ info->match_set.flags, info->flags, UINT_MAX);
+ int ret;
+
+ if (info->packets.op != IPSET_COUNTER_NONE ||
+ info->bytes.op != IPSET_COUNTER_NONE)
+ opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS;
+
+ ret = match_set(info->match_set.index, skb, par, &opt,
+ info->match_set.flags & IPSET_INV_MATCH);
+
+ if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS))
+ return ret;
+
+ if (!match_counter(opt.ext.packets, &info->packets))
+ return 0;
+ return match_counter(opt.ext.bytes, &info->bytes);
+}
+
+#define set_match_v4_checkentry set_match_v1_checkentry
+#define set_match_v4_destroy set_match_v1_destroy
+
/* Revision 0 interface: backward compatible with netfilter/iptables */
static unsigned int
@@ -573,6 +619,27 @@ static struct xt_match set_matches[] __read_mostly = {
.destroy = set_match_v3_destroy,
.me = THIS_MODULE
},
+ /* new revision for counters support: update, match */
+ {
+ .name = "set",
+ .family = NFPROTO_IPV4,
+ .revision = 4,
+ .match = set_match_v4,
+ .matchsize = sizeof(struct xt_set_info_match_v4),
+ .checkentry = set_match_v4_checkentry,
+ .destroy = set_match_v4_destroy,
+ .me = THIS_MODULE
+ },
+ {
+ .name = "set",
+ .family = NFPROTO_IPV6,
+ .revision = 4,
+ .match = set_match_v4,
+ .matchsize = sizeof(struct xt_set_info_match_v4),
+ .checkentry = set_match_v4_checkentry,
+ .destroy = set_match_v4_destroy,
+ .me = THIS_MODULE
+ },
};
static struct xt_target set_targets[] __read_mostly = {