author    Patrick McHardy <kaber@trash.net>    2007-07-07 22:31:32 -0700
committer David S. Miller <davem@sunset.davemloft.net>    2007-07-10 22:17:55 -0700
commit    d4156e8cd93f5772483928aaf4960120caebd789 (patch)
tree      e740e629df29d8ea1ad21244998851362b64a70e /include/net/netfilter/nf_conntrack_tuple.h
parent    df43b4e7ca46952756b2fc039ed80469b1bff62d (diff)
[NETFILTER]: nf_conntrack: reduce masks to a subset of tuples
Since conntrack currently allows masks to be used for every bit of both helper and expectation tuples, we can't hash them and have to keep them on two global lists that are searched for every new connection.

This patch removes the never-used ability to use masks for the destination part of the expectation tuple and completely removes masks from helpers, since the only reasonable choice is a full match on l3num, protonum and src.u.all.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
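For illustration only (this is not part of the patch), the following user-space sketch mimics the masked source comparison that the new nf_ct_tuple_src_mask_cmp() performs: each source word is matched under the mask with an XOR-and-AND test, while the destination part of the tuple is matched exactly elsewhere. The struct layouts and names below are simplified stand-ins for the kernel types, and the l3num/protonum checks are omitted for brevity.

/* Simplified, user-space sketch of the masked source comparison introduced
 * by this patch; the structs are reduced stand-ins for the kernel types,
 * and NF_CT_TUPLE_L3SIZE mirrors the kernel constant (4 x 32-bit words).
 */
#include <stdint.h>
#include <stdio.h>

#define NF_CT_TUPLE_L3SIZE 4

struct tuple {
	uint32_t src_u3[NF_CT_TUPLE_L3SIZE];	/* source address words */
	uint16_t src_port;			/* src.u.all */
	uint32_t dst_u3[NF_CT_TUPLE_L3SIZE];	/* destination address words */
	uint16_t dst_port;			/* dst.u.all */
};

struct tuple_mask {
	uint32_t src_u3[NF_CT_TUPLE_L3SIZE];	/* mask now only covers the source part */
	uint16_t src_port;
};

/* Bits set in the mask must match; everything else is wildcarded. */
static int src_mask_cmp(const struct tuple *t1, const struct tuple *t2,
			const struct tuple_mask *m)
{
	int i;

	for (i = 0; i < NF_CT_TUPLE_L3SIZE; i++)
		if ((t1->src_u3[i] ^ t2->src_u3[i]) & m->src_u3[i])
			return 0;
	if ((t1->src_port ^ t2->src_port) & m->src_port)
		return 0;
	return 1;
}

int main(void)
{
	struct tuple a = { .src_u3 = { 0x0a000001 }, .src_port = 1024,
			   .dst_u3 = { 0x0a000002 }, .dst_port = 21 };
	struct tuple b = a;
	/* Wildcard the source address and port entirely, as an expectation
	 * from an FTP-style helper typically would. */
	struct tuple_mask m = { .src_u3 = { 0 }, .src_port = 0 };

	b.src_u3[0] = 0xc0a80001;	/* different source address */
	b.src_port  = 40000;		/* different source port */

	printf("masked source match: %d\n", src_mask_cmp(&a, &b, &m));
	return 0;
}

Because only the source half of a tuple can now carry a mask, expectations no longer need a full-tuple mask structure, which is what allows them to be hashed instead of kept on a linear list.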
Diffstat (limited to 'include/net/netfilter/nf_conntrack_tuple.h')
-rw-r--r--  include/net/netfilter/nf_conntrack_tuple.h  |  65
1 file changed, 43 insertions(+), 22 deletions(-)
diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h
index d02ce876b4ca..99934ab538e6 100644
--- a/include/net/netfilter/nf_conntrack_tuple.h
+++ b/include/net/netfilter/nf_conntrack_tuple.h
@@ -100,6 +100,14 @@ struct nf_conntrack_tuple
} dst;
};
+struct nf_conntrack_tuple_mask
+{
+ struct {
+ union nf_conntrack_address u3;
+ union nf_conntrack_man_proto u;
+ } src;
+};
+
/* This is optimized opposed to a memset of the whole structure. Everything we
* really care about is the source/destination unions */
#define NF_CT_TUPLE_U_BLANK(tuple) \
@@ -161,31 +169,44 @@ static inline int nf_ct_tuple_equal(const struct nf_conntrack_tuple *t1,
return nf_ct_tuple_src_equal(t1, t2) && nf_ct_tuple_dst_equal(t1, t2);
}
+static inline int nf_ct_tuple_mask_equal(const struct nf_conntrack_tuple_mask *m1,
+ const struct nf_conntrack_tuple_mask *m2)
+{
+ return (m1->src.u3.all[0] == m2->src.u3.all[0] &&
+ m1->src.u3.all[1] == m2->src.u3.all[1] &&
+ m1->src.u3.all[2] == m2->src.u3.all[2] &&
+ m1->src.u3.all[3] == m2->src.u3.all[3] &&
+ m1->src.u.all == m2->src.u.all);
+}
+
+static inline int nf_ct_tuple_src_mask_cmp(const struct nf_conntrack_tuple *t1,
+ const struct nf_conntrack_tuple *t2,
+ const struct nf_conntrack_tuple_mask *mask)
+{
+ int count;
+
+ for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
+ if ((t1->src.u3.all[count] ^ t2->src.u3.all[count]) &
+ mask->src.u3.all[count])
+ return 0;
+ }
+
+ if ((t1->src.u.all ^ t2->src.u.all) & mask->src.u.all)
+ return 0;
+
+ if (t1->src.l3num != t2->src.l3num ||
+ t1->dst.protonum != t2->dst.protonum)
+ return 0;
+
+ return 1;
+}
+
static inline int nf_ct_tuple_mask_cmp(const struct nf_conntrack_tuple *t,
const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_tuple *mask)
+ const struct nf_conntrack_tuple_mask *mask)
{
- int count = 0;
-
- for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
- if ((t->src.u3.all[count] ^ tuple->src.u3.all[count]) &
- mask->src.u3.all[count])
- return 0;
- }
-
- for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
- if ((t->dst.u3.all[count] ^ tuple->dst.u3.all[count]) &
- mask->dst.u3.all[count])
- return 0;
- }
-
- if ((t->src.u.all ^ tuple->src.u.all) & mask->src.u.all ||
- (t->dst.u.all ^ tuple->dst.u.all) & mask->dst.u.all ||
- (t->src.l3num ^ tuple->src.l3num) & mask->src.l3num ||
- (t->dst.protonum ^ tuple->dst.protonum) & mask->dst.protonum)
- return 0;
-
- return 1;
+ return nf_ct_tuple_src_mask_cmp(t, tuple, mask) &&
+ nf_ct_tuple_dst_equal(t, tuple);
}
#endif /* _NF_CONNTRACK_TUPLE_H */