author    Linus Torvalds <torvalds@linux-foundation.org>  2017-05-02 16:40:27 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-05-02 16:40:27 -0700
commit    8d65b08debc7e62b2c6032d7fe7389d895b92cbc (patch)
tree      0c3141b60c3a03cc32742b5750c5e763b9dae489 /include/linux/rhashtable.h
parent    5a0387a8a8efb90ae7fea1e2e5c62de3efa74691 (diff)
parent    5d15af6778b8e4ed1fd41b040283af278e7a9a72 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Here are some highlights from the 2065 networking commits that happened this development cycle:

   1) XDP support for IXGBE (John Fastabend) and thunderx (Sunil Kowuri)

   2) Add a generic XDP driver, so that anyone can test XDP even if they lack a networking device whose driver has explicit XDP support (me).

   3) Sparc64 now has an eBPF JIT too (me)

   4) Add a BPF program testing framework via BPF_PROG_TEST_RUN (Alexei Starovoitov)

   5) Make netfilter network namespace teardown less expensive (Florian Westphal)

   6) Add symmetric hashing support to nft_hash (Laura Garcia Liebana)

   7) Implement NAPI and GRO in netvsc driver (Stephen Hemminger)

   8) Support TC flower offload statistics in mlxsw (Arkadi Sharshevsky)

   9) Multiqueue support in stmmac driver (Joao Pinto)

  10) Remove TCP timewait recycling, it never really could possibly work well in the real world and timestamp randomization really zaps any hint of usability this feature had (Soheil Hassas Yeganeh)

  11) Support level3 vs level4 ECMP route hashing in ipv4 (Nikolay Aleksandrov)

  12) Add socket busy poll support to epoll (Sridhar Samudrala)

  13) Netlink extended ACK support (Johannes Berg, Pablo Neira Ayuso, and several others)

  14) IPSEC hw offload infrastructure (Steffen Klassert)"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (2065 commits)
  tipc: refactor function tipc_sk_recv_stream()
  tipc: refactor function tipc_sk_recvmsg()
  net: thunderx: Optimize page recycling for XDP
  net: thunderx: Support for XDP header adjustment
  net: thunderx: Add support for XDP_TX
  net: thunderx: Add support for XDP_DROP
  net: thunderx: Add basic XDP support
  net: thunderx: Cleanup receive buffer allocation
  net: thunderx: Optimize CQE_TX handling
  net: thunderx: Optimize RBDR descriptor handling
  net: thunderx: Support for page recycling
  ipx: call ipxitf_put() in ioctl error path
  net: sched: add helpers to handle extended actions
  qed*: Fix issues in the ptp filter config implementation.
  qede: Fix concurrency issue in PTP Tx path processing.
  stmmac: Add support for SIMATIC IOT2000 platform
  net: hns: fix ethtool_get_strings overflow in hns driver
  tcp: fix wraparound issue in tcp_lp
  bpf, arm64: fix jit branch offset related to ldimm64
  bpf, arm64: implement jiting of BPF_XADD
  ...
Diffstat (limited to 'include/linux/rhashtable.h')
-rw-r--r--  include/linux/rhashtable.h | 68
1 file changed, 50 insertions(+), 18 deletions(-)
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 092292b6675e..7d56a7ea2b2e 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -49,6 +49,21 @@
/* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
+/* Maximum chain length before rehash
+ *
+ * The maximum (not average) chain length grows with the size of the hash
+ * table, at a rate of (log N)/(log log N).
+ *
+ * The value of 16 is selected so that even if the hash table grew to
+ * 2^32 you would not expect the maximum chain length to exceed it
+ * unless we are under attack (or extremely unlucky).
+ *
+ * As this limit is only to detect attacks, we don't need to set it to a
+ * lower value as you'd need the chain length to vastly exceed 16 to have
+ * any real effect on the system.
+ */
+#define RHT_ELASTICITY 16u
+
struct rhash_head {
struct rhash_head __rcu *next;
};
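
As a back-of-the-envelope check on the new constant (a user-space sketch, not part of the patch; the choice of base-2 logarithms is an assumption): for a table grown to 2^32 entries, (log2 N)/(log2 log2 N) = 32/5 = 6.4, comfortably below 16, so a chain that actually reaches the limit points to an attack or extreme bad luck rather than normal growth. Compile with -lm:

	#include <math.h>
	#include <stdio.h>

	int main(void)
	{
		/* The comment above says the expected maximum chain length
		 * grows as (log N)/(log log N); evaluate that, base 2,
		 * for a table of N = 2^32 entries. */
		double n = pow(2.0, 32.0);
		double max_chain = log2(n) / log2(log2(n));

		printf("expected max chain at 2^32 entries: ~%.1f (rehash limit: 16)\n",
		       max_chain);
		return 0;
	}
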
@@ -110,29 +125,25 @@ struct rhashtable;
* @key_len: Length of key
* @key_offset: Offset of key in struct to be hashed
* @head_offset: Offset of rhash_head in struct to be hashed
- * @insecure_max_entries: Maximum number of entries (may be exceeded)
* @max_size: Maximum size while expanding
* @min_size: Minimum size while shrinking
- * @nulls_base: Base value to generate nulls marker
- * @insecure_elasticity: Set to true to disable chain length checks
- * @automatic_shrinking: Enable automatic shrinking of tables
* @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
+ * @automatic_shrinking: Enable automatic shrinking of tables
+ * @nulls_base: Base value to generate nulls marker
* @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
* @obj_hashfn: Function to hash object
* @obj_cmpfn: Function to compare key with object
*/
struct rhashtable_params {
- size_t nelem_hint;
- size_t key_len;
- size_t key_offset;
- size_t head_offset;
- unsigned int insecure_max_entries;
+ u16 nelem_hint;
+ u16 key_len;
+ u16 key_offset;
+ u16 head_offset;
unsigned int max_size;
- unsigned int min_size;
- u32 nulls_base;
- bool insecure_elasticity;
+ u16 min_size;
bool automatic_shrinking;
- size_t locks_mul;
+ u8 locks_mul;
+ u32 nulls_base;
rht_hashfn_t hashfn;
rht_obj_hashfn_t obj_hashfn;
rht_obj_cmpfn_t obj_cmpfn;
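
For reference, a caller-side sketch of how the compacted parameter struct is typically filled in; struct test_obj and its members are hypothetical, but the rhashtable_params field names match the struct above, and the narrowed u16/u8 fields still take the usual offsetof()/sizeof() values:

	#include <linux/rhashtable.h>

	struct test_obj {
		u32			key;
		struct rhash_head	node;
	};

	static const struct rhashtable_params test_params = {
		.head_offset		= offsetof(struct test_obj, node),
		.key_offset		= offsetof(struct test_obj, key),
		.key_len		= sizeof(u32),
		.automatic_shrinking	= true,
		/* hashfn left NULL: defaults to jhash2/jhash per the doc above */
	};
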
@@ -143,8 +154,8 @@ struct rhashtable_params {
* @tbl: Bucket table
* @nelems: Number of elements in table
* @key_len: Key length for hashfn
- * @elasticity: Maximum chain length before rehash
* @p: Configuration parameters
+ * @max_elems: Maximum number of elements in table
* @rhlist: True if this is an rhltable
* @run_work: Deferred worker to expand/shrink asynchronously
* @mutex: Mutex to protect current/future table swapping
@@ -154,8 +165,8 @@ struct rhashtable {
struct bucket_table __rcu *tbl;
atomic_t nelems;
unsigned int key_len;
- unsigned int elasticity;
struct rhashtable_params p;
+ unsigned int max_elems;
bool rhlist;
struct work_struct run_work;
struct mutex mutex;
@@ -318,8 +329,7 @@ static inline bool rht_grow_above_100(const struct rhashtable *ht,
static inline bool rht_grow_above_max(const struct rhashtable *ht,
const struct bucket_table *tbl)
{
- return ht->p.insecure_max_entries &&
- atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
+ return atomic_read(&ht->nelems) >= ht->max_elems;
}
/* The bucket lock is selected based on the hash and protects mutations
@@ -726,7 +736,7 @@ slow_path:
return rhashtable_insert_slow(ht, key, obj);
}
- elasticity = ht->elasticity;
+ elasticity = RHT_ELASTICITY;
pprev = rht_bucket_insert(ht, tbl, hash);
data = ERR_PTR(-ENOMEM);
if (!pprev)
@@ -916,6 +926,28 @@ static inline int rhashtable_lookup_insert_fast(
}
/**
+ * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Just like rhashtable_lookup_insert_fast(), but this function returns the
+ * object if it exists, NULL if it did not and the insertion was successful,
+ * and an ERR_PTR otherwise.
+ */
+static inline void *rhashtable_lookup_get_insert_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ const char *key = rht_obj(ht, obj);
+
+ BUG_ON(ht->p.obj_hashfn);
+
+ return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
+ false);
+}
+
+/**
* rhashtable_lookup_insert_key - search and insert object to hash table
* with explicit key
* @ht: hash table
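
Finally, a hedged usage sketch for the rhashtable_lookup_get_insert_fast() helper added above (get_or_insert(), test_obj and the surrounding setup are illustrative, not from the patch). The three outcomes are: an ERR_PTR() on failure (e.g. -ENOMEM, as in __rhashtable_insert_fast()), the already-present object when the key exists, or NULL when the new object was inserted:

	static struct test_obj *get_or_insert(struct rhashtable *ht,
					      struct test_obj *new,
					      const struct rhashtable_params params)
	{
		void *old;

		old = rhashtable_lookup_get_insert_fast(ht, &new->node, params);
		if (IS_ERR(old))
			return old;	/* insertion failed, e.g. -ENOMEM */
		if (old)
			return old;	/* key already present, @new not inserted */
		return new;		/* @new is now in the table */
	}
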