Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--  lib/rhashtable.c  38
1 file changed, 24 insertions(+), 14 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 6ae2ba8e06a2..3e555d012ed6 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -130,7 +130,8 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
if (ntbl)
return ntbl;
- ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ ntbl = alloc_hooks_tag(ht->alloc_tag,
+ kmalloc_noprof(PAGE_SIZE, GFP_ATOMIC|__GFP_ZERO));
if (ntbl && leaf) {
for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
@@ -157,7 +158,8 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
- tbl = kzalloc(size, gfp);
+ tbl = alloc_hooks_tag(ht->alloc_tag,
+ kmalloc_noprof(size, gfp|__GFP_ZERO));
if (!tbl)
return NULL;
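The two hunks above replace plain kzalloc() calls with allocations that are charged explicitly: kmalloc_noprof() allocates without generating a profiling tag at this call site, and alloc_hooks_tag() accounts the allocation to ht->alloc_tag, so the memory is attributed to the rhashtable user rather than to lib/rhashtable.c itself. A minimal sketch of that shape, assuming a structure that carries an alloc_tag pointer recorded at init time (struct tagged_cache and tagged_zalloc() are made-up names, not kernel code):

#include <linux/alloc_tag.h>
#include <linux/slab.h>

struct tagged_cache {
        struct alloc_tag *alloc_tag;    /* recorded from the caller at init time */
};

static void *tagged_zalloc(struct tagged_cache *tc, size_t size, gfp_t gfp)
{
        /* charge the allocation to the recorded tag, not to this file */
        return alloc_hooks_tag(tc->alloc_tag,
                               kmalloc_noprof(size, gfp | __GFP_ZERO));
}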
@@ -181,11 +183,13 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
int i;
static struct lock_class_key __key;
- tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);
+ tbl = alloc_hooks_tag(ht->alloc_tag,
+ kvmalloc_node_noprof(struct_size(tbl, buckets, nbuckets),
+ gfp|__GFP_ZERO, NUMA_NO_NODE));
size = nbuckets;
- if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
+ if (tbl == NULL && !gfpflags_allow_blocking(gfp)) {
tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
nbuckets = 0;
}
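The fallback condition in the hunk above also changes: rather than comparing the masked gfp flags against GFP_KERNEL, the code now falls back to the nested table only when the flags do not allow the allocator to block. For orientation, gfpflags_allow_blocking() reduces to a test of the direct-reclaim bit; roughly (a simplified sketch of the helper from the gfp headers, not a verbatim copy):

static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
        /* blocking is possible only if direct reclaim is permitted */
        return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}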
@@ -580,10 +584,6 @@ static struct bucket_table *rhashtable_insert_one(
*/
rht_assign_locked(bkt, obj);
- atomic_inc(&ht->nelems);
- if (rht_grow_above_75(ht, tbl))
- schedule_work(&ht->run_work);
-
return NULL;
}
@@ -611,15 +611,23 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
data = ERR_PTR(-EAGAIN);
} else {
+ bool inserted;
+
flags = rht_lock(tbl, bkt);
data = rhashtable_lookup_one(ht, bkt, tbl,
hash, key, obj);
new_tbl = rhashtable_insert_one(ht, bkt, tbl,
hash, obj, data);
+ inserted = data && !new_tbl;
+ if (inserted)
+ atomic_inc(&ht->nelems);
if (PTR_ERR(new_tbl) != -EEXIST)
data = ERR_CAST(new_tbl);
rht_unlock(tbl, bkt, flags);
+
+ if (inserted && rht_grow_above_75(ht, tbl))
+ schedule_work(&ht->run_work);
}
} while (!IS_ERR_OR_NULL(new_tbl));
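The two hunks above move the element count and the resize check out of rhashtable_insert_one(), which runs with the bucket lock held, and into rhashtable_try_insert(): the atomic_inc() still happens under the lock, but the decision to schedule the resize work is made only after rht_unlock(). A minimal userspace sketch of that pattern, purely illustrative (the names and types below are not from the kernel):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct table {
        pthread_mutex_t lock;
        atomic_int nelems;
        int nbuckets;
};

static bool grow_above_75(struct table *t)
{
        return atomic_load(&t->nelems) > t->nbuckets * 3 / 4;
}

static void kick_resizer(struct table *t)
{
        (void)t;        /* stand-in for schedule_work(&ht->run_work) */
}

static void insert(struct table *t)
{
        bool inserted;

        pthread_mutex_lock(&t->lock);
        /* ... bucket manipulation elided ... */
        inserted = true;        /* in the real code: lookup found no match and the insert went in */
        if (inserted)
                atomic_fetch_add(&t->nelems, 1);
        pthread_mutex_unlock(&t->lock);

        /* the heavier follow-up work runs outside the critical section */
        if (inserted && grow_above_75(t))
                kick_resizer(t);
}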
@@ -661,7 +669,7 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
* structure outside the hash table.
*
* This function may be called from any process context, including
- * non-preemptable context, but cannot be called from softirq or
+ * non-preemptible context, but cannot be called from softirq or
* hardirq context.
*
* You must call rhashtable_walk_exit after this function returns.
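The doc-comment hunk above (the "non-preemptable" to "non-preemptible" spelling fix) belongs to the documentation of the iterator setup function. For orientation, a hedged sketch of how the walk API is typically used; struct my_obj and walk_all() are made-up names, and error handling is kept minimal:

#include <linux/err.h>
#include <linux/rhashtable.h>

struct my_obj {
        struct rhash_head node;
        int value;
};

static void walk_all(struct rhashtable *ht)
{
        struct rhashtable_iter iter;
        struct my_obj *obj;

        rhashtable_walk_enter(ht, &iter);
        rhashtable_walk_start(&iter);

        while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                if (IS_ERR(obj)) {
                        if (PTR_ERR(obj) == -EAGAIN)
                                continue;       /* table resized under us; keep walking */
                        break;
                }
                /* use obj->value; a walk may see entries more than once across a resize */
        }

        rhashtable_walk_stop(&iter);
        rhashtable_walk_exit(&iter);
}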
@@ -1016,7 +1024,7 @@ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
* .obj_hashfn = my_hash_fn,
* };
*/
-int rhashtable_init(struct rhashtable *ht,
+int rhashtable_init_noprof(struct rhashtable *ht,
const struct rhashtable_params *params)
{
struct bucket_table *tbl;
@@ -1031,6 +1039,8 @@ int rhashtable_init(struct rhashtable *ht,
spin_lock_init(&ht->lock);
memcpy(&ht->p, params, sizeof(*params));
+ alloc_tag_record(ht->alloc_tag);
+
if (params->min_size)
ht->p.min_size = roundup_pow_of_two(params->min_size);
@@ -1076,7 +1086,7 @@ int rhashtable_init(struct rhashtable *ht,
return 0;
}
-EXPORT_SYMBOL_GPL(rhashtable_init);
+EXPORT_SYMBOL_GPL(rhashtable_init_noprof);
/**
* rhltable_init - initialize a new hash list table
@@ -1087,15 +1097,15 @@ EXPORT_SYMBOL_GPL(rhashtable_init);
*
* See documentation for rhashtable_init.
*/
-int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
+int rhltable_init_noprof(struct rhltable *hlt, const struct rhashtable_params *params)
{
int err;
- err = rhashtable_init(&hlt->ht, params);
+ err = rhashtable_init_noprof(&hlt->ht, params);
hlt->ht.rhlist = true;
return err;
}
-EXPORT_SYMBOL_GPL(rhltable_init);
+EXPORT_SYMBOL_GPL(rhltable_init_noprof);
static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
void (*free_fn)(void *ptr, void *arg),
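Finally, on the _noprof renames: rhashtable_init() and rhltable_init() keep their existing names for callers because, in the memory allocation profiling scheme this patch fits into, the headers re-introduce them as macros that capture the caller's allocation tag; alloc_tag_record(ht->alloc_tag) in the init path then saves that tag so the table allocations above can be charged to it. The companion header change is not shown in this file, but the usual shape looks like the following (a sketch, not the verbatim header):

/* sketch of the expected companion change in the rhashtable headers */
#include <linux/alloc_tag.h>

#define rhashtable_init(ht, params) \
        alloc_hooks(rhashtable_init_noprof(ht, params))
#define rhltable_init(hlt, params) \
        alloc_hooks(rhltable_init_noprof(hlt, params))

With that wrapper in place, existing call sites keep compiling unchanged while each one gets its own allocation tag.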