author		Herbert Xu <herbert@gondor.apana.org.au>	2020-06-03 18:12:43 +1000
committer	David S. Miller <davem@davemloft.net>	2020-06-06 15:51:17 -0700
commit		4a3084aaa88e70a0af0e38b72ca29af32ec6b9c5 (patch)
tree		9fc4d042d8014f89709bb7fce36371365210795d
parent		7f89cc07d22a4152f3c649a89aa8fe240effd24a (diff)
rhashtable: Drop raw RCU deref in nested_table_free
This patch replaces some unnecessary uses of rcu_dereference_raw in the
rhashtable code with rcu_dereference_protected.

The top-level nested table entry is only marked as RCU because it shares
the same type as the tree entries underneath it.  So it doesn't need any
RCU protection.

We also don't need RCU protection when we're freeing a nested RCU table
because by this stage we've long passed a memory barrier when anyone
could change the nested table.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
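For context, a rough sketch of the structure this refers to (simplified and
approximate; the real definition lives in lib/rhashtable.c and may differ in
detail): the top-level tbl->buckets[0] slot and the tree entries below it all
use union nested_table, which is why the top-level slot carries an RCU
annotation it does not strictly need.

/* Simplified sketch, not the exact kernel definition: each slot either
 * points to another level of the nested tree or holds a leaf bucket pointer.
 */
union nested_table {
	union nested_table __rcu *table;	/* next level of the nested tree */
	struct rhash_lock_head __rcu *bucket;	/* leaf bucket */
};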
-rw-r--r--	lib/rhashtable.c	17
1 file changed, 13 insertions, 4 deletions
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index bdb7e4cadf05..9f6890aedd1a 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -63,13 +63,22 @@ EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
 #define ASSERT_RHT_MUTEX(HT)
 #endif
 
+static inline union nested_table *nested_table_top(
+	const struct bucket_table *tbl)
+{
+	/* The top-level bucket entry does not need RCU protection
+	 * because it's set at the same time as tbl->nest.
+	 */
+	return (void *)rcu_dereference_protected(tbl->buckets[0], 1);
+}
+
 static void nested_table_free(union nested_table *ntbl, unsigned int size)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
 	const unsigned int len = 1 << shift;
 	unsigned int i;
 
-	ntbl = rcu_dereference_raw(ntbl->table);
+	ntbl = rcu_dereference_protected(ntbl->table, 1);
 	if (!ntbl)
 		return;
 
@@ -89,7 +98,7 @@ static void nested_bucket_table_free(const struct bucket_table *tbl)
 	union nested_table *ntbl;
 	unsigned int i;
 
-	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+	ntbl = nested_table_top(tbl);
 
 	for (i = 0; i < len; i++)
 		nested_table_free(ntbl + i, size);
@@ -1173,7 +1182,7 @@ struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
 	unsigned int subhash = hash;
 	union nested_table *ntbl;
 
-	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+	ntbl = nested_table_top(tbl);
 	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
 	subhash >>= tbl->nest;
 
@@ -1213,7 +1222,7 @@ struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
 	unsigned int size = tbl->size >> tbl->nest;
 	union nested_table *ntbl;
 
-	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+	ntbl = nested_table_top(tbl);
 	hash >>= tbl->nest;
 	ntbl = nested_table_alloc(ht, &ntbl[index].table,
 				  size <= (1 << shift));
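As a usage note, the constant-true condition passed to
rcu_dereference_protected() is the conventional way to document that no
protection is required at all.  A minimal, hypothetical teardown sketch
(example_free() and struct foo are made up for illustration, not part of
this patch):

/* Hypothetical sketch: once readers can no longer reach the object
 * (update side quiesced, grace period long past), the RCU-annotated
 * pointer can be loaded with a constant-true lockdep condition.
 */
static void example_free(struct foo __rcu **slot)
{
	struct foo *p = rcu_dereference_protected(*slot, 1);

	kfree(p);
}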