path: root/lib/rhashtable.c
author    Richard Weinberger <richard@nod.at>  2019-07-06 22:51:56 +0200
committer Richard Weinberger <richard@nod.at>  2019-07-06 22:51:56 +0200
commit    1d2af80d581d1bae81594e497cd57e345235b940 (patch)
tree      e1f8bb3d3d1436fe62057becb3eb1233f843f3ba /lib/rhashtable.c
parent    b07079f1642c28dac4f6f339d5aca66203519734 (diff)
parent    bce9437a0a48dd5e19490f56e1cdc39a9be5563c (diff)
Merge tag 'nand/for-5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux into mtd/next
NAND core changes:
- use longest matching pattern in ->exec_op() default parser
- export NAND operation tracer
- add flag to indicate panic_write in MTD
- use kzalloc() instead of kmalloc() and memset() (see the sketch after this list)

Raw NAND controller drivers changes:
- brcmnand:
  * fix BCH ECC layout for large page NAND parts
  * fall back to detected ecc-strength, ecc-step-size
  * when oops in progress use pio and interrupt polling
  * code refactor to introduce helper functions
  * add support for v7.3 controller
- FSMC:
  * use nand_op_trace for operation tracing
- GPMI:
  * move all driver code into single file
  * various cleanups (including dmaengine changes)
  * use runtime PM to manage clocks
  * implement exec_op
- MTK:
  * correct low level time calculation of r/w cycle
  * improve data sampling timing for read cycle
  * add validity check for CE# pin setting
  * fix wrongly assigned OOB buffer pointer issue
  * re-license MTK NAND driver as Dual MIT/GPL
- STM32:
  * manage the get_irq error case
  * increase DMA completion timeouts

Raw NAND chips drivers changes:
- Macronix: add read-retry support

Onenand driver changes:
- add support for 8Gb datasize chips
- avoid fall-through warnings

SPI-NAND changes:
- define macros for page-read ops with three-byte addresses
- add support for two-byte device IDs and then for GigaDevice GD5F1GQ4UFxxG
- add initial support for Paragon PN26G0xA
- handle the case where the last page read has bitflips
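The kzalloc() item in the core changes refers to a standard kernel cleanup: replacing a kmalloc() followed by memset() with a single zeroing allocation. The snippet below is a minimal sketch of that pattern, not code from this pull; the nand_op_ctx struct and helper names are hypothetical.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical context struct, for illustration only. */
struct nand_op_ctx {
	unsigned int naddrs;
	u8 addrs[8];
};

/* Before: allocate, then zero in a separate step. */
static struct nand_op_ctx *ctx_alloc_old(void)
{
	struct nand_op_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;
	memset(ctx, 0, sizeof(*ctx));
	return ctx;
}

/* After: kzalloc() allocates and zeroes in one call. */
static struct nand_op_ctx *ctx_alloc_new(void)
{
	return kzalloc(sizeof(struct nand_op_ctx), GFP_KERNEL);
}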
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--  lib/rhashtable.c  33
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 6529fe1b45c1..935ec80f213f 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -34,7 +34,7 @@
 
 union nested_table {
 	union nested_table __rcu *table;
-	struct rhash_lock_head __rcu *bucket;
+	struct rhash_lock_head *bucket;
 };
 
 static u32 head_hashfn(struct rhashtable *ht,
@@ -131,7 +131,7 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
 			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
 	}
 
-	if (cmpxchg(prev, NULL, ntbl) == NULL)
+	if (cmpxchg((union nested_table **)prev, NULL, ntbl) == NULL)
 		return ntbl;
 	/* Raced with another thread. */
 	kfree(ntbl);
@@ -216,7 +216,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
 }
 
 static int rhashtable_rehash_one(struct rhashtable *ht,
-				 struct rhash_lock_head __rcu **bkt,
+				 struct rhash_lock_head **bkt,
 				 unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
@@ -269,7 +269,7 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
 				   unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
-	struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
+	struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash);
 	int err;
 
 	if (!bkt)
@@ -296,7 +296,8 @@ static int rhashtable_rehash_attach(struct rhashtable *ht,
 	 * rcu_assign_pointer().
 	 */
 
-	if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL)
+	if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL,
+		    new_tbl) != NULL)
 		return -EEXIST;
 
 	return 0;
@@ -478,7 +479,7 @@ fail:
 }
 
 static void *rhashtable_lookup_one(struct rhashtable *ht,
-				   struct rhash_lock_head __rcu **bkt,
+				   struct rhash_lock_head **bkt,
 				   struct bucket_table *tbl, unsigned int hash,
 				   const void *key, struct rhash_head *obj)
 {
@@ -529,7 +530,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 }
 
 static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
-						  struct rhash_lock_head __rcu **bkt,
+						  struct rhash_lock_head **bkt,
 						  struct bucket_table *tbl,
 						  unsigned int hash,
 						  struct rhash_head *obj,
@@ -584,7 +585,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 {
 	struct bucket_table *new_tbl;
 	struct bucket_table *tbl;
-	struct rhash_lock_head __rcu **bkt;
+	struct rhash_lock_head **bkt;
 	unsigned int hash;
 	void *data;
 
@@ -1166,8 +1167,8 @@ void rhashtable_destroy(struct rhashtable *ht)
 }
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
 
-struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
-						   unsigned int hash)
+struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
+					     unsigned int hash)
 {
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
 	unsigned int index = hash & ((1 << tbl->nest) - 1);
@@ -1195,10 +1196,10 @@ struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
 }
 EXPORT_SYMBOL_GPL(__rht_bucket_nested);
 
-struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
-						 unsigned int hash)
+struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
+					   unsigned int hash)
 {
-	static struct rhash_lock_head __rcu *rhnull;
+	static struct rhash_lock_head *rhnull;
 
 	if (!rhnull)
 		INIT_RHT_NULLS_HEAD(rhnull);
@@ -1206,9 +1207,9 @@ struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
 }
 EXPORT_SYMBOL_GPL(rht_bucket_nested);
 
-struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
-							 struct bucket_table *tbl,
-							 unsigned int hash)
+struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
+						  struct bucket_table *tbl,
+						  unsigned int hash)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
 	unsigned int index = hash & ((1 << tbl->nest) - 1);
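An aside on the rhashtable hunks above: the newer base drops the __rcu annotation from the rhash_lock_head bucket pointers (they embed a lock bit and are accessed through dedicated helpers rather than the plain RCU accessors), and the remaining cmpxchg() call sites gain casts that strip the annotation so sparse does not warn about mixing address spaces. The surviving comment in the rhashtable_rehash_attach() hunk notes that cmpxchg() provides strong enough barriers to stand in for rcu_assign_pointer(). Below is a minimal standalone sketch of that cmpxchg-on-an-__rcu-field pattern; the struct and function names are made up for illustration.

#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct bucket;				/* hypothetical payload type */

struct table {
	struct bucket __rcu *next;	/* written once, then read under RCU */
};

/*
 * Publish @new into @t->next only if nobody else has done so yet.
 * cmpxchg() is not __rcu-aware, so the cast drops the sparse
 * address-space annotation; the full barrier implied by a successful
 * cmpxchg() provides the publication ordering that rcu_assign_pointer()
 * would otherwise give.
 */
static bool table_publish_once(struct table *t, struct bucket *new)
{
	return cmpxchg((struct bucket **)&t->next, NULL, new) == NULL;
}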