Diffstat (limited to 'drivers/md/dm-region-hash.c')
-rw-r--r--	drivers/md/dm-region-hash.c	82
1 file changed, 43 insertions(+), 39 deletions(-)
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 69732e03eb34..e9b47b659976 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2003 Sistina Software Limited.
  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -18,7 +19,8 @@
 
 #define DM_MSG_PREFIX	"region hash"
 
-/*-----------------------------------------------------------------
+/*
+ *------------------------------------------------------------------
  * Region hash
  *
  * The mirror splits itself up into discrete regions. Each
@@ -53,37 +55,39 @@
  * lists in the region_hash, with the 'state', 'list' and
  * 'delayed_bios' fields of the regions. This is used from irq
  * context, so all other uses will have to suspend local irqs.
- *---------------------------------------------------------------*/
+ *------------------------------------------------------------------
+ */
 struct dm_region_hash {
 	uint32_t region_size;
-	unsigned region_shift;
+	unsigned int region_shift;
 
 	/* holds persistent region state */
 	struct dm_dirty_log *log;
 
 	/* hash table */
 	rwlock_t hash_lock;
-	mempool_t *region_pool;
-	unsigned mask;
-	unsigned nr_buckets;
-	unsigned prime;
-	unsigned shift;
+	unsigned int mask;
+	unsigned int nr_buckets;
+	unsigned int prime;
+	unsigned int shift;
 	struct list_head *buckets;
 
-	unsigned max_recovery; /* Max # of regions to recover in parallel */
+	/*
+	 * If there was a flush failure no regions can be marked clean.
+	 */
+	int flush_failure;
+
+	unsigned int max_recovery; /* Max # of regions to recover in parallel */
 
 	spinlock_t region_lock;
 	atomic_t recovery_in_flight;
-	struct semaphore recovery_count;
 	struct list_head clean_regions;
 	struct list_head quiesced_regions;
 	struct list_head recovered_regions;
 	struct list_head failed_recovered_regions;
+	struct semaphore recovery_count;
 
-	/*
-	 * If there was a flush failure no regions can be marked clean.
-	 */
-	int flush_failure;
+	mempool_t region_pool;
 
 	void *context;
 	sector_t target_begin;
@@ -126,7 +130,8 @@ EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);
 
 region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
 {
-	return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
+	return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector -
+				      rh->target_begin);
 }
 EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
 
@@ -161,13 +166,14 @@ struct dm_region_hash *dm_region_hash_create(
 					     struct bio_list *bios),
 		void (*wakeup_workers)(void *context),
 		void (*wakeup_all_recovery_waiters)(void *context),
-		sector_t target_begin, unsigned max_recovery,
+		sector_t target_begin, unsigned int max_recovery,
 		struct dm_dirty_log *log, uint32_t region_size,
 		region_t nr_regions)
 {
 	struct dm_region_hash *rh;
-	unsigned nr_buckets, max_buckets;
+	unsigned int nr_buckets, max_buckets;
 	size_t i;
+	int ret;
 
 	/*
 	 * Calculate a suitable number of buckets for our hash
@@ -178,7 +184,7 @@ struct dm_region_hash *dm_region_hash_create(
 		;
 	nr_buckets >>= 1;
 
-	rh = kmalloc(sizeof(*rh), GFP_KERNEL);
+	rh = kzalloc(sizeof(*rh), GFP_KERNEL);
 	if (!rh) {
 		DMERR("unable to allocate region hash memory");
 		return ERR_PTR(-ENOMEM);
@@ -192,7 +198,7 @@ struct dm_region_hash *dm_region_hash_create(
 	rh->max_recovery = max_recovery;
 	rh->log = log;
 	rh->region_size = region_size;
-	rh->region_shift = ffs(region_size) - 1;
+	rh->region_shift = __ffs(region_size);
 	rwlock_init(&rh->hash_lock);
 	rh->mask = nr_buckets - 1;
 	rh->nr_buckets = nr_buckets;
@@ -200,7 +206,7 @@ struct dm_region_hash *dm_region_hash_create(
 	rh->shift = RH_HASH_SHIFT;
 	rh->prime = RH_HASH_MULT;
 
-	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
+	rh->buckets = vmalloc_array(nr_buckets, sizeof(*rh->buckets));
 	if (!rh->buckets) {
 		DMERR("unable to allocate region hash bucket memory");
 		kfree(rh);
@@ -219,9 +225,9 @@ struct dm_region_hash *dm_region_hash_create(
 	INIT_LIST_HEAD(&rh->failed_recovered_regions);
 	rh->flush_failure = 0;
 
-	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
-						      sizeof(struct dm_region));
-	if (!rh->region_pool) {
+	ret = mempool_init_kmalloc_pool(&rh->region_pool, MIN_REGIONS,
+					sizeof(struct dm_region));
+	if (ret) {
 		vfree(rh->buckets);
 		kfree(rh);
 		rh = ERR_PTR(-ENOMEM);
@@ -233,7 +239,7 @@ EXPORT_SYMBOL_GPL(dm_region_hash_create);
 
 void dm_region_hash_destroy(struct dm_region_hash *rh)
 {
-	unsigned h;
+	unsigned int h;
 	struct dm_region *reg, *nreg;
 
 	BUG_ON(!list_empty(&rh->quiesced_regions));
@@ -241,16 +247,14 @@ void dm_region_hash_destroy(struct dm_region_hash *rh)
 		list_for_each_entry_safe(reg, nreg, rh->buckets + h,
 					 hash_list) {
 			BUG_ON(atomic_read(&reg->pending));
-			mempool_free(reg, rh->region_pool);
+			mempool_free(reg, &rh->region_pool);
 		}
 	}
 
 	if (rh->log)
 		dm_dirty_log_destroy(rh->log);
 
-	if (rh->region_pool)
-		mempool_destroy(rh->region_pool);
-
+	mempool_exit(&rh->region_pool);
 	vfree(rh->buckets);
 	kfree(rh);
 }
@@ -262,9 +266,9 @@ struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
 }
 EXPORT_SYMBOL_GPL(dm_rh_dirty_log);
 
-static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
+static unsigned int rh_hash(struct dm_region_hash *rh, region_t region)
 {
-	return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
+	return (unsigned int) ((region * rh->prime) >> rh->shift) & rh->mask;
 }
 
 static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
@@ -288,7 +292,7 @@ static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
 {
 	struct dm_region *reg, *nreg;
 
-	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
+	nreg = mempool_alloc(&rh->region_pool, GFP_ATOMIC);
 	if (unlikely(!nreg))
 		nreg = kmalloc(sizeof(*nreg), GFP_NOIO | __GFP_NOFAIL);
 
@@ -304,7 +308,7 @@ static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
 	reg = __rh_lookup(rh, region);
 	if (reg)
 		/* We lost the race. */
-		mempool_free(nreg, rh->region_pool);
+		mempool_free(nreg, &rh->region_pool);
 	else {
 		__rh_insert(rh, nreg);
 		if (nreg->state == DM_RH_CLEAN) {
@@ -399,12 +403,12 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
 	region_t region = dm_rh_bio_to_region(rh, bio);
 	int recovering = 0;
 
-	if (bio->bi_rw & REQ_FLUSH) {
+	if (bio->bi_opf & REQ_PREFLUSH) {
 		rh->flush_failure = 1;
 		return;
 	}
 
-	if (bio->bi_rw & REQ_DISCARD)
+	if (bio_op(bio) == REQ_OP_DISCARD)
 		return;
 
 	/* We must inform the log that the sync count has changed. */
@@ -482,17 +486,17 @@ void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
 	list_for_each_entry_safe(reg, next, &recovered, list) {
 		rh->log->type->clear_region(rh->log, reg->key);
 		complete_resync_work(reg, 1);
-		mempool_free(reg, rh->region_pool);
+		mempool_free(reg, &rh->region_pool);
 	}
 
 	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
 		complete_resync_work(reg, errors_handled ? 0 : 1);
-		mempool_free(reg, rh->region_pool);
+		mempool_free(reg, &rh->region_pool);
 	}
 
 	list_for_each_entry_safe(reg, next, &clean, list) {
 		rh->log->type->clear_region(rh->log, reg->key);
-		mempool_free(reg, rh->region_pool);
+		mempool_free(reg, &rh->region_pool);
 	}
 
 	rh->log->type->flush(rh->log);
@@ -527,7 +531,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
 	struct bio *bio;
 
 	for (bio = bios->head; bio; bio = bio->bi_next) {
-		if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))
+		if (bio->bi_opf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
			continue;
 		rh_inc(rh, dm_rh_bio_to_region(rh, bio));
 	}
@@ -719,5 +723,5 @@ void dm_rh_start_recovery(struct dm_region_hash *rh)
 EXPORT_SYMBOL_GPL(dm_rh_start_recovery);
 
 MODULE_DESCRIPTION(DM_NAME " region hash");
-MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@lists.linux.dev>");
 MODULE_LICENSE("GPL");
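
A few notes on the main API changes above, each with a short hedged sketch; the helper and type names used in the sketches (my_ctx, my_elem, alloc_buckets, rh_skip_bio and so on) are illustrative, not taken from dm-region-hash.

The central change converts the region pool from a separately allocated mempool_t * (mempool_create_kmalloc_pool()/mempool_destroy()) to a mempool_t embedded in struct dm_region_hash (mempool_init_kmalloc_pool()/mempool_exit()), saving one allocation and one pointer dereference per pool access. A minimal sketch of the embedded-pool lifecycle:

	#include <linux/mempool.h>
	#include <linux/slab.h>

	struct my_elem {		/* hypothetical pool element */
		int payload;
	};

	struct my_ctx {
		mempool_t pool;		/* embedded, not mempool_t * */
	};

	static int my_ctx_init(struct my_ctx *ctx)
	{
		/* Pre-allocates 16 elements; returns 0 on success or a
		 * negative errno, unlike mempool_create_*, which returns
		 * a pointer or NULL. */
		return mempool_init_kmalloc_pool(&ctx->pool, 16,
						 sizeof(struct my_elem));
	}

	static void my_ctx_exit(struct my_ctx *ctx)
	{
		/* mempool_exit() is documented as safe on a zeroed but
		 * never-initialized pool, which is presumably why the diff
		 * also switches the rh allocation from kmalloc() to
		 * kzalloc(). */
		mempool_exit(&ctx->pool);
	}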
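The switch from ffs(region_size) - 1 to __ffs(region_size) is behavior-preserving here: the kernel's ffs() is 1-based (ffs(1024) == 11) while __ffs() returns the 0-based index of the lowest set bit (__ffs(1024) == 10) and is undefined for zero, and region_size is a nonzero power of two. Together with the bi_sector -> bi_iter.bi_sector change (the bio iterator API), the sector-to-region mapping is roughly as follows (uint64_t standing in for dm's region_t):

	#include <linux/bitops.h>
	#include <linux/types.h>

	/* region_size must be a nonzero power of two. */
	static unsigned int region_shift_of(uint32_t region_size)
	{
		return __ffs(region_size);	/* == ffs(region_size) - 1 */
	}

	/* Roughly what dm_rh_bio_to_region() computes: offset the bio's
	 * starting sector by the target start, then shift down to a
	 * region number. */
	static uint64_t sector_to_region(sector_t sector, sector_t target_begin,
					 unsigned int region_shift)
	{
		return (sector - target_begin) >> region_shift;
	}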
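Replacing vmalloc(nr_buckets * sizeof(*rh->buckets)) with vmalloc_array(nr_buckets, sizeof(*rh->buckets)) guards the multiplication: on overflow the allocation fails with NULL instead of returning a buffer smaller than intended. A rough open-coded equivalent using the overflow.h helper:

	#include <linux/overflow.h>
	#include <linux/vmalloc.h>

	/* array_size() saturates to SIZE_MAX when n * size overflows, and
	 * a SIZE_MAX vmalloc() request cannot succeed, so the caller sees
	 * NULL rather than an undersized array. */
	static struct list_head *alloc_buckets(unsigned int nr_buckets)
	{
		return vmalloc(array_size(nr_buckets, sizeof(struct list_head)));
	}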
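Finally, the flush and discard tests track the block layer's move from bi_rw to bi_opf, where one field holds both the operation and its modifier flags: modifiers such as REQ_PREFLUSH are still tested with a bitwise AND, but the operation is an enumerated value that must be extracted with bio_op() and compared for equality. A sketch of the predicate dm_rh_inc_pending() uses after this change:

	#include <linux/blk_types.h>

	/* A flush or discard does not carry data for one specific region,
	 * so per-region pending accounting skips it. */
	static bool rh_skip_bio(struct bio *bio)
	{
		return (bio->bi_opf & REQ_PREFLUSH) ||
		       bio_op(bio) == REQ_OP_DISCARD;
	}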
