Diffstat (limited to 'drivers/md/dm-thin.c')
 drivers/md/dm-thin.c | 54 ++++++++++++++++++++++++++----------------------------
 1 file changed, 26 insertions(+), 28 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 07c7f9795b10..05cf4e3f2bbe 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -453,12 +453,13 @@ static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bi
cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);
r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
- if (r)
+ if (r) {
/*
* We reused an old cell; we can get rid of
* the new one.
*/
dm_bio_prison_free_cell(pool->prison, cell_prealloc);
+ }
return r;
}
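The brace change above is purely stylistic; the pattern it sits in is worth a note. A cell is preallocated with GFP_NOIO before the prison is consulted, since allocating under the prison's lock is not an option in the I/O path. dm_bio_detain() then either installs the preallocated cell or, when the key is already held, parks the bio on the existing cell and returns nonzero, in which case the spare cell is freed. A sketch of the caller side, with the return convention as it appears elsewhere in this driver (0: we own the new cell; nonzero: the bio was queued on someone else's cell):

    struct dm_bio_prison_cell *cell;

    if (bio_detain(pool, &key, bio, &cell))
        /* Key already held: the cell's owner will re-issue the bio. */
        return DM_MAPIO_SUBMITTED;

    /* We created the cell and hold the key until we release the cell. */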
@@ -591,12 +592,6 @@ struct dm_thin_endio_hook {
struct dm_bio_prison_cell *cell;
};
-static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
-{
- bio_list_merge(bios, master);
- bio_list_init(master);
-}
-
static void error_bio_list(struct bio_list *bios, blk_status_t error)
{
struct bio *bio;
@@ -615,7 +610,7 @@ static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
bio_list_init(&bios);
spin_lock_irq(&tc->lock);
- __merge_bio_list(&bios, master);
+ bio_list_merge_init(&bios, master);
spin_unlock_irq(&tc->lock);
error_bio_list(&bios, error);
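The open-coded __merge_bio_list() removed above is replaced by bio_list_merge_init() from <linux/bio.h>, which, as far as I know, has exactly the same body: splice the source list onto the destination and leave the source empty.

    static inline void bio_list_merge_init(struct bio_list *bl,
            struct bio_list *bl2)
    {
        bio_list_merge(bl, bl2);    /* append bl2's bios onto bl */
        bio_list_init(bl2);         /* reset bl2 to empty */
    }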
@@ -644,8 +639,8 @@ static void requeue_io(struct thin_c *tc)
bio_list_init(&bios);
spin_lock_irq(&tc->lock);
- __merge_bio_list(&bios, &tc->deferred_bio_list);
- __merge_bio_list(&bios, &tc->retry_on_resume_list);
+ bio_list_merge_init(&bios, &tc->deferred_bio_list);
+ bio_list_merge_init(&bios, &tc->retry_on_resume_list);
spin_unlock_irq(&tc->lock);
error_bio_list(&bios, BLK_STS_DM_REQUEUE);
@@ -707,9 +702,10 @@ static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
(void) sector_div(e, pool->sectors_per_block);
}
- if (e < b)
+ if (e < b) {
/* Can happen if the bio is within a single block. */
e = b;
+ }
*begin = b;
*end = e;
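The new comment covers the case where begin (rounded up) overtakes end (rounded down) because the bio starts and ends inside one block; clamping then yields an empty range. A standalone worked example (block size and bio geometry are illustrative values, not taken from the driver):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t spb = 128;             /* sectors per block */
        uint64_t sector = 100, len = 8; /* bio entirely inside block 0 */

        uint64_t b = (sector + spb - 1) / spb; /* round start up  -> 1 */
        uint64_t e = (sector + len) / spb;     /* round end down  -> 0 */

        if (e < b)      /* bio within a single block */
            e = b;

        assert(b == 1 && e == 1);   /* empty [begin, end): no whole block */
        return 0;
    }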
@@ -721,13 +717,14 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
sector_t bi_sector = bio->bi_iter.bi_sector;
bio_set_dev(bio, tc->pool_dev->bdev);
- if (block_size_is_power_of_two(pool))
+ if (block_size_is_power_of_two(pool)) {
bio->bi_iter.bi_sector =
(block << pool->sectors_per_block_shift) |
(bi_sector & (pool->sectors_per_block - 1));
- else
+ } else {
bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
sector_div(bi_sector, pool->sectors_per_block);
+ }
}
static void remap_to_origin(struct thin_c *tc, struct bio *bio)
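Both remap branches compute block * sectors_per_block + (bi_sector % sectors_per_block); the power-of-two branch merely does it with a shift and a mask. A standalone check with illustrative values:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t shift = 7, spb = 1ULL << shift;    /* 128 sectors/block */
        uint64_t block = 5, bi_sector = 300;

        uint64_t fast = (block << shift) | (bi_sector & (spb - 1));
        uint64_t slow = block * spb + bi_sector % spb;

        assert(fast == slow && fast == 684);    /* 5*128 + 44 */
        return 0;
    }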
@@ -1401,9 +1398,10 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
if (pool->pf.zero_new_blocks) {
if (io_overwrites_block(pool, bio))
remap_and_issue_overwrite(tc, bio, data_block, m);
- else
+ else {
ll_zero(tc, m, data_block * pool->sectors_per_block,
(data_block + 1) * pool->sectors_per_block);
+ }
} else
process_prepared_mapping(m);
}
@@ -1416,17 +1414,17 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
sector_t virt_block_begin = virt_block * pool->sectors_per_block;
sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;
- if (virt_block_end <= tc->origin_size)
+ if (virt_block_end <= tc->origin_size) {
schedule_copy(tc, virt_block, tc->origin_dev,
virt_block, data_dest, cell, bio,
pool->sectors_per_block);
- else if (virt_block_begin < tc->origin_size)
+ } else if (virt_block_begin < tc->origin_size) {
schedule_copy(tc, virt_block, tc->origin_dev,
virt_block, data_dest, cell, bio,
tc->origin_size - virt_block_begin);
- else
+ } else
schedule_zero(tc, virt_block, data_dest, cell, bio);
}
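The three arms split on how the virtual block overlaps the external origin: entirely inside it (copy a whole block), straddling its end (copy only origin_size - virt_block_begin sectors), or entirely beyond it (nothing to copy, just zero the new block). For instance, with 128-sector blocks and an origin_size of 1000 sectors, block 7 covers sectors 896-1023, so the middle arm copies just 104 sectors.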
@@ -2334,10 +2332,9 @@ static struct thin_c *get_first_thin(struct pool *pool)
struct thin_c *tc = NULL;
rcu_read_lock();
- if (!list_empty(&pool->active_thins)) {
- tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
+ tc = list_first_or_null_rcu(&pool->active_thins, struct thin_c, list);
+ if (tc)
thin_get(tc);
- }
rcu_read_unlock();
return tc;
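list_first_or_null_rcu(), from <linux/rculist.h>, returns the first entry or NULL with a single consistent read of the list head. The replaced list_empty() check followed by list_entry_rcu() made two separate reads, between which a concurrent removal could empty the list; folding them into one dereference closes that window.

    /*
     * list_first_or_null_rcu(ptr, type, member)
     *    first entry of the RCU-protected list at ptr, or NULL if
     *    empty; must be called under rcu_read_lock().
     */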
@@ -2486,6 +2483,7 @@ static void pool_work_wait(struct pool_work *pw, struct pool *pool,
init_completion(&pw->complete);
queue_work(pool->wq, &pw->worker);
wait_for_completion(&pw->complete);
+ destroy_work_on_stack(&pw->worker);
}
/*----------------------------------------------------------------*/
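pool_work_wait() runs a work item that lives on the caller's stack (it is set up with INIT_WORK_ONSTACK() just above this hunk's context). Such items must be paired with destroy_work_on_stack() once they have finished, otherwise CONFIG_DEBUG_OBJECTS_WORK stays attached to a stack slot that is about to be reused. A minimal sketch of the pairing, with hypothetical names:

    struct work_struct work;

    INIT_WORK_ONSTACK(&work, my_handler);   /* my_handler: hypothetical */
    queue_work(my_wq, &work);               /* my_wq: hypothetical queue */
    flush_work(&work);                      /* wait for completion */
    destroy_work_on_stack(&work);           /* release the debug object */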
@@ -2844,7 +2842,7 @@ static void disable_discard_passdown_if_not_supported(struct pool_c *pt)
{
struct pool *pool = pt->pool;
struct block_device *data_bdev = pt->data_dev->bdev;
- struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
+ struct queue_limits *data_limits = bdev_limits(data_bdev);
const char *reason = NULL;
if (!pt->adjusted_pf.discard_passdown)
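bdev_limits() is a small accessor in <linux/blkdev.h> that hides the detour through the request queue; to the best of my knowledge it is simply:

    static inline struct queue_limits *bdev_limits(struct block_device *bdev)
    {
        return &bdev_get_queue(bdev)->limits;
    }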
@@ -2950,7 +2948,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
if (IS_ERR(pmd)) {
*error = "Error creating metadata object";
- return (struct pool *)pmd;
+ return ERR_CAST(pmd);
}
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
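Casting the PTR_ERR-encoded pmd to struct pool * by hand works, but ERR_CAST() from <linux/err.h> states the intent and keeps sparse happy. Its definition is essentially:

    static inline void * __must_check ERR_CAST(__force const void *ptr)
    {
        /* cast away the const */
        return (void *) ptr;
    }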
@@ -4081,10 +4079,10 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
if (io_opt_sectors < pool->sectors_per_block ||
!is_factor(io_opt_sectors, pool->sectors_per_block)) {
if (is_factor(pool->sectors_per_block, limits->max_sectors))
- blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT);
+ limits->io_min = limits->max_sectors << SECTOR_SHIFT;
else
- blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT);
- blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
+ limits->io_min = pool->sectors_per_block << SECTOR_SHIFT;
+ limits->io_opt = pool->sectors_per_block << SECTOR_SHIFT;
}
/*
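blk_limits_io_min() used to clamp the value to the logical and physical block sizes as it stored it; my understanding is that since the queue-limits validation rework, blk_validate_limits() applies that clamping when the limits are committed, so a plain assignment is now the idiomatic form and the old helpers are gone.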
@@ -4096,7 +4094,7 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
if (pt->adjusted_pf.discard_enabled) {
disable_discard_passdown_if_not_supported(pt);
if (!pt->adjusted_pf.discard_passdown)
- limits->max_discard_sectors = 0;
+ limits->max_hw_discard_sectors = 0;
/*
* The pool uses the same discard limits as the underlying data
* device. DM core has already set this up.
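The block core now derives the user-visible max_discard_sectors as the minimum of the hardware limit and a user-settable cap, so a stacking driver expresses what the target supports through max_hw_discard_sectors and leaves max_discard_sectors to the core. The same substitution appears in thin_io_hints() below.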
@@ -4493,7 +4491,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
if (pool->pf.discard_enabled) {
limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
- limits->max_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE;
+ limits->max_hw_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE;
}
}
@@ -4560,5 +4558,5 @@ module_param_named(no_space_timeout, no_space_timeout_secs, uint, 0644);
MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
-MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
MODULE_LICENSE("GPL");